text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#
# @file TestWriteMathML.py
# @brief Write MathML unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/math/test/TestWriteMathML.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
def util_NaN():
    """Return a quiet NaN, produced portably as inf - inf."""
    overflow = 1e300 * 1e300  # overflows to +inf
    return overflow - overflow  # inf - inf == nan
def util_PosInf():
    """Return positive infinity via float overflow."""
    overflow = 1e300
    overflow *= overflow  # 1e600 overflows to +inf
    return overflow
def util_NegInf():
    """Return negative infinity via float overflow."""
    overflow = 1e300
    overflow *= overflow  # +inf
    return -overflow
def wrapString(s):
    """Identity wrapper kept for parity with the C++ test harness."""
    return s
def MATHML_FOOTER():
    """Closing tag of a MathML document."""
    return '</math>'
def MATHML_HEADER():
    """Opening <math> tag declaring the MathML namespace."""
    return '<math xmlns="http://www.w3.org/1998/Math/MathML">\n'
def MATHML_HEADER_UNITS():
    """First half of a <math> tag that will also declare the SBML namespace."""
    return '<math xmlns="http://www.w3.org/1998/Math/MathML"'
def MATHML_HEADER_UNITS2():
    """Second half of the units header: the SBML namespace declaration."""
    return ' xmlns:sbml="http://www.sbml.org/sbml/level3/version1/core">\n'
def XML_HEADER():
    """Standard XML declaration line."""
    return '<?xml version="1.0" encoding="UTF-8"?>\n'
def wrapMathML(s):
    """Wrap MathML body *s* in the standard XML declaration and <math> envelope."""
    return "".join([XML_HEADER(), MATHML_HEADER(), s, MATHML_FOOTER()])
def wrapMathMLUnits(s):
    """Wrap *s* in an envelope whose <math> tag also declares the SBML namespace."""
    parts = (XML_HEADER(),
             MATHML_HEADER_UNITS(),
             MATHML_HEADER_UNITS2(),
             s,
             MATHML_FOOTER())
    return "".join(parts)
class TestWriteMathML(unittest.TestCase):
    """Unit tests for libsbml.writeMathMLToString.

    Each test parses a formula (or builds an ASTNode directly), serializes
    it to MathML, and compares the result against a hand-built expected
    string.  This file was machine-generated from the C++ test
    TestWriteMathML.cpp; do not hand-edit the test bodies.
    """
    # Module-level aliases left over from the automatic C++-to-Python
    # conversion; the tests themselves use the instance attributes
    # self.N (the AST node) and self.S (the serialized MathML string).
    global S
    S = None
    global N
    N = None

    def equals(self, *x):
        # Two-argument form compares the operands directly.
        # NOTE(review): the one-argument form compares against self.OSS,
        # which is never created in this Python conversion (leftover from
        # the C++ ostringstream harness) and would raise AttributeError
        # if exercised -- confirm it is unused.
        if len(x) == 2:
            return x[0] == x[1]
        elif len(x) == 1:
            return x[0] == self.OSS.str()

    def setUp(self):
        # Reset the node/string slots before every test.
        self.N = None
        self.S = None
        pass

    def tearDown(self):
        self.N = None
        self.S = None
        pass

    def test_MathMLFormatter_ci(self):
        expected = wrapMathML("  <ci> foo </ci>\n")
        self.N = libsbml.parseFormula("foo")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_cn_e_notation_1(self):
        expected = wrapMathML("  <cn type=\"e-notation\"> 0 <sep/> 3 </cn>\n")
        self.N = libsbml.parseFormula("0e3")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_cn_e_notation_2(self):
        expected = wrapMathML("  <cn type=\"e-notation\"> 2 <sep/> 3 </cn>\n")
        self.N = libsbml.parseFormula("2e3")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_cn_e_notation_3(self):
        expected = wrapMathML("  <cn type=\"e-notation\"> 1234567.8 <sep/> 3 </cn>\n")
        self.N = libsbml.parseFormula("1234567.8e3")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_cn_e_notation_4(self):
        expected = wrapMathML("  <cn type=\"e-notation\"> 6.0221367 <sep/> 23 </cn>\n")
        self.N = libsbml.parseFormula("6.0221367e+23")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_cn_e_notation_5(self):
        expected = wrapMathML("  <cn type=\"e-notation\"> 4 <sep/> -6 </cn>\n")
        self.N = libsbml.parseFormula(".000004")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_cn_e_notation_6(self):
        expected = wrapMathML("  <cn type=\"e-notation\"> 4 <sep/> -12 </cn>\n")
        self.N = libsbml.parseFormula(".000004e-6")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_cn_e_notation_7(self):
        expected = wrapMathML("  <cn type=\"e-notation\"> -1 <sep/> -6 </cn>\n")
        self.N = libsbml.parseFormula("-1e-6")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_cn_integer(self):
        expected = wrapMathML("  <cn type=\"integer\"> 5 </cn>\n")
        self.N = libsbml.parseFormula("5")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_cn_rational(self):
        expected = wrapMathML("  <cn type=\"rational\"> 1 <sep/> 3 </cn>\n")
        self.N = libsbml.ASTNode()
        self.N.setValue(1,3)
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_cn_real_1(self):
        expected = wrapMathML("  <cn> 1.2 </cn>\n")
        self.N = libsbml.parseFormula("1.2")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_cn_real_2(self):
        expected = wrapMathML("  <cn> 1234567.8 </cn>\n")
        self.N = libsbml.parseFormula("1234567.8")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_cn_real_3(self):
        expected = wrapMathML("  <cn> -3.14 </cn>\n")
        self.N = libsbml.parseFormula("-3.14")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_cn_real_locale(self):
        # Serialization must use '.' as the decimal separator regardless of
        # the process locale.
        expected = wrapMathML("  <cn> 2.72 </cn>\n")
        self.N = libsbml.parseFormula("2.72")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_cn_units(self):
        expected = wrapMathMLUnits("  <cn sbml:units=\"mole\"> 1.2 </cn>\n")
        self.N = libsbml.parseFormula("1.2")
        self.N.setUnits("mole")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_constant_exponentiale(self):
        expected = wrapMathML("  <exponentiale/>\n")
        self.N = libsbml.ASTNode(libsbml.AST_CONSTANT_E)
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_constant_false(self):
        expected = wrapMathML("  <false/>\n")
        self.N = libsbml.ASTNode(libsbml.AST_CONSTANT_FALSE)
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_constant_infinity(self):
        expected = wrapMathML("  <infinity/>\n")
        self.N = libsbml.ASTNode()
        self.N.setValue( util_PosInf() )
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_constant_infinity_neg(self):
        # Disabled by the generator; kept verbatim for traceability to the
        # C++ original.
        #expected = wrapMathML("  <apply>\n" +
        #"    <minus/>\n" +
        #"    <infinity/>\n" +
        #"  </apply>\n")
        #self.N = libsbml.ASTNode()
        #self.N.setValue(- util_PosInf())
        #self.S = libsbml.writeMathMLToString(self.N)
        #self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_constant_notanumber(self):
        expected = wrapMathML("  <notanumber/>\n")
        self.N = libsbml.ASTNode(libsbml.AST_REAL)
        self.N.setValue( util_NaN() )
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_constant_true(self):
        expected = wrapMathML("  <true/>\n")
        self.N = libsbml.ASTNode(libsbml.AST_CONSTANT_TRUE)
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_csymbol_avogadro(self):
        expected = wrapMathML("  <csymbol encoding=\"text\" " + "definitionURL=\"http://www.sbml.org/sbml/symbols/avogadro\"> NA </csymbol>\n")
        self.N = libsbml.ASTNode(libsbml.AST_NAME_AVOGADRO)
        self.N.setName("NA")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_csymbol_delay(self):
        expected = wrapMathML("  <apply>\n" +
                              "    <csymbol encoding=\"text\" definitionURL=\"http://www.sbml.org/sbml/" +
                              "symbols/delay\"> my_delay </csymbol>\n" +
                              "    <ci> x </ci>\n" +
                              "    <cn> 0.1 </cn>\n" +
                              "  </apply>\n")
        self.N = libsbml.parseFormula("delay(x, 0.1)")
        self.N.setName("my_delay")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_csymbol_time(self):
        expected = wrapMathML("  <csymbol encoding=\"text\" " + "definitionURL=\"http://www.sbml.org/sbml/symbols/time\"> t </csymbol>\n")
        self.N = libsbml.ASTNode(libsbml.AST_NAME_TIME)
        self.N.setName("t")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_function_1(self):
        expected = wrapMathML("  <apply>\n" +
                              "    <ci> foo </ci>\n" +
                              "    <cn type=\"integer\"> 1 </cn>\n" +
                              "    <cn type=\"integer\"> 2 </cn>\n" +
                              "    <cn type=\"integer\"> 3 </cn>\n" +
                              "  </apply>\n")
        self.N = libsbml.parseFormula("foo(1, 2, 3)")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_function_2(self):
        expected = wrapMathML("  <apply>\n" +
                              "    <ci> foo </ci>\n" +
                              "    <cn type=\"integer\"> 1 </cn>\n" +
                              "    <cn type=\"integer\"> 2 </cn>\n" +
                              "    <apply>\n" +
                              "      <ci> bar </ci>\n" +
                              "      <ci> z </ci>\n" +
                              "    </apply>\n" +
                              "  </apply>\n")
        self.N = libsbml.parseFormula("foo(1, 2, bar(z))")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_lambda(self):
        expected = wrapMathML("  <lambda>\n" +
                              "    <bvar>\n" +
                              "      <ci> x </ci>\n" +
                              "    </bvar>\n" +
                              "    <bvar>\n" +
                              "      <ci> y </ci>\n" +
                              "    </bvar>\n" +
                              "    <apply>\n" +
                              "      <root/>\n" +
                              "      <degree>\n" +
                              "        <cn type=\"integer\"> 2 </cn>\n" +
                              "      </degree>\n" +
                              "      <apply>\n" +
                              "        <plus/>\n" +
                              "        <apply>\n" +
                              "          <power/>\n" +
                              "          <ci> x </ci>\n" +
                              "          <cn type=\"integer\"> 2 </cn>\n" +
                              "        </apply>\n" +
                              "        <apply>\n" +
                              "          <power/>\n" +
                              "          <ci> y </ci>\n" +
                              "          <cn type=\"integer\"> 2 </cn>\n" +
                              "        </apply>\n" +
                              "      </apply>\n" +
                              "    </apply>\n" +
                              "  </lambda>\n")
        self.N = libsbml.parseFormula("lambda(x, y, root(2, x^2 + y^2))")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_lambda_no_bvars(self):
        expected = wrapMathML("  <lambda>\n" +
                              "    <apply>\n" +
                              "      <plus/>\n" +
                              "      <cn type=\"integer\"> 2 </cn>\n" +
                              "      <cn type=\"integer\"> 2 </cn>\n" +
                              "    </apply>\n" +
                              "  </lambda>\n")
        self.N = libsbml.parseFormula("lambda(2 + 2)")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_log(self):
        expected = wrapMathML("  <apply>\n" +
                              "    <log/>\n" +
                              "    <logbase>\n" +
                              "      <cn type=\"integer\"> 2 </cn>\n" +
                              "    </logbase>\n" +
                              "    <ci> N </ci>\n" +
                              "  </apply>\n")
        self.N = libsbml.parseFormula("log(2, N)")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_minus(self):
        expected = wrapMathML("  <apply>\n" +
                              "    <minus/>\n" +
                              "    <cn type=\"integer\"> 1 </cn>\n" +
                              "    <cn type=\"integer\"> 2 </cn>\n" +
                              "  </apply>\n")
        self.N = libsbml.parseFormula("1 - 2")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_minus_unary_1(self):
        expected = wrapMathML("  <cn type=\"integer\"> -2 </cn>\n")
        self.N = libsbml.parseFormula("-2")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_minus_unary_2(self):
        expected = wrapMathML("  <apply>\n" +
                              "    <minus/>\n" +
                              "    <ci> a </ci>\n" +
                              "  </apply>\n")
        self.N = libsbml.parseFormula("-a")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_piecewise(self):
        expected = wrapMathML("  <piecewise>\n" +
                              "    <piece>\n" +
                              "      <apply>\n" +
                              "        <minus/>\n" +
                              "        <ci> x </ci>\n" +
                              "      </apply>\n" +
                              "      <apply>\n" +
                              "        <lt/>\n" +
                              "        <ci> x </ci>\n" +
                              "        <cn type=\"integer\"> 0 </cn>\n" +
                              "      </apply>\n" +
                              "    </piece>\n" +
                              "    <piece>\n" +
                              "      <cn type=\"integer\"> 0 </cn>\n" +
                              "      <apply>\n" +
                              "        <eq/>\n" +
                              "        <ci> x </ci>\n" +
                              "        <cn type=\"integer\"> 0 </cn>\n" +
                              "      </apply>\n" +
                              "    </piece>\n" +
                              "    <piece>\n" +
                              "      <ci> x </ci>\n" +
                              "      <apply>\n" +
                              "        <gt/>\n" +
                              "        <ci> x </ci>\n" +
                              "        <cn type=\"integer\"> 0 </cn>\n" +
                              "      </apply>\n" +
                              "    </piece>\n" +
                              "  </piecewise>\n")
        f = "piecewise(-x, lt(x, 0), 0, eq(x, 0), x, gt(x, 0))";
        self.N = libsbml.parseFormula(f)
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_piecewise_otherwise(self):
        expected = wrapMathML("  <piecewise>\n" +
                              "    <piece>\n" +
                              "      <cn type=\"integer\"> 0 </cn>\n" +
                              "      <apply>\n" +
                              "        <lt/>\n" +
                              "        <ci> x </ci>\n" +
                              "        <cn type=\"integer\"> 0 </cn>\n" +
                              "      </apply>\n" +
                              "    </piece>\n" +
                              "    <otherwise>\n" +
                              "      <ci> x </ci>\n" +
                              "    </otherwise>\n" +
                              "  </piecewise>\n")
        self.N = libsbml.parseFormula("piecewise(0, lt(x, 0), x)")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_plus_binary(self):
        expected = wrapMathML("  <apply>\n" +
                              "    <plus/>\n" +
                              "    <cn type=\"integer\"> 1 </cn>\n" +
                              "    <cn type=\"integer\"> 2 </cn>\n" +
                              "  </apply>\n")
        self.N = libsbml.parseFormula("1 + 2")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_plus_nary_1(self):
        expected = wrapMathML("  <apply>\n" +
                              "    <plus/>\n" +
                              "    <cn type=\"integer\"> 1 </cn>\n" +
                              "    <cn type=\"integer\"> 2 </cn>\n" +
                              "    <cn type=\"integer\"> 3 </cn>\n" +
                              "  </apply>\n")
        self.N = libsbml.parseFormula("1 + 2 + 3")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_plus_nary_2(self):
        # Left-grouped parentheses flatten to the same n-ary <plus/>.
        expected = wrapMathML("  <apply>\n" +
                              "    <plus/>\n" +
                              "    <cn type=\"integer\"> 1 </cn>\n" +
                              "    <cn type=\"integer\"> 2 </cn>\n" +
                              "    <cn type=\"integer\"> 3 </cn>\n" +
                              "  </apply>\n")
        self.N = libsbml.parseFormula("(1 + 2) + 3")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_plus_nary_3(self):
        # Right-grouped parentheses also flatten to one n-ary <plus/>.
        expected = wrapMathML("  <apply>\n" +
                              "    <plus/>\n" +
                              "    <cn type=\"integer\"> 1 </cn>\n" +
                              "    <cn type=\"integer\"> 2 </cn>\n" +
                              "    <cn type=\"integer\"> 3 </cn>\n" +
                              "  </apply>\n")
        self.N = libsbml.parseFormula("1 + (2 + 3)")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_plus_nary_4(self):
        expected = wrapMathML("  <apply>\n" +
                              "    <plus/>\n" +
                              "    <cn type=\"integer\"> 1 </cn>\n" +
                              "    <cn type=\"integer\"> 2 </cn>\n" +
                              "    <apply>\n" +
                              "      <times/>\n" +
                              "      <ci> x </ci>\n" +
                              "      <ci> y </ci>\n" +
                              "      <ci> z </ci>\n" +
                              "    </apply>\n" +
                              "    <cn type=\"integer\"> 3 </cn>\n" +
                              "  </apply>\n")
        self.N = libsbml.parseFormula("1 + 2 + x * y * z + 3")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_root(self):
        expected = wrapMathML("  <apply>\n" +
                              "    <root/>\n" +
                              "    <degree>\n" +
                              "      <cn type=\"integer\"> 3 </cn>\n" +
                              "    </degree>\n" +
                              "    <ci> x </ci>\n" +
                              "  </apply>\n")
        self.N = libsbml.parseFormula("root(3, x)")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass

    def test_MathMLFormatter_sin(self):
        expected = wrapMathML("  <apply>\n" +
                              "    <sin/>\n" +
                              "    <ci> x </ci>\n" +
                              "  </apply>\n")
        self.N = libsbml.parseFormula("sin(x)")
        self.S = libsbml.writeMathMLToString(self.N)
        self.assertEqual( True, self.equals(expected,self.S) )
        pass
def suite():
    """Return a TestSuite containing every TestWriteMathML test case."""
    suite = unittest.TestSuite()
    # unittest.makeSuite() has been deprecated since Python 3.2 and was
    # removed in Python 3.13; the TestLoader API is the supported
    # equivalent and behaves identically here.
    suite.addTest(
        unittest.defaultTestLoader.loadTestsFromTestCase(TestWriteMathML))
    return suite
if __name__ == "__main__":
    # Exit code signals the overall test result to the calling harness:
    # 0 on success, 1 on any failure or error.
    if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful():
        sys.exit(0)
    else:
        sys.exit(1)
|
TheCoSMoCompany/biopredyn
|
Prototype/src/libsbml-5.10.0/src/bindings/python/test/math/TestWriteMathML.py
|
Python
|
bsd-3-clause
| 18,887
|
[
"Avogadro",
"VisIt"
] |
2666ed5e04d209f1c147bd9f759d0597554d81bc53bd143d39ffb8669d0b3ed9
|
"""
.. module: security_monkey.auditors.custom.AnchoreAuditor
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Pritam D. Gautam <pritam.gautam@nuagedm.com> @nuagedm
"""
from security_monkey.auditor import Auditor
from security_monkey.watchers.custom.anchoreengine import AnchoreEngine
ANCHORE_VULN_TAG = '{vulnid} Vulnerability'
ANCHORE_VULN_NOTES = '{pkg} package has {sev} vulnerability [{vulnid}]. ' \
'Repository: {reponame}:{repotag}. ' \
'Visit {INFO} for more details on vulnerability. '
class AnchoreAuditor(Auditor):
    """Raise one Security Monkey issue per vulnerability reported by the
    Anchore Engine watcher for a container image item."""
    index = AnchoreEngine.index
    i_am_singular = AnchoreEngine.i_am_singular
    i_am_plural = AnchoreEngine.i_am_plural

    def __init__(self, accounts=None, debug=False):
        super(AnchoreAuditor, self).__init__(accounts=accounts, debug=debug)

    def check_vuln_status(self, item):
        """Scan ``item.new_config['vulns']`` and add an issue per entry.

        The issue score is derived from the vulnerability severity; the
        notes string embeds package, severity, repo and an info link.
        """
        # Following is relationship between Vulnerability Severity Ratings & Score
        # Severity  |  Base Score Range     |  ThreatAlert Score
        # ----------+-----------------------+--------------------
        # Low       |  0.0-3.9              |  3
        # Medium    |  4.0-6.9              |  6
        # High      |  7.0-10.0             |  10
        # Unknown   |  -                    |  0
        #
        score_mapping = {'Low': 3, 'Medium': 6, 'High': 10}
        if item.new_config:
            for vuln in item.new_config['vulns']:
                # Unmapped severities (e.g. 'Unknown') fall back to score 0.
                score = score_mapping.get(vuln['severity'], 0)
                tag = ANCHORE_VULN_TAG.format(vulnid=vuln.get('vuln_id'))
                # NOTE(review): 'pkg' is read from the item-level config,
                # not from the per-vulnerability entry -- confirm this is
                # intended when an item carries multiple packages.
                notes = ANCHORE_VULN_NOTES.format(pkg=item.new_config.get('pkg'), sev=vuln['severity'],
                                                  vulnid=vuln.get('vuln_id'), INFO=vuln.get('information'),
                                                  reponame=item.new_config.get('Reponame'),
                                                  repotag=item.new_config.get('RepoTag'))
                self.add_issue(score, tag, item, notes=notes)
|
stackArmor/security_monkey
|
security_monkey/auditors/custom/anchore.py
|
Python
|
apache-2.0
| 2,035
|
[
"VisIt"
] |
62560ee9639d7e9dcd61f67140cee32d7d3b11b98c670019763a473ed362d8d8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_import
short_description: import file on PAN-OS devices
description:
- Import file on PAN-OS device
notes:
- API reference documentation can be read from the C(/api/) directory of your appliance
- Certificate validation is enabled by default as of Ansible 2.6. This may break existing playbooks but should be disabled with caution.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
- requests
- requests_toolbelt
deprecated:
alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
removed_in: "2.12"
why: Consolidating code base.
options:
category:
description:
- Category of file uploaded. The default is software.
- See API > Import section of the API reference for category options.
default: software
file:
description:
- Location of the file to import into device.
url:
description:
- URL of the file that will be imported to device.
validate_certs:
description:
- If C(no), SSL certificates will not be validated. Disabling certificate validation is not recommended.
default: yes
type: bool
version_added: "2.6"
extends_documentation_fragment: panos
'''
EXAMPLES = '''
# import software image PanOS_vm-6.1.1 on 192.168.1.1
- name: import software image into PAN-OS
panos_import:
ip_address: 192.168.1.1
username: admin
password: admin
file: /tmp/PanOS_vm-6.1.1
category: software
'''
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import os.path
import xml.etree
import tempfile
import shutil
import os
try:
import pan.xapi
import requests
import requests_toolbelt
HAS_LIB = True
except ImportError:
HAS_LIB = False
def import_file(xapi, module, ip_address, file_, category):
    """Upload ``file_`` to the PAN-OS import API over HTTPS.

    Returns ``(True, filename)`` on success.  Calls ``module.fail_json``
    when the device replies with ``status="error"``; raises
    ``requests.HTTPError`` for HTTP-level failures.
    """
    # BUG FIX: the module-level "import xml.etree" does not guarantee the
    # ElementTree submodule is loaded; import it explicitly here.
    from xml.etree import ElementTree

    xapi.keygen()

    params = {
        'type': 'import',
        'category': category,
        'key': xapi.api_key,
    }

    filename = os.path.basename(file_)

    # BUG FIX: open the upload inside a context manager so the handle is
    # closed even if the POST raises (the original leaked the file object).
    with open(file_, 'rb') as fileobj:
        mef = requests_toolbelt.MultipartEncoder(
            fields={
                'file': (filename, fileobj, 'application/octet-stream')
            }
        )
        r = requests.post(
            'https://' + ip_address + '/api/',
            verify=module.params['validate_certs'],
            params=params,
            headers={'Content-Type': mef.content_type},
            data=mef
        )

    # if something goes wrong just raise an exception
    r.raise_for_status()

    resp = ElementTree.fromstring(r.content)

    if resp.attrib['status'] == 'error':
        module.fail_json(msg=r.content)

    return True, filename
def download_file(url):
    """Stream *url* into a persistent NamedTemporaryFile and return its path."""
    response = requests.get(url, stream=True)
    # delete=False: the caller is responsible for removing the file later.
    with tempfile.NamedTemporaryFile(prefix='ai', delete=False) as tmp:
        shutil.copyfileobj(response.raw, tmp)
    return tmp.name
def delete_file(path):
    """Remove the (temporary) file at *path*."""
    os.unlink(path)
def main():
    """Module entry point: validate args, fetch the file if needed,
    import it into the device, and report the result to Ansible."""
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(required=True, no_log=True),
        username=dict(default='admin'),
        category=dict(default='software'),
        file=dict(),
        url=dict(),
        validate_certs=dict(type='bool', default=True),
    )
    # Exactly one of 'file' / 'url' must be supplied.
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, required_one_of=[['file', 'url']])
    if not HAS_LIB:
        module.fail_json(msg='pan-python, requests, and requests_toolbelt are required for this module')

    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']

    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password
    )

    file_ = module.params['file']
    url = module.params['url']

    category = module.params['category']

    # we can get file from URL or local storage
    if url is not None:
        file_ = download_file(url)

    try:
        changed, filename = import_file(xapi, module, ip_address, file_, category)
    except Exception as exc:
        module.fail_json(msg=to_native(exc))

    # cleanup and delete file if local
    # NOTE(review): this only removes files that were downloaded from a
    # URL; user-supplied local files are deliberately left in place.
    if url is not None:
        delete_file(file_)

    module.exit_json(changed=changed, filename=filename, msg="okey dokey")
if __name__ == '__main__':
main()
|
simonwydooghe/ansible
|
lib/ansible/modules/network/panos/_panos_import.py
|
Python
|
gpl-3.0
| 5,413
|
[
"Galaxy"
] |
a3594fb41aff7892c0c65d67dde6558f43887607b000dd236778d28e7f8abe48
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements the IRAFStarFinder class.
"""
import inspect
import warnings
from astropy.nddata import extract_array
from astropy.table import QTable
from astropy.utils import lazyproperty
import numpy as np
from .core import StarFinderBase, _StarFinderKernel
from ..utils._convolution import _filter_data
from ..utils._misc import _get_version_info
from ..utils._moments import _moments, _moments_central
from ..utils.exceptions import NoDetectionsWarning
__all__ = ['IRAFStarFinder']
class IRAFStarFinder(StarFinderBase):
    """
    Detect stars in an image using IRAF's "starfind" algorithm.

    `IRAFStarFinder` searches images for local density maxima that have
    a peak amplitude greater than ``threshold`` above the local
    background and have a PSF full-width at half-maximum similar to the
    input ``fwhm``.  The objects' centroid, roundness (ellipticity), and
    sharpness are calculated using image moments.

    Parameters
    ----------
    threshold : float
        The absolute image value above which to select sources.

    fwhm : float
        The full-width half-maximum (FWHM) of the 2D circular Gaussian
        kernel in units of pixels.

    minsep_fwhm : float, optional
        The minimum separation for detected objects in units of
        ``fwhm``.

    sigma_radius : float, optional
        The truncation radius of the Gaussian kernel in units of sigma
        (standard deviation) [``1 sigma = FWHM /
        2.0*sqrt(2.0*log(2.0))``].

    sharplo : float, optional
        The lower bound on sharpness for object detection.

    sharphi : float, optional
        The upper bound on sharpness for object detection.

    roundlo : float, optional
        The lower bound on roundness for object detection.

    roundhi : float, optional
        The upper bound on roundness for object detection.

    sky : float, optional
        The background sky level of the image.  Inputing a ``sky`` value
        will override the background sky estimate.  Setting ``sky``
        affects only the output values of the object ``peak``, ``flux``,
        and ``mag`` values.  The default is ``None``, which means the
        sky value will be estimated using the `starfind`_ method.

    exclude_border : bool, optional
        Set to `True` to exclude sources found within half the size of
        the convolution kernel from the image borders.  The default is
        `False`, which is the mode used by `starfind`_.

    brightest : int, None, optional
        Number of brightest objects to keep after sorting the full object
        list.  If ``brightest`` is set to `None`, all objects will be
        selected.

    peakmax : float, None, optional
        Maximum peak pixel value in an object.  Only objects whose peak
        pixel values are *strictly smaller* than ``peakmax`` will be
        selected.  This may be used to exclude saturated sources.  By
        default, when ``peakmax`` is set to `None`, all objects will be
        selected.

        .. warning::
            `IRAFStarFinder` automatically excludes objects whose peak
            pixel values are negative.  Therefore, setting ``peakmax`` to
            a non-positive value would result in exclusion of all objects.

    xycoords : `None` or Nx2 `~numpy.ndarray`
        The (x, y) pixel coordinates of the approximate centroid
        positions of identified sources.  If ``xycoords`` are input, the
        algorithm will skip the source-finding step.

    Notes
    -----
    For the convolution step, this routine sets pixels beyond the image
    borders to 0.0.  The equivalent parameters in IRAF's `starfind`_ are
    ``boundary='constant'`` and ``constant=0.0``.

    IRAF's `starfind`_ uses ``hwhmpsf``, ``fradius``, and ``sepmin`` as
    input parameters.  The equivalent input values for `IRAFStarFinder`
    are:

    * ``fwhm = hwhmpsf * 2``
    * ``sigma_radius = fradius * sqrt(2.0*log(2.0))``
    * ``minsep_fwhm = 0.5 * sepmin``

    The main differences between `~photutils.detection.DAOStarFinder`
    and `~photutils.detection.IRAFStarFinder` are:

    * `~photutils.detection.IRAFStarFinder` always uses a 2D circular
      Gaussian kernel, while `~photutils.detection.DAOStarFinder` can
      use an elliptical Gaussian kernel.

    * `~photutils.detection.IRAFStarFinder` calculates the objects'
      centroid, roundness, and sharpness using image moments.

    See Also
    --------
    DAOStarFinder

    References
    ----------
    .. [1] https://iraf.net/irafhelp.php?val=starfind

    .. _starfind: https://iraf.net/irafhelp.php?val=starfind
    """

    def __init__(self, threshold, fwhm, sigma_radius=1.5, minsep_fwhm=2.5,
                 sharplo=0.5, sharphi=2.0, roundlo=0.0, roundhi=0.2, sky=None,
                 exclude_border=False, brightest=None, peakmax=None,
                 xycoords=None):
        if not np.isscalar(threshold):
            raise TypeError('threshold must be a scalar value.')

        if not np.isscalar(fwhm):
            raise TypeError('fwhm must be a scalar value.')

        self.threshold = threshold
        self.fwhm = fwhm
        self.sigma_radius = sigma_radius
        self.minsep_fwhm = minsep_fwhm
        self.sharplo = sharplo
        self.sharphi = sharphi
        self.roundlo = roundlo
        self.roundhi = roundhi
        self.sky = sky
        self.exclude_border = exclude_border
        self.brightest = self._validate_brightest(brightest)
        self.peakmax = peakmax

        if xycoords is not None:
            xycoords = np.asarray(xycoords)
            if xycoords.ndim != 2 or xycoords.shape[1] != 2:
                raise ValueError('xycoords must be shaped as a Nx2 array')
        self.xycoords = xycoords

        # Circular (ratio=1) Gaussian kernel, as used by IRAF starfind.
        self.kernel = _StarFinderKernel(self.fwhm, ratio=1.0, theta=0.0,
                                        sigma_radius=self.sigma_radius)
        # Minimum source separation in pixels (rounded, never below 2).
        self.min_separation = max(2, int((self.fwhm * self.minsep_fwhm) + 0.5))

    @staticmethod
    def _validate_brightest(brightest):
        """Validate ``brightest``: must be a positive integer (or `None`)."""
        if brightest is not None:
            if brightest <= 0:
                # BUG FIX: the message previously said '>= 0' although 0 is
                # also rejected by the check above.
                raise ValueError('brightest must be > 0')
            bright_int = int(brightest)
            if bright_int != brightest:
                raise ValueError('brightest must be an integer')
            brightest = bright_int
        return brightest

    def _get_raw_catalog(self, data, mask=None):
        """Convolve ``data``, locate candidate peaks, and build the
        (unfiltered) `_IRAFStarFinderCatalog`; `None` if nothing found."""
        convolved_data = _filter_data(data, self.kernel.data, mode='constant',
                                      fill_value=0.0,
                                      check_normalization=False)

        if self.xycoords is None:
            xypos = self._find_stars(convolved_data, self.kernel,
                                     self.threshold,
                                     min_separation=self.min_separation,
                                     mask=mask,
                                     exclude_border=self.exclude_border)
        else:
            # User-supplied positions bypass the peak-finding step.
            xypos = self.xycoords

        if xypos is None:
            warnings.warn('No sources were found.', NoDetectionsWarning)
            return None

        cat = _IRAFStarFinderCatalog(data, convolved_data, xypos, self.kernel,
                                     sky=self.sky, sharplo=self.sharplo,
                                     sharphi=self.sharphi,
                                     roundlo=self.roundlo,
                                     roundhi=self.roundhi,
                                     brightest=self.brightest,
                                     peakmax=self.peakmax)
        return cat

    def find_stars(self, data, mask=None):
        """
        Find stars in an astronomical image.

        Parameters
        ----------
        data : 2D array_like
            The 2D image array.

        mask : 2D bool array, optional
            A boolean mask with the same shape as ``data``, where a
            `True` value indicates the corresponding element of ``data``
            is masked.  Masked pixels are ignored when searching for
            stars.

        Returns
        -------
        table : `~astropy.table.QTable` or `None`
            A table of found objects with the following parameters:

            * ``id``: unique object identification number.
            * ``xcentroid, ycentroid``: object centroid.
            * ``fwhm``: object FWHM.
            * ``sharpness``: object sharpness.
            * ``roundness``: object roundness.
            * ``pa``: object position angle (degrees counter clockwise
              from the positive x axis).
            * ``npix``: the total number of (positive) unmasked pixels.
            * ``sky``: the local ``sky`` value.
            * ``peak``: the peak, sky-subtracted, pixel value of the
              object.
            * ``flux``: the object instrumental flux.
            * ``mag``: the object instrumental magnitude calculated as
              ``-2.5 * log10(flux)``.

            `None` is returned if no stars are found.
        """
        cat = self._get_raw_catalog(data, mask=mask)
        if cat is None:
            return None

        # apply all selection filters
        cat = cat.apply_all_filters()
        if cat is None:
            return None

        # create the output table
        return cat.to_table()
class _IRAFStarFinderCatalog:
"""
Class to create a catalog of the properties of each detected star,
as defined by IRAF's ``starfind`` task.
Parameters
----------
data : 2D `~numpy.ndarray`
The 2D image.
convolved_data : 2D `~numpy.ndarray`
The convolved 2D image.
xypos: Nx2 `numpy.ndarray`
A Nx2 array of (x, y) pixel coordinates denoting the central
positions of the stars.
kernel : `_StarFinderKernel`
The convolution kernel. This kernel must match the kernel used
to create the ``convolved_data``.
sky : `None` or float, optional
The local sky level around the source. If sky is ``None``, then
a local sky level will be (crudely) estimated using the IRAF
``starfind`` calculation.
"""
def __init__(self, data, convolved_data, xypos, kernel, sky=None,
             sharplo=0.2, sharphi=1.0, roundlo=-1.0, roundhi=1.0,
             brightest=None, peakmax=None):
    """Store the input images, source positions, and selection bounds."""
    self.data = data
    self.convolved_data = convolved_data
    self.xypos = xypos
    self.kernel = kernel
    self._sky = sky
    self.sharplo = sharplo
    self.sharphi = sharphi
    self.roundlo = roundlo
    self.roundhi = roundhi
    self.brightest = brightest
    self.peakmax = peakmax

    # IDs are 1-based and consecutive; must be set after xypos since
    # len(self) is len(self.xypos).
    self.id = np.arange(len(self)) + 1
    # Cutouts use the convolution-kernel footprint.
    self.cutout_shape = kernel.shape
    self.default_columns = ('id', 'xcentroid', 'ycentroid', 'fwhm',
                            'sharpness', 'roundness', 'pa', 'npix',
                            'sky', 'peak', 'flux', 'mag')
def __len__(self):
return len(self.xypos)
def __getitem__(self, index):
newcls = object.__new__(self.__class__)
init_attr = ('data', 'convolved_data', 'kernel', '_sky', 'sharplo',
'sharphi', 'roundlo', 'roundhi', 'brightest', 'peakmax',
'cutout_shape', 'default_columns')
for attr in init_attr:
setattr(newcls, attr, getattr(self, attr))
# xypos determines ordering and isscalar
# NOTE: always keep as a 2D array, even for a single source
attr = 'xypos'
value = getattr(self, attr)[index]
setattr(newcls, attr, np.atleast_2d(value))
keys = set(self.__dict__.keys()) & set(self._lazyproperties)
keys.add('id')
for key in keys:
value = self.__dict__[key]
# do not insert lazy attributes that are always scalar (e.g.,
# isscalar), i.e., not an array/list for each source
if np.isscalar(value):
continue
# value is always at least a 1D array, even for a single source
value = np.atleast_1d(value[index])
newcls.__dict__[key] = value
return newcls
@lazyproperty
def isscalar(self):
"""
Whether the instance is scalar (e.g., a single source).
"""
return self.xypos.shape == (1, 2)
@property
def _lazyproperties(self):
"""
Return all lazyproperties (even in superclasses).
"""
def islazyproperty(obj):
return isinstance(obj, lazyproperty)
return [i[0] for i in inspect.getmembers(self.__class__,
predicate=islazyproperty)]
def reset_ids(self):
"""Reset the ID column to be consecutive integers."""
self.id = np.arange(len(self)) + 1
@lazyproperty
def sky(self):
if self._sky is None:
skymask = ~self.kernel.mask.astype(bool) # 1=sky, 0=obj
nsky = np.count_nonzero(skymask)
axis = (1, 2)
if nsky == 0.:
sky = (np.max(self.cutout_data_nosub, axis=axis)
- np.max(self.cutout_convdata, axis=axis))
else:
sky = (np.sum(self.cutout_data_nosub * skymask, axis=axis)
/ nsky)
else:
sky = np.full(len(self), fill_value=self._sky)
return sky
def make_cutouts(self, data):
cutouts = []
for xpos, ypos in self.xypos:
cutouts.append(extract_array(data, self.cutout_shape, (ypos, xpos),
fill_value=0.0))
return np.array(cutouts)
@lazyproperty
def cutout_data_nosub(self):
return self.make_cutouts(self.data)
@lazyproperty
def cutout_data(self):
data = ((self.cutout_data_nosub - self.sky[:, np.newaxis, np.newaxis])
* self.kernel.mask)
# IRAF starfind discards negative pixels
data[data < 0] = 0.0
return data
@lazyproperty
def cutout_convdata(self):
return self.make_cutouts(self.convolved_data)
@lazyproperty
def npix(self):
return np.count_nonzero(self.cutout_data, axis=(1, 2))
@lazyproperty
def moments(self):
return np.array([_moments(arr, order=1) for arr in self.cutout_data])
@lazyproperty
def cutout_centroid(self):
moments = self.moments
# ignore divide-by-zero RuntimeWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
ycentroid = moments[:, 1, 0] / moments[:, 0, 0]
xcentroid = moments[:, 0, 1] / moments[:, 0, 0]
return np.transpose((ycentroid, xcentroid))
@lazyproperty
def cutout_xcentroid(self):
return np.transpose(self.cutout_centroid)[1]
@lazyproperty
def cutout_ycentroid(self):
return np.transpose(self.cutout_centroid)[0]
@lazyproperty
def cutout_xorigin(self):
return np.transpose(self.xypos)[0] - self.kernel.xradius
@lazyproperty
def cutout_yorigin(self):
return np.transpose(self.xypos)[1] - self.kernel.yradius
@lazyproperty
def xcentroid(self):
return self.cutout_xcentroid + self.cutout_xorigin
@lazyproperty
def ycentroid(self):
return self.cutout_ycentroid + self.cutout_yorigin
@lazyproperty
def peak(self):
return np.array([np.max(arr) for arr in self.cutout_data])
@lazyproperty
def flux(self):
return np.array([np.sum(arr) for arr in self.cutout_data])
@lazyproperty
def mag(self):
return -2.5 * np.log10(self.flux)
@lazyproperty
def moments_central(self):
moments = np.array([_moments_central(arr, center=(xcen_, ycen_),
order=2)
for arr, xcen_, ycen_ in
zip(self.cutout_data, self.cutout_xcentroid,
self.cutout_ycentroid)])
return moments / self.moments[:, 0, 0][:, np.newaxis, np.newaxis]
@lazyproperty
def mu_sum(self):
return self.moments_central[:, 0, 2] + self.moments_central[:, 2, 0]
@lazyproperty
def mu_diff(self):
return self.moments_central[:, 0, 2] - self.moments_central[:, 2, 0]
@lazyproperty
def fwhm(self):
return 2.0 * np.sqrt(np.log(2.0) * self.mu_sum)
@lazyproperty
def roundness(self):
return np.sqrt(self.mu_diff**2
+ 4.0 * self.moments_central[:, 1, 1]**2) / self.mu_sum
@lazyproperty
def sharpness(self):
return self.fwhm / self.kernel.fwhm
@lazyproperty
def pa(self):
pa = np.rad2deg(0.5 * np.arctan2(2.0 * self.moments_central[:, 1, 1],
self.mu_diff))
pa = np.where(pa < 0, pa + 180, pa)
return pa
def apply_filters(self):
"""Filter the catalog."""
mask = np.count_nonzero(self.cutout_data, axis=(1, 2)) > 1
mask &= ((self.sharpness > self.sharplo)
& (self.sharpness < self.sharphi)
& (self.roundness > self.roundlo)
& (self.roundness < self.roundhi))
if self.peakmax is not None:
mask &= (self.peak < self.peakmax)
newcat = self[mask]
if len(newcat) == 0:
warnings.warn('Sources were found, but none pass the sharpness, '
'roundness, or peakmax criteria',
NoDetectionsWarning)
return None
return newcat
def select_brightest(self):
"""
Sort the catalog by the brightest fluxes and select the
top brightest sources.
"""
newcat = self
if self.brightest is not None:
idx = np.argsort(self.flux)[::-1][:self.brightest]
newcat = self[idx]
return newcat
def apply_all_filters(self):
"""
Apply all filters, select the brightest, and reset the source
ids.
"""
cat = self.apply_filters()
if cat is None:
return None
cat = cat.select_brightest()
cat.reset_ids()
return cat
def to_table(self, columns=None):
meta = {'version': _get_version_info()}
table = QTable(meta=meta)
if columns is None:
columns = self.default_columns
for column in columns:
table[column] = getattr(self, column)
return table
|
astropy/photutils
|
photutils/detection/irafstarfinder.py
|
Python
|
bsd-3-clause
| 18,591
|
[
"Gaussian"
] |
c2a9fd53dc19c41d4d6c7f2867d680b73f85de3643c2f8bf036bc77961e824cd
|
import tensorflow as tf  # neural network for function approximation
import gym  # environment
import numpy as np  # matrix operation and math functions
from gym import wrappers
import gym_morph  # customized environment for cart-pole
import matplotlib
matplotlib.use('Agg')  # headless backend so figures can be saved without a display
import matplotlib.pyplot as plt
import time
start_time = time.time()
# Run several independent training runs, each seeded with its test number.
MAX_TEST = 10
for test_num in range(1, MAX_TEST+1):
    # Hyperparameters
    RANDOM_NUMBER_SEED = test_num
    ENVIRONMENT1 = "morph-v0"
    MAX_EPISODES = 20000  # number of episodes
    EPISODE_LENGTH = 4000  # single episode length
    HIDDEN_SIZE = 16
    DISPLAY_WEIGHTS = False  # Help debug weight update
    gamma = 0.90  # Discount per step
    RENDER = False  # Render the cart-pole system
    VIDEO_INTERVAL = 100  # Generate a video at this interval
    CONSECUTIVE_TARGET = 100  # Including previous 100 rewards
    CONST_LR = True  # Constant or decaying learning rate
    # Constant learning rate
    const_learning_rate_in = 0.003
    # Decay learning rate
    start_learning_rate_in = 0.003
    decay_steps_in = 100
    decay_rate_in = 0.96
    DIR_PATH_SAVEFIG = "/root/cartpole_plot/"
    # Build the output-figure file name from the hyperparameters.
    if CONST_LR:
        learning_rate = const_learning_rate_in
        file_name_savefig = "el" + str(EPISODE_LENGTH) \
                            + "_hn" + str(HIDDEN_SIZE) \
                            + "_clr" + str(learning_rate).replace(".", "p") \
                            + "_test" + str(test_num) \
                            + ".png"
    else:
        start_learning_rate = start_learning_rate_in
        decay_steps = decay_steps_in
        decay_rate = decay_rate_in
        file_name_savefig = "el" + str(EPISODE_LENGTH) \
                            + "_hn" + str(HIDDEN_SIZE) \
                            + "_dlr_slr" + str(start_learning_rate).replace(".", "p") \
                            + "_ds" + str(decay_steps) \
                            + "_dr" + str(decay_rate).replace(".", "p") \
                            + "_test" + str(test_num) \
                            + ".png"
    env = gym.make(ENVIRONMENT1)
    # Seed everything for reproducibility of this test run.
    env.seed(RANDOM_NUMBER_SEED)
    np.random.seed(RANDOM_NUMBER_SEED)
    tf.set_random_seed(RANDOM_NUMBER_SEED)
    # Input and output sizes
    input_size = 4
    output_size = 2
    # input_size = env.observation_space.shape[0]
    # try:
    #     output_size = env.action_space.shape[0]
    # except AttributeError:
    #     output_size = env.action_space.n
    # Tensorflow network setup
    x = tf.placeholder(tf.float32, shape=(None, input_size))
    y = tf.placeholder(tf.float32, shape=(None, 1))
    if not CONST_LR:
        # decay learning rate
        global_step = tf.Variable(0, trainable=False)
        learning_rate = tf.train.exponential_decay(start_learning_rate, global_step, decay_steps, decay_rate, staircase=False)
    expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
    # Xavier (2010) weights initializer for uniform distribution:
    # x = sqrt(6. / (in + out)); [-x, x]
    w_init = tf.contrib.layers.xavier_initializer()
    hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
                               initializer=w_init)
    hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
    dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
                             initializer=w_init)
    dist_B = tf.Variable(tf.zeros(output_size))
    hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
    dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
    dist_soft = tf.nn.log_softmax(dist)
    # BUGFIX: the selector picking the first action's log-probability must be
    # a constant.  The original used tf.Variable(...), which is trainable by
    # default, so optimizer.minimize() (whose default var_list is *all*
    # trainable variables) would also update the selector during training and
    # corrupt the policy's probability computation.
    dist_in = tf.matmul(dist_soft, tf.constant([[1.], [0.]]))
    pi = tf.contrib.distributions.Bernoulli(dist_in)
    pi_sample = pi.sample()
    log_pi = pi.log_prob(y)
    # REINFORCE loss: maximize expected_return-weighted log-probability.
    if CONST_LR:
        optimizer = tf.train.RMSPropOptimizer(learning_rate)
        train = optimizer.minimize(-1.0 * expected_returns * log_pi)
    else:
        optimizer = tf.train.RMSPropOptimizer(learning_rate)
        train = optimizer.minimize(-1.0 * expected_returns * log_pi, global_step=global_step)
    # saver = tf.train.Saver()
    # Create and initialize a session
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    def run_episode(environment, ep, render=False):
        """Run one episode with the current policy.
        Returns the raw return, the discounted return, the running
        discounted-return sequence (one entry per step), and the lists of
        visited states and sampled actions.
        """
        raw_reward = 0
        discounted_reward = 0
        cumulative_reward = []
        discount = 1.0
        states = []
        actions = []
        obs = environment.reset()
        done = False
        while not done:
            states.append(obs)
            cumulative_reward.append(discounted_reward)
            if render and ((ep % VIDEO_INTERVAL) == 0):
                environment.render()
            # sample an action from the stochastic policy
            action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
            actions.append(action)
            obs, reward, done, info = env.step(action[0])
            raw_reward += reward
            # NOTE(review): only positive rewards are discounted; negative
            # rewards are added at full value -- presumably an intentional
            # penalty scheme; verify against the environment's reward design.
            if reward > 0:
                discounted_reward += reward * discount
            else:
                discounted_reward += reward
            discount *= gamma
        return raw_reward, discounted_reward, cumulative_reward, states, actions
    def display_weights(session):
        """Print the current network weights and biases (debugging aid)."""
        w1 = session.run(hidden_W)
        b1 = session.run(hidden_B)
        w2 = session.run(dist_W)
        b2 = session.run(dist_B)
        print(w1, b1, w2, b2)
    returns = []
    mean_returns = []
    for ep in range(MAX_EPISODES):
        raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
            run_episode(env, ep, RENDER)
        # advantage-like weight: discounted reward-to-go from each step
        expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
        sess.run(train, feed_dict={x: ep_states, y: ep_actions,
                                   expected_returns: expected_R})
        if DISPLAY_WEIGHTS:
            display_weights(sess)
        returns.append(raw_G)
        # running mean over the last CONSECUTIVE_TARGET episodes
        running_returns = returns[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
        mean_return = np.mean(running_returns)
        mean_returns.append(mean_return)
        if CONST_LR:
            msg = "Test: {}/{}, Episode: {}/{}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
            msg = msg.format(test_num, MAX_TEST, ep+1, MAX_EPISODES, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), learning_rate, raw_G, CONSECUTIVE_TARGET, mean_return)
            print(msg)
        else:
            msg = "Test: {}/{}, Episode: {}/{}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
            msg = msg.format(test_num, MAX_TEST, ep+1, MAX_EPISODES, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), sess.run(learning_rate), raw_G, CONSECUTIVE_TARGET, mean_return)
            print(msg)
    env.close()  # close openai gym environment
    tf.reset_default_graph()  # clear tensorflow graph
    # Plot
    # plt.style.use('ggplot')
    plt.style.use('dark_background')
    episodes_plot = np.arange(MAX_EPISODES)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    fig.subplots_adjust(top=0.85)
    if CONST_LR:
        ax.set_title("The Cart-Pole Problem Test %i \n \
            Episode Length: %i \
            Discount Factor: %.2f \n \
            Number of Hidden Neuron: %i \
            Constant Learning Rate: %.5f" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, learning_rate))
    else:
        ax.set_title("The Cart-Pole Problem Test %i \n \
            EpisodeLength: %i DiscountFactor: %.2f NumHiddenNeuron: %i \n \
            Decay Learning Rate: (start: %.5f, steps: %i, rate: %.2f)" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, start_learning_rate, decay_steps, decay_rate))
    ax.set_xlabel("Episode")
    ax.set_ylabel("Return")
    ax.set_ylim((0, EPISODE_LENGTH))
    ax.grid(linestyle='--')
    ax.plot(episodes_plot, returns, label='Instant return')
    ax.plot(episodes_plot, mean_returns, label='Averaged return')
    legend = ax.legend(loc='best', shadow=True)
    fig.savefig(DIR_PATH_SAVEFIG + file_name_savefig, dpi=500)
    # plt.show()
|
GitYiheng/reinforcement_learning_test
|
test03_monte_carlo/t42_change_gamma.py
|
Python
|
mit
| 7,663
|
[
"NEURON"
] |
a6768668e6035f2e4c617127f2cc967fc31af3d8b67d1ec4189944f0c177273b
|
import pymatgen
from pymatgen.core.composition import Composition
from pymatgen.matproj.rest import MPRester
from pymatgen.phasediagram.maker import PhaseDiagram
from pymatgen.phasediagram.maker import CompoundPhaseDiagram
from pymatgen.phasediagram.entries import PDEntry
from pymatgen.phasediagram.entries import TransformedPDEntry
from pymatgen.phasediagram.analyzer import PDAnalyzer
from pymatgen.phasediagram.plotter import PDPlotter
from pymatgen.core.periodic_table import DummySpecie, Element
import collections
from pandas import DataFrame
import pandas as pd
import string
pd.set_option('display.width', 400)
# Rydberg -> eV conversion factor.  The raw energies appear to be stored in
# Rydberg (verify against how the data files were generated).
RY_TO_EV = 13.605698066
# reading names and energies from file - change file names to change system
# FIX: use context managers so the three data files are always closed
# (the original opened them and never closed the handles).
with open("data/nam/bicucl.dat", "r") as file_names:
    # compound names, one per line
    names = file_names.read().splitlines()
with open("data/lda/bicucl.dat", "r") as file_energies_lda:
    # LDA total energies, one float per line (same order as ``names``)
    enlda = [float(y) for y in file_energies_lda.read().splitlines()]
with open("data/pbe/bicucl.dat", "r") as file_energies_pbe:
    # PBE total energies, one float per line (same order as ``names``)
    enpbe = [float(y) for y in file_energies_pbe.read().splitlines()]
# terminal compositions used for the pseudo-ternary (compound) phase diagrams
term_comp = [Composition(names[7]), Composition(names[11]), Composition(names[32])]
# creating entries for the LDA phase diagram
entries_lda = []
for i in range(len(names)):
    # entries_lda.append(PDEntry(names[i],enlda[i], names[i], "LDA"))
    entries_lda.append(PDEntry(names[i], enlda[i], " ", "LDA"))
# creating the phase diagram for LDA
pd_lda = PhaseDiagram(entries_lda)
cpd_lda = CompoundPhaseDiagram(entries_lda, term_comp, normalize_terminal_compositions=True)
a_lda = PDAnalyzer(pd_lda)
# creating entries for the PBE phase diagram
entries_pbe = []
for i in range(len(names)):
    # entries_pbe.append(PDEntry(names[i],enpbe[i], names[i], "PBE"))
    entries_pbe.append(PDEntry(names[i], enpbe[i], " ", "PBE"))
# creating the phase diagram for PBE
pd_pbe = PhaseDiagram(entries_pbe)
cpd_pbe = CompoundPhaseDiagram(entries_pbe, term_comp, normalize_terminal_compositions=True)
a_pbe = PDAnalyzer(pd_pbe)
# # visualize quaternary phase diagrams
# plotter_lda = PDPlotter(pd_lda,show_unstable = 200)
# plotter_lda.show()
# plotter_pbe = PDPlotter(pd_pbe,show_unstable = 200)
# plotter_pbe.show()
#
# printing results of the phase diagram
data_lda = collections.defaultdict(list)
data_pbe = collections.defaultdict(list)
print("LDA Phase diagram")
for e in entries_lda:
    # energy above hull and decomposition products for each entry
    decomp, ehull = a_lda.get_decomp_and_e_above_hull(e)
    formen = pd_lda.get_form_energy_per_atom(e)
    data_lda["Composition"].append(e.composition.reduced_formula)
    data_lda["Ehull (meV/atom)"].append(ehull*RY_TO_EV*1000)
    data_lda["Decomposition"].append(" + ".join(["%.2f %s" % (v, k.composition.formula) for k, v in decomp.items()]))
    data_lda["Formation Energy (eV/atom)"].append(formen*RY_TO_EV)
df1 = DataFrame(data_lda, columns=["Composition", "Ehull (meV/atom)", "Formation Energy (eV/atom)", "Decomposition"])
print(df1)
print(" ")
print(" ")
print("PBE Phase diagram")
for e in entries_pbe:
    decomp, ehull = a_pbe.get_decomp_and_e_above_hull(e)
    formen = pd_pbe.get_form_energy_per_atom(e)
    data_pbe["Composition"].append(e.composition.reduced_formula)
    data_pbe["Ehull (meV/atom)"].append(ehull*RY_TO_EV*1000)
    data_pbe["Decomposition"].append(" + ".join(["%.2f %s" % (v, k.composition.formula) for k, v in decomp.items()]))
    data_pbe["Formation Energy (eV/atom)"].append(formen*RY_TO_EV)
df3 = DataFrame(data_pbe, columns=["Composition", "Ehull (meV/atom)", "Formation Energy (eV/atom)", "Decomposition"])
print(df3)
# # # visualize pseudo-ternary phase diagrams
# plotter_lda = PDPlotter(cpd_lda,show_unstable = 200)
# plot_lda = plotter_lda.get_contour_pd_plot()
# plot_lda.show()
#
# plotter_pbe = PDPlotter(cpd_pbe,show_unstable = 200)
# plot_pbe = plotter_pbe.get_contour_pd_plot()
# plot_pbe.show()
|
mmdg-oxford/papers
|
Filip-JPCC-2018/Scripts/quatphase.py
|
Python
|
gpl-3.0
| 4,028
|
[
"pymatgen"
] |
5f171954b607bdf8a3d799bbc9e672b62a78789846f76cffb35612aa739d67c9
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import unittest
from PyQt5.QtCore import Qt, QMimeData
from PyQt5.QtWidgets import QMenu, QApplication, QMessageBox
from PyQt5.QtGui import QDragEnterEvent, QDropEvent
from peacock.Input.ExecutableInfo import ExecutableInfo
from peacock.Input.InputTree import InputTree
from peacock.utils import Testing
from peacock.Input.BlockTree import BlockTree
from mock import patch
class Tests(Testing.PeacockTester):
    """Unit tests for the peacock ``BlockTree`` input-tree widget."""
    # a QApplication must exist before any Qt widgets can be created
    qapp = QApplication([])
    def setUp(self):
        """Reset the signal counters and locate the test executable/input."""
        super(Tests, self).setUp()
        self.test_exe = Testing.find_moose_test_exe()
        self.test_input_file = "../../common/fsp_test.i"
        self.changed_count = 0
        self.block_selected_count = 0
        self.block_clicked_count = 0
        self.block_doubleclicked_count = 0
        self.last_block = None
    def newWidget(self, modules=False):
        """
        Create a BlockTree loaded with the test input file and connect
        its signals to the counting slots below.
        """
        app_info = ExecutableInfo()
        if modules:
            app_info.setPath(Testing.find_moose_test_exe(dirname="modules/combined", exe_base="combined"))
        else:
            app_info.setPath(Testing.find_moose_test_exe())
        tree = InputTree(app_info)
        tree.setInputFile(self.test_input_file)
        w = BlockTree(tree)
        w.changed.connect(self.changed)
        w.blockSelected.connect(self.blockSelected)
        w.blockClicked.connect(self.blockClicked)
        w.blockDoubleClicked.connect(self.blockDoubleClicked)
        #w.show()
        #w.raise_()
        return w
    def changed(self, block):
        # slot for BlockTree.changed
        self.changed_count += 1
        self.last_block = block
    def blockSelected(self, block):
        # slot for BlockTree.blockSelected
        self.block_selected_count += 1
        self.last_block = block
    def blockClicked(self, block):
        # slot for BlockTree.blockClicked
        self.block_clicked_count += 1
        self.last_block = block
    def blockDoubleClicked(self, block):
        # slot for BlockTree.blockDoubleClicked
        self.block_doubleclicked_count += 1
        self.last_block = block
    def checkInputItem(self, item, name, num_children, checked):
        """Assert a tree item and its node agree on name/children/state."""
        self.assertEqual(item.text(0), name)
        node = self.widget.getNode(item)
        self.assertEqual(node.name, name)
        self.assertEqual(item.checkState(0) == Qt.Checked, checked)
        self.assertEqual(len(node.children), num_children)
        self.assertEqual(item.childCount(), num_children)
    def numChildrenChecked(self, item):
        """Return how many direct children of *item* are checked."""
        count = 0
        for i in range(item.childCount()):
            if item.child(i).checkState(0) == Qt.Checked:
                count += 1
        return count
    def testDumpTree(self):
        # Just get some coverage
        w = self.newWidget(modules=True)
        s = w.dumpTreeToString()
        self.assertIn("Preconditioning: True: True", s)
        self.assertIn(" FSP: True: True", s)
        self.assertIn(" u: False: True", s)
        self.assertIn(" uv: False: True", s)
        self.assertIn(" v: False: True", s)
    def testChanges(self):
        """Toggling the check state updates the tree and emits signals."""
        w = self.newWidget()
        item = w._path_item_map.get("/Mesh")
        self.assertNotEqual(item, None)
        self.assertEqual(self.changed_count, 0)
        b = w.tree.getBlockInfo("/Mesh")
        self.assertEqual(b.included, True)
        item.setCheckState(0, Qt.Unchecked)
        self.assertEqual(self.changed_count, 1)
        self.assertEqual(b.included, False)
        self.assertEqual(self.last_block, b)
        item.setCheckState(0, Qt.Checked)
        self.assertEqual(self.changed_count, 2)
        self.assertEqual(b.included, True)
        self.assertEqual(self.block_clicked_count, 0)
        w.onItemClicked(item, 0)
        self.assertEqual(self.block_clicked_count, 1)
        self.assertEqual(b, self.last_block)
        self.assertEqual(self.block_doubleclicked_count, 0)
        w.onItemDoubleClicked(item, 0)
        self.assertEqual(self.block_doubleclicked_count, 1)
        self.assertEqual(b, self.last_block)
    @unittest.skip("Requires work for Python3")
    def testMove(self):
        """Drag-and-drop reordering of blocks (skipped; needs Py3 fixes)."""
        w = self.newWidget()
        item = w._path_item_map.get("/Variables/u")
        #b = w.tree.getBlockInfo("/Variables/u")
        w.scrollToItem(item)
        point = w.visualItemRect(item).center()
        item1 = w._path_item_map.get("/Variables/v")
        #b1 = w.tree.getBlockInfo("/Variables/v")
        w.scrollToItem(item1)
        point1 = w.visualItemRect(item1).bottomLeft()
        #idx = b.parent.children_list.index(b.name)
        #idx1 = b.parent.children_list.index(b1.name)
        w.setCurrentItem(item)
        mime = QMimeData()
        mime.setData(w._mime_type, "some data")
        ee = QDragEnterEvent(w.mapToGlobal(point), Qt.MoveAction, mime, Qt.LeftButton, Qt.NoModifier)
        w.dragEnterEvent(ee)
        #Testing.process_events(t=1)
        de = QDropEvent(w.mapToGlobal(point1), Qt.MoveAction, mime, Qt.LeftButton, Qt.NoModifier)
        w.dropEvent(de)
        # This doesn't seem to work for some reason
        #self.assertEqual(idx1, b.parent.children_list.index(b.name))
        #self.assertEqual(idx, b.parent.children_list.index(b1.name))
        w.setCurrentItem(None)
        self.assertEqual(w._current_drag, None)
        w.dropEvent(de)
        w.dragEnterEvent(ee)
        self.assertEqual(w._current_drag, None)
        w.setCurrentItem(item1)
        w.dragEnterEvent(ee)
        self.assertNotEqual(w._current_drag, None)
        w.dropEvent(de)
    def testRename(self):
        """Renaming a block updates the tree data and the item map."""
        w = self.newWidget()
        b = w.tree.getBlockInfo("/Variables/u")
        w.renameBlock(b, "foo")
        self.assertEqual(b.path, "/Variables/foo")
        self.assertEqual(b.name, "foo")
        item = w._path_item_map.get("/Variables/foo")
        self.assertNotEqual(item, None)
        self.assertEqual(item.text(0), "foo")
        b1 = w.tree.getBlockInfo("/Variables/u")
        self.assertEqual(b1, None)
        b1 = w.tree.getBlockInfo("/Variables/foo")
        self.assertEqual(b1, b)
    @patch.object(QMenu, "exec_")
    @patch.object(QMessageBox, "question")
    def testRemove(self, mock_question, mock_exec):
        """Removing a block directly and via the (mocked) context menu."""
        w = self.newWidget()
        b = w.tree.getBlockInfo("/Variables/u")
        parent = b.parent
        self.assertEqual(len(parent.children_list), 2)
        w.removeBlock(b)
        self.assertEqual(b.parent, None)
        self.assertNotIn(b.name, parent.children_list)
        item = w._path_item_map.get("/Variables/u")
        self.assertEqual(item, None)
        self.assertEqual(len(parent.children_list), 1)
        b = w.tree.getBlockInfo("/Variables/v")
        mock_exec.return_value = w.remove_action
        mock_question.return_value = QMessageBox.No
        item = w._path_item_map.get(b.path)
        self.assertNotEqual(item, None)
        w.scrollToItem(item)
        point = w.visualItemRect(item).center()
        w._treeContextMenu(point)
        self.assertEqual(len(parent.children_list), 1)
        mock_question.return_value = QMessageBox.Yes
        w._treeContextMenu(point)
        self.assertEqual(len(parent.children_list), 0)
        item = w._path_item_map.get("/Variables/v")
        self.assertEqual(item, None)
    @patch.object(QMenu, "exec_")
    def testCopy(self, mock_exec):
        """Copying blocks directly, via context menu, and via shortcut."""
        w = self.newWidget()
        v = w.tree.getBlockInfo("/Variables")
        b = w.tree.getBlockInfo("/Variables/u")
        self.assertEqual(len(b.children_list), 1)
        nchilds = len(v.children_list)
        self.assertEqual(self.changed_count, 0)
        w.copyBlock(b)
        self.assertEqual(nchilds+1, len(v.children_list))
        self.assertEqual(self.changed_count, 1)
        new_block = w.tree.getBlockInfo("/Variables/New_0")
        self.assertEqual(len(new_block.children_list), 1)
        self.assertEqual(["InitialCondition"], new_block.children_list)
        item = w._path_item_map.get("/Variables/New_0/InitialCondition")
        self.assertNotEqual(item, None)
        # simulate them not choosing any of the menu options
        mock_exec.return_value = None
        nchilds = len(v.children_list)
        item = w._path_item_map.get(b.path)
        w.scrollToItem(item)
        point = w.visualItemRect(item).center()
        w._treeContextMenu(point)
        self.assertEqual(nchilds, len(v.children_list))
        self.assertEqual(self.changed_count, 1)
        mock_exec.return_value = w.add_action
        w._treeContextMenu(point)
        # simulate choosing the add menu option
        self.assertEqual(nchilds+1, len(v.children_list))
        self.assertEqual(self.changed_count, 2)
        # /Variables is a star so we can copy it
        b = w.tree.getBlockInfo("/Variables")
        nchilds = len(v.children_list)
        w.copyBlock(b)
        self.assertEqual(nchilds+1, len(v.children_list))
        self.assertEqual(self.changed_count, 3)
        p = "/Variables/%s" % v.children_list[-1]
        new_block = w.tree.getBlockInfo(p)
        self.assertEqual(len(new_block.children_list), 1)
        self.assertEqual(["InitialCondition"], new_block.children_list)
        item = w._path_item_map.get("%s/InitialCondition" % p)
        self.assertNotEqual(item, None)
        mock_exec.return_value = None
        nchilds = len(v.children_list)
        item = w._path_item_map.get(v.path)
        w.scrollToItem(item)
        point = w.visualItemRect(item).center()
        w._treeContextMenu(point)
        self.assertEqual(nchilds, len(v.children_list))
        self.assertEqual(self.changed_count, 3)
        mock_exec.return_value = w.add_action
        w._treeContextMenu(point)
        self.assertEqual(nchilds+1, len(v.children_list))
        self.assertEqual(self.changed_count, 4)
        w.setCurrentItem(item)
        w._newBlockShortcut()
        self.assertEqual(nchilds+2, len(v.children_list))
        self.assertEqual(self.changed_count, 5)
        # /Executioner is not a star so we shouldn't be able to copy it
        b = w.tree.getBlockInfo("/Executioner")
        nchilds = len(b.children_list)
        w.copyBlock(b)
        self.assertEqual(nchilds, len(b.children_list))
        item = w._path_item_map.get(b.path)
        w.scrollToItem(item)
        point = w.visualItemRect(item).center()
        w._treeContextMenu(point)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    Testing.run_tests()
|
nuclear-wizard/moose
|
python/peacock/tests/input_tab/BlockTree/test_BlockTree.py
|
Python
|
lgpl-2.1
| 10,469
|
[
"MOOSE"
] |
e751a29f4fd1780e1cd35a54eb7fa7461e57f9d70fe494109775f7c7a3f98d9c
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import map, range, zip
from six import string_types
from itertools import cycle
import warnings
import numpy as np
from matplotlib import use
use('Agg', warn=False)
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Polygon, Rectangle
def boxplots(distributions, x_values=None, x_tick_labels=None, title=None,
             x_label=None, y_label=None, x_tick_labels_orientation='vertical',
             y_min=None, y_max=None, whisker_length=1.5, box_width=0.5,
             box_colors=None, figure_width=None, figure_height=None,
             legend=None):
    """Generate a figure with a boxplot for each distribution.
    Parameters
    ----------
    distributions: 2-D array_like
        Distributions to plot. A boxplot will be created for each distribution.
    x_values : list of numbers, optional
        List indicating where each boxplot should be placed. Must be the same
        length as `distributions` if provided.
    x_tick_labels : list of str, optional
        List of x-axis tick labels.
    title : str, optional
        Title of the plot.
    x_label : str, optional
        x-axis label.
    y_label : str, optional
        y-axis label.
    x_tick_labels_orientation : {'vertical', 'horizontal'}
        Orientation of the x-axis labels.
    y_min : scalar, optional
        Minimum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    y_max : scalar, optional
        Maximum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    whisker_length : scalar, optional
        Length of the whiskers as a function of the IQR. For example, if 1.5,
        the whiskers extend to ``1.5 * IQR``. Anything outside of that range is
        treated as an outlier.
    box_width : scalar, optional
        Width of each box in plot units.
    box_colors : str, tuple, or list of colors, optional
        Either a matplotlib-compatible string or tuple that indicates the color
        to be used for every boxplot, or a list of colors to color each boxplot
        individually. If ``None``, boxes will be the same color as the plot
        background. If a list of colors is provided, a color must be provided
        for each boxplot. Can also supply ``None`` instead of a color, which
        will color the box the same color as the plot background.
    figure_width : scalar, optional
        Width of the plot figure in inches. If not provided, will default to
        matplotlib's default figure width.
    figure_height : scalar, optional
        Height of the plot figure in inches. If not provided, will default to
        matplotlib's default figure height.
    legend : tuple or list, optional
        Two-element tuple or list that contains a list of valid matplotlib
        colors as the first element and a list of labels (strings) as the
        second element. The lengths of the first and second elements must be
        the same. If ``None``, a legend will not be plotted.
    Returns
    -------
    matplotlib.figure.Figure
        Figure containing a boxplot for each distribution.
    See Also
    --------
    matplotlib.pyplot.boxplot
    scipy.stats.ttest_ind
    Notes
    -----
    This is a convenience wrapper around matplotlib's ``boxplot`` function that
    allows for coloring of boxplots and legend generation.
    Examples
    --------
    Create a plot with two boxplots:
    .. plot::
        >>> from skbio.draw import boxplots
        >>> fig = boxplots([[2, 2, 1, 3, 4, 4.2, 7], [0, -1, 4, 5, 6, 7]])
    Plot three distributions with custom colors and labels:
    .. plot::
        >>> from skbio.draw import boxplots
        >>> fig = boxplots(
        ...     [[2, 2, 1, 3], [0, -1, 0, 0.1, 0.3], [4, 5, 6, 3]],
        ...     x_tick_labels=('Control', 'Treatment 1', 'Treatment 2'),
        ...     box_colors=('green', 'blue', 'red'))
    """
    # validate/normalize inputs before touching any matplotlib state
    distributions = _validate_distributions(distributions)
    num_dists = len(distributions)
    _validate_x_values(x_values, x_tick_labels, num_dists)
    # Create a new figure to plot our data on, and then plot the distributions.
    # NOTE: plt.boxplot draws on the *current* axes, which is the ``ax`` that
    # plt.subplots() just created -- this relies on matplotlib's implicit
    # current-figure state.
    fig, ax = plt.subplots()
    box_plot = plt.boxplot(distributions, positions=x_values,
                           whis=whisker_length, widths=box_width)
    if box_colors is not None:
        # a single color is broadcast to every boxplot
        if _is_single_matplotlib_color(box_colors):
            box_colors = [box_colors] * num_dists
        _color_box_plot(ax, box_plot, box_colors)
    # Set up the various plotting options, such as x- and y-axis labels, plot
    # title, and x-axis values if they have been supplied.
    _set_axes_options(ax, title, x_label, y_label,
                      x_tick_labels=x_tick_labels,
                      x_tick_labels_orientation=x_tick_labels_orientation,
                      y_min=y_min, y_max=y_max)
    if legend is not None:
        # legend must be the pair (colors, labels)
        if len(legend) != 2:
            raise ValueError("Invalid legend was provided. The legend must be "
                             "a two-element tuple/list where the first "
                             "element is a list of colors and the second "
                             "element is a list of labels.")
        _create_legend(ax, legend[0], legend[1], 'colors')
    _set_figure_size(fig, figure_width, figure_height)
    return fig
def grouped_distributions(plot_type, data, x_values=None,
                          data_point_labels=None, distribution_labels=None,
                          distribution_markers=None, x_label=None,
                          y_label=None, title=None,
                          x_tick_labels_orientation='vertical', y_min=None,
                          y_max=None, whisker_length=1.5,
                          error_bar_type='stdv', distribution_width=None,
                          figure_width=None, figure_height=None):
    """Generate a figure with distributions grouped at points along the x-axis.

    Parameters
    ----------
    plot_type : {'bar', 'scatter', 'box'}
        Type of plot to visualize distributions with.
    data : list of lists of lists
        Each inner list represents a data point along the x-axis. Each data
        point contains lists of data for each distribution in the group at that
        point. This nesting allows for the grouping of distributions at each
        data point.
    x_values : list of scalars, optional
        Spacing of data points along the x-axis. Must be the same length as the
        number of data points and be in ascending sorted order. If not
        provided, plots will be spaced evenly.
    data_point_labels : list of str, optional
        Labels for data points.
    distribution_labels : list of str, optional
        Labels for each distribution in a data point grouping.
    distribution_markers : list of str or list of tuple, optional
        Matplotlib-compatible strings or tuples that indicate the color or
        symbol to be used to distinguish each distribution in a data point
        grouping. Colors will be used for bar charts or box plots, while
        symbols will be used for scatter plots.
    x_label : str, optional
        x-axis label.
    y_label : str, optional
        y-axis label.
    title : str, optional
        Plot title.
    x_tick_labels_orientation : {'vertical', 'horizontal'}
        Orientation of x-axis labels.
    y_min : scalar, optional
        Minimum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    y_max : scalar, optional
        Maximum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    whisker_length : scalar, optional
        If `plot_type` is ``'box'``, determines the length of the whiskers as a
        function of the IQR. For example, if 1.5, the whiskers extend to
        ``1.5 * IQR``. Anything outside of that range is seen as an outlier.
        If `plot_type` is not ``'box'``, this parameter is ignored.
    error_bar_type : {'stdv', 'sem'}
        Type of error bars to use if `plot_type` is ``'bar'``. Can be either
        ``'stdv'`` (for standard deviation) or ``'sem'`` for the standard error
        of the mean. If `plot_type` is not ``'bar'``, this parameter is
        ignored.
    distribution_width : scalar, optional
        Width in plot units of each individual distribution (e.g. each bar if
        the plot type is a bar chart, or the width of each box if the plot type
        is a boxplot). If None, will be automatically determined.
    figure_width : scalar, optional
        Width of the plot figure in inches. If not provided, will default to
        matplotlib's default figure width.
    figure_height : scalar, optional
        Height of the plot figure in inches. If not provided, will default to
        matplotlib's default figure height.

    Returns
    -------
    matplotlib.figure.Figure
        Figure containing distributions grouped at points along the x-axis.

    Examples
    --------
    Create a plot with two distributions grouped at three points:

    .. plot::

       >>> from skbio.draw import grouped_distributions
       >>> fig = grouped_distributions('bar',
       ...                             [[[2, 2, 1,], [0, 1, 4]],
       ...                              [[1, 1, 1], [4, 4.5]],
       ...                              [[2.2, 2.4, 2.7, 1.0], [0, 0.2]]],
       ...                             distribution_labels=['Treatment 1',
       ...                                                  'Treatment 2'])

    """
    # Set up different behavior based on the plot type. Bars grow rightward
    # from their x position, while scatter/box markers are centered on it;
    # distribution_centered records that so tick placement can compensate.
    if plot_type == 'bar':
        plotting_function = _plot_bar_data
        distribution_centered = False
        marker_type = 'colors'
    elif plot_type == 'scatter':
        plotting_function = _plot_scatter_data
        distribution_centered = True
        marker_type = 'symbols'
    elif plot_type == 'box':
        plotting_function = _plot_box_data
        distribution_centered = True
        marker_type = 'colors'
    else:
        raise ValueError("Invalid plot type '%s'. Supported plot types are "
                         "'bar', 'scatter', or 'box'." % plot_type)

    num_points, num_distributions = _validate_input(data, x_values,
                                                    data_point_labels,
                                                    distribution_labels)

    # Create a list of matplotlib markers (colors or symbols) that can be used
    # to distinguish each of the distributions. If the user provided a list of
    # markers, use it and loop around to the beginning if there aren't enough
    # markers. If they didn't provide a list, or it was empty, use our own
    # predefined list of markers (again, loop around to the beginning if we
    # need more markers).
    distribution_markers = _get_distribution_markers(marker_type,
                                                     distribution_markers,
                                                     num_distributions)

    # Now calculate where each of the data points will start on the x-axis.
    x_locations = _calc_data_point_locations(num_points, x_values)
    assert (len(x_locations) == num_points), "The number of x_locations " +\
        "does not match the number of data points."

    if distribution_width is None:
        # Find the smallest gap between consecutive data points and divide this
        # by the number of distributions + 1 for some extra spacing between
        # data points.
        min_gap = max(x_locations)
        for i in range(len(x_locations) - 1):
            curr_gap = x_locations[i + 1] - x_locations[i]
            if curr_gap < min_gap:
                min_gap = curr_gap
        distribution_width = min_gap / float(num_distributions + 1)
    else:
        if distribution_width <= 0:
            raise ValueError("The width of a distribution cannot be less than "
                             "or equal to zero.")

    result, plot_axes = plt.subplots()

    # Iterate over each data point, and plot each of the distributions at that
    # data point. Increase the offset after each distribution is plotted,
    # so that the grouped distributions don't overlap.
    for point, x_pos in zip(data, x_locations):
        dist_offset = 0
        for dist_index, dist, dist_marker in zip(range(num_distributions),
                                                 point, distribution_markers):
            dist_location = x_pos + dist_offset
            plotting_function(plot_axes, dist, dist_marker, distribution_width,
                              dist_location, whisker_length, error_bar_type)
            dist_offset += distribution_width

    # Set up various plot options that are best set after the plotting is done.
    # The x-axis tick marks (one per data point) are centered on each group of
    # distributions.
    plot_axes.set_xticks(_calc_data_point_ticks(x_locations,
                                                num_distributions,
                                                distribution_width,
                                                distribution_centered))
    _set_axes_options(plot_axes, title, x_label, y_label, x_values,
                      data_point_labels, x_tick_labels_orientation, y_min,
                      y_max)
    if distribution_labels is not None:
        _create_legend(plot_axes, distribution_markers, distribution_labels,
                       marker_type)
    _set_figure_size(result, figure_width, figure_height)

    # matplotlib seems to sometimes plot points on the rightmost edge of the
    # plot without adding padding, so we need to add our own to both sides of
    # the plot. For some reason this has to go after the call to draw(),
    # otherwise matplotlib throws an exception saying it doesn't have a
    # renderer. Boxplots need extra padding on the left.
    if plot_type == 'box':
        left_pad = 2 * distribution_width
    else:
        left_pad = distribution_width
    plot_axes.set_xlim(plot_axes.get_xlim()[0] - left_pad,
                       plot_axes.get_xlim()[1] + distribution_width)
    return result
def _validate_distributions(distributions):
dists = []
for distribution in distributions:
try:
distribution = np.asarray(distribution, dtype=float)
except ValueError:
raise ValueError("Each value in each distribution must be "
"convertible to a number.")
# Empty distributions are plottable in mpl < 1.4.0. In 1.4.0, a
# ValueError is raised. This has been fixed in mpl 1.4.0-dev (see
# https://github.com/matplotlib/matplotlib/pull/3571). In order to
# support empty distributions across mpl versions, we replace them with
# [np.nan]. See https://github.com/pydata/pandas/issues/8382,
# https://github.com/matplotlib/matplotlib/pull/3571, and
# https://github.com/pydata/pandas/pull/8240 for details.
# If we decide to only support mpl > 1.4.0 in the future, this code can
# likely be removed in favor of letting mpl handle empty distributions.
if distribution.size > 0:
dists.append(distribution)
else:
dists.append(np.array([np.nan]))
return dists
def _validate_input(data, x_values, data_point_labels, distribution_labels):
    """Return (num_points, num_distributions) for the supplied data.

    Validates plotting options to make sure they are valid with the supplied
    data, raising ValueError on any inconsistency.
    """
    if data is None or not data or isinstance(data, string_types):
        raise ValueError("The data must be a list type, and it cannot be "
                         "None or empty.")

    num_points = len(data)
    num_distributions = len(data[0])

    empty_data_error_msg = ("The data must contain at least one data "
                            "point, and each data point must contain at "
                            "least one distribution to plot.")
    if num_points == 0 or num_distributions == 0:
        raise ValueError(empty_data_error_msg)

    # Every data point must be non-empty and hold the same number of
    # distributions, otherwise grouping along the x-axis is ill-defined.
    for point in data:
        if not point:
            raise ValueError(empty_data_error_msg)
        if len(point) != num_distributions:
            raise ValueError("The number of distributions in each data point "
                             "grouping must be the same for all data points.")

    # Make sure we have the right number of x values (one for each data
    # point), and make sure they are numbers.
    _validate_x_values(x_values, data_point_labels, num_points)

    if (distribution_labels is not None and
            len(distribution_labels) != num_distributions):
        raise ValueError("The number of distribution labels must be equal "
                         "to the number of distributions.")
    return num_points, num_distributions
def _validate_x_values(x_values, x_tick_labels, num_expected_values):
"""Validates the x values provided by the user, making sure they are the
correct length and are all numbers.
Also validates the number of x-axis tick labels.
Raises a ValueError if these conditions are not met.
"""
if x_values is not None:
if len(x_values) != num_expected_values:
raise ValueError("The number of x values must match the number "
"of data points.")
try:
list(map(float, x_values))
except:
raise ValueError("Each x value must be a number.")
if x_tick_labels is not None:
if len(x_tick_labels) != num_expected_values:
raise ValueError("The number of x-axis tick labels must match the "
"number of data points.")
def _get_distribution_markers(marker_type, marker_choices, num_markers):
"""Returns a list of length num_markers of valid matplotlib colors or
symbols.
The markers will be comprised of those found in marker_choices (if not None
and not empty) or a list of predefined markers (determined by marker_type,
which can be either 'colors' or 'symbols'). If there are not enough
markers, the list of markers will be reused from the beginning again (as
many times as are necessary).
"""
if num_markers < 0:
raise ValueError("num_markers must be greater than or equal to zero.")
if marker_choices is None or len(marker_choices) == 0:
if marker_type == 'colors':
marker_choices = ['b', 'g', 'r', 'c', 'm', 'y', 'w']
elif marker_type == 'symbols':
marker_choices = \
['s', 'o', '^', '>', 'v', '<', 'd', 'p', 'h', '8', '+', 'x']
else:
raise ValueError("Invalid marker_type: '%s'. marker_type must be "
"either 'colors' or 'symbols'." % marker_type)
if len(marker_choices) < num_markers:
# We don't have enough markers to represent each distribution uniquely,
# so let the user know. We'll add as many markers (starting from the
# beginning of the list again) until we have enough, but the user
# should still know because they may want to provide a new list of
# markers.
warnings.warn(
"There are not enough markers to uniquely represent each "
"distribution in your dataset. You may want to provide a list "
"of markers that is at least as large as the number of "
"distributions in your dataset.",
RuntimeWarning)
marker_cycle = cycle(marker_choices[:])
while len(marker_choices) < num_markers:
marker_choices.append(next(marker_cycle))
return marker_choices[:num_markers]
def _calc_data_point_locations(num_points, x_values=None):
"""Returns the x-axis location for each of the data points to start at.
Note: A numpy array is returned so that the overloaded "+" operator can be
used on the array.
The x-axis locations are scaled by x_values if it is provided, or else the
x-axis locations are evenly spaced. In either case, the x-axis locations
will always be in the range [1, num_points].
"""
if x_values is None:
# Evenly space the x-axis locations.
x_locs = np.arange(1, num_points + 1)
else:
if len(x_values) != num_points:
raise ValueError("The number of x-axis values must match the "
"number of data points.")
# Scale to the range [1, num_points]. Taken from
# http://www.heatonresearch.com/wiki/Range_Normalization
x_min = min(x_values)
x_max = max(x_values)
x_range = x_max - x_min
n_range = num_points - 1
x_locs = np.array([(((x_val - x_min) * n_range) / float(x_range)) + 1
for x_val in x_values])
return x_locs
def _calc_data_point_ticks(x_locations, num_distributions, distribution_width,
distribution_centered):
"""Returns a 1D numpy array of x-axis tick positions.
These positions will be centered on each data point.
Set distribution_centered to True for scatter and box plots because their
plot types naturally center over a given horizontal position. Bar charts
should use distribution_centered = False because the leftmost edge of a bar
starts at a given horizontal position and extends to the right for the
width of the bar.
"""
dist_size = num_distributions - 1 if distribution_centered else\
num_distributions
return x_locations + ((dist_size * distribution_width) / 2)
def _plot_bar_data(plot_axes, distribution, distribution_color,
distribution_width, x_position, whisker_length,
error_bar_type):
"""Returns the result of plotting a single bar in matplotlib."""
result = None
# We do not want to plot empty distributions because matplotlib will not be
# able to render them as PDFs.
if len(distribution) > 0:
avg = np.mean(distribution)
if error_bar_type == 'stdv':
error_bar = np.std(distribution)
elif error_bar_type == 'sem':
error_bar = np.std(distribution) / np.sqrt(len(distribution))
else:
raise ValueError(
"Invalid error bar type '%s'. Supported error bar types are "
"'stdv' and 'sem'." % error_bar_type)
result = plot_axes.bar(x_position, avg, distribution_width,
yerr=error_bar, ecolor='black',
facecolor=distribution_color)
return result
def _plot_scatter_data(plot_axes, distribution, distribution_symbol,
distribution_width, x_position, whisker_length,
error_bar_type):
"""Returns the result of plotting a single scatterplot in matplotlib."""
result = None
x_vals = [x_position] * len(distribution)
# matplotlib's scatter function doesn't like plotting empty data.
if len(x_vals) > 0 and len(distribution) > 0:
result = plot_axes.scatter(x_vals, distribution,
marker=distribution_symbol, c='k')
return result
def _plot_box_data(plot_axes, distribution, distribution_color,
distribution_width, x_position, whisker_length,
error_bar_type):
"""Returns the result of plotting a single boxplot in matplotlib."""
result = None
if len(distribution) > 0:
result = plot_axes.boxplot([distribution], positions=[x_position],
widths=distribution_width,
whis=whisker_length)
_color_box_plot(plot_axes, result, [distribution_color])
return result
def _is_single_matplotlib_color(color):
"""Returns True if color is a single (not a list) mpl color."""
single_color = False
if (isinstance(color, str)):
single_color = True
elif len(color) == 3 or len(color) == 4:
single_color = True
for e in color:
if not (isinstance(e, float) or isinstance(e, int)):
single_color = False
return single_color
def _color_box_plot(plot_axes, box_plot, colors):
    """Color boxes in the box plot with the specified colors.

    If any of the colors are None, the box will not be colored.

    The box_plot argument must be the dictionary returned by the call to
    matplotlib's boxplot function, and the colors argument must consist of
    valid matplotlib colors.

    Raises ValueError if the number of colors does not match the number of
    boxes in box_plot.
    """
    # Note: the following code is largely taken from this matplotlib boxplot
    # example:
    # http://matplotlib.sourceforge.net/examples/pylab_examples/
    #     boxplot_demo2.html
    num_colors = len(colors)
    num_box_plots = len(box_plot['boxes'])
    if num_colors != num_box_plots:
        raise ValueError("The number of colors (%d) does not match the number "
                         "of boxplots (%d)." % (num_colors, num_box_plots))

    for box, median, color in zip(box_plot['boxes'],
                                  box_plot['medians'],
                                  colors):
        if color is not None:
            box_x = []
            box_y = []

            # There are five points in the box. The first is the same as
            # the last.
            for i in range(5):
                box_x.append(box.get_xdata()[i])
                box_y.append(box.get_ydata()[i])

            # Fill the box outline with a Polygon patch of the requested
            # color.
            box_coords = list(zip(box_x, box_y))
            box_polygon = Polygon(box_coords, facecolor=color)
            plot_axes.add_patch(box_polygon)

            # Draw the median lines back over what we just filled in with
            # color.
            median_x = []
            median_y = []
            for i in range(2):
                median_x.append(median.get_xdata()[i])
                median_y.append(median.get_ydata()[i])
                plot_axes.plot(median_x, median_y, 'black')
def _set_axes_options(plot_axes, title=None, x_label=None, y_label=None,
                      x_values=None, x_tick_labels=None,
                      x_tick_labels_orientation='vertical', y_min=None,
                      y_max=None):
    """Apply title, axis labels, x tick labels, and y limits to the axes.

    Raises ValueError for an x_tick_labels_orientation other than
    'vertical' or 'horizontal'.
    """
    if title is not None:
        plot_axes.set_title(title)
    if x_label is not None:
        plot_axes.set_xlabel(x_label)
    if y_label is not None:
        plot_axes.set_ylabel(y_label)

    if x_tick_labels_orientation not in ('vertical', 'horizontal'):
        raise ValueError("Invalid orientation for x-axis tick labels: '%s'. "
                         "Valid orientations are 'vertical' or 'horizontal'."
                         % x_tick_labels_orientation)

    # Prefer explicit tick labels; otherwise fall back to the x spacing
    # values; finally number the data points incrementally 1..n.
    if x_tick_labels is not None:
        labels = x_tick_labels
    elif x_values is not None:
        labels = x_values
    else:
        labels = range(1, len(plot_axes.get_xticklabels()) + 1)
    plot_axes.set_xticklabels(labels, rotation=x_tick_labels_orientation)

    # Clamp the y-axis range only where the caller asked for it.
    if y_min is not None:
        plot_axes.set_ylim(bottom=float(y_min))
    if y_max is not None:
        plot_axes.set_ylim(top=float(y_max))
def _create_legend(plot_axes, distribution_markers, distribution_labels,
                   marker_type):
    """Creates a legend on the supplied axes.

    marker_type must be 'colors' (legend entries are colored rectangles) or
    'symbols' (legend entries are black markers); raises ValueError
    otherwise, or if the number of markers and labels differ.
    """
    # We have to use a proxy artist for the legend because box plots currently
    # don't have a very useful legend in matplotlib, and using the default
    # legend for bar/scatterplots chokes on empty/null distributions.
    #
    # Note: This code is based on the following examples:
    #   http://matplotlib.sourceforge.net/users/legend_guide.html
    #   http://stackoverflow.com/a/11423554
    if len(distribution_markers) != len(distribution_labels):
        raise ValueError("The number of distribution markers does not match "
                         "the number of distribution labels.")
    if marker_type == 'colors':
        # One filled rectangle per distribution color.
        legend_proxy = [Rectangle((0, 0), 1, 1, fc=marker)
                        for marker in distribution_markers]
        plot_axes.legend(legend_proxy, distribution_labels, loc='best')
    elif marker_type == 'symbols':
        # One invisible line carrying the distribution's marker symbol.
        legend_proxy = [Line2D(range(1), range(1), color='white',
                               markerfacecolor='black', marker=marker)
                        for marker in distribution_markers]
        plot_axes.legend(legend_proxy, distribution_labels, numpoints=3,
                         scatterpoints=3, loc='best')
    else:
        raise ValueError("Invalid marker_type: '%s'. marker_type must be "
                         "either 'colors' or 'symbols'." % marker_type)
def _set_figure_size(fig, width=None, height=None):
"""Sets the plot figure size and makes room for axis labels, titles, etc.
If both width and height are not provided, will use matplotlib defaults.
Making room for labels will not always work, and if it fails, the user will
be warned that their plot may have cut-off labels.
"""
# Set the size of the plot figure, then make room for the labels so they
# don't get cut off. Must be done in this order.
if width is not None and height is not None and width > 0 and height > 0:
fig.set_size_inches(width, height)
try:
fig.tight_layout()
except ValueError:
warnings.warn(
"Could not automatically resize plot to make room for "
"axes labels and plot title. This can happen if the labels or "
"title are extremely long and the plot size is too small. Your "
"plot may have its labels and/or title cut-off. To fix this, "
"try increasing the plot's size (in inches) and try again.",
RuntimeWarning)
|
Kleptobismol/scikit-bio
|
skbio/draw/_distributions.py
|
Python
|
bsd-3-clause
| 30,658
|
[
"scikit-bio"
] |
aa64db627e816ca64fb16ea873f19485cf34f08df7d45e7e5c32fba28030934e
|
# test_hcinput.py
"""
Series of tests for hcinput.py
"""
# Import modules
from heisenberg_coefficients import hcinput
from pathlib import Path
import unittest
import numpy as np
# Create test classes
class TestINPUT(unittest.TestCase):
    """
    Series of tests for input.py module.
    """

    def setUp(self):
        """
        Set poscar_path for each test.
        """
        self.poscar_path = (Path.cwd() /
                            Path("heisenberg_coefficients/tests/POSCAR.vasp"))

    def test_realfile(self):
        """
        Test that POSCAR path gives a real file.
        """
        self.assertTrue(self.poscar_path.is_file())

    def test_latticevecs(self):
        """
        Tests that lattice vectors were extracted.
        """
        # Constants that should match test file content.
        # NOTE: ``np.float``/``np.int`` were deprecated aliases of the
        # builtins (NumPy >= 1.20) and were removed in NumPy 1.24, so the
        # plain builtins are used here instead.
        latscaleb = float(1.0)
        # Vectors that should match test file
        b = np.array([[8.3519001007, 0.0000000000, 0.0000000000],
                      [-4.1759500504, 7.2329576571, 0.0000000000],
                      [0.0000000000, 0.0000000000, 9.0845003128]])
        # Instantiate poscar_input class and parse
        myposcar = hcinput.poscar_input(str(self.poscar_path))
        myposcar.parse()
        # Extract latscale and latvecs from file
        a = myposcar.getlatvecs()
        latscalea = myposcar.getlatscale()
        # Test that they match
        self.assertEqual(latscalea, latscaleb)
        for row in range(3):
            for col in range(3):
                self.assertEqual(a[row, col], b[row, col])

    def test_atomlocs(self):
        """
        Tests that atomic species, number, and locations
        are properly extracted from POSCAR.
        """
        # Species names and total that should match test file content
        atom_names_b = ["Sc", "Mn", "Al", "Si"]
        atom_numbers_b = np.array([6, 6, 14, 10], dtype=int)
        # Builtin ``int`` replaces the removed ``np.int`` alias.
        atom_total_b = int(6 + 6 + 14 + 10)
        # Locations for Mn atoms
        mn_vecs_b = np.array([[0.500000000, 0.000000000, 0.000000000],
                              [0.000000000, 0.500000000, 0.000000000],
                              [0.500000000, 0.500000000, 0.000000000],
                              [0.500000000, 0.000000000, 0.500000000],
                              [0.000000000, 0.500000000, 0.500000000],
                              [0.500000000, 0.500000000, 0.500000000]])
        # Instantiate poscar_input class and parse
        myposcar = hcinput.poscar_input(str(self.poscar_path))
        myposcar.parse()
        # Extract species names, numbers, totals
        atom_names_a = myposcar.getatomnames()
        atom_numbers_a = myposcar.getatomnumbers()
        atom_total_a = myposcar.getatomtotal()
        # Extract Mn atom positions
        mn_vecs_a = myposcar.getatomvecs(1)
        # Test that they match
        self.assertEqual(atom_total_a, atom_total_b)
        for entries in range(4):
            self.assertEqual(atom_numbers_a[entries], atom_numbers_b[entries])
        for entries in range(4):
            self.assertEqual(atom_names_a[entries], atom_names_b[entries])
        for row in range(6):
            for col in range(3):
                self.assertEqual(mn_vecs_a[row, col], mn_vecs_b[row, col])

    def test_database(self):
        """
        Tests the output of a generated pandas database.
        """
        # List of atoms (indices into the POSCAR species: Sc and Mn)
        atom_list = [0, 1]
        # Run function to generate poscar_input and dataframe
        print("\nBuilding database for Sc and Mn atoms taken from "
              "the POSCAR file.\n")
        mydf = hcinput.build_database(str(self.poscar_path),
                                      atom_list)
        # Test that mydf is a valid pandas DataFrame object
        self.assertIsInstance(mydf, hcinput.pd.DataFrame)
        print("Outputting database:\n")
        print(mydf)
# Run tests when invoked directly (e.g. ``python test_hcinput.py``);
# unittest discovers and runs all TestCase classes in this module.
if __name__ == '__main__':
    unittest.main()
|
jkglasbrenner/heisenberg_coefficients
|
heisenberg_coefficients/tests/test_hcinput.py
|
Python
|
gpl-3.0
| 3,971
|
[
"VASP"
] |
986f2eb4366f6332dff2c4570364dee66c2224825ff14894ea9443807f49f559
|
#!/usr/bin/env python2
desc="""Align pairs/mates onto contigs and run SSPACE scaffolder.
Example:
fastq2sspace.py -v -f contigs.fa -n pe300 pe600 pe5000 -1 ../../_archives/PL429.{3,6,50}00_read1*.fastq.gz -2 ../../_archives/PL429.{3,6,50}00_read2*.fastq.gz -i 300 600 5000 -s 0.15 0.25 0.5 -t FR FR RF -u 5000000
"""
epilog="""Author:
l.p.pryszcz+git@gmail.com
19/06/2012 Dublin
"""
import argparse, commands, os, subprocess, sys
from datetime import datetime
def parse_sam(handle):
    """Yield each alignment line of a SAM stream as a list of its
    tab-separated fields.

    Header lines (starting with '@') and blank lines are skipped.
    """
    for line in handle:
        line = line.strip()
        if line and not line.startswith('@'):
            yield line.split('\t')
def get_start_stop(start, length, flag):
    """Return the read's boundaries as (start, end).

    For a reverse-aligned read (SAM flag bit 16 set) the coordinates are
    returned swapped, i.e. (end, start) — the decreasing pair encodes the
    read's orientation for the SSPACE TAB format.
    """
    if flag & 16:
        return start + length, start
    return start, start + length
def sam2sspace_tab(inhandle, outhandle, mapqTh=0, verbose=False):
    """Convert SAM to SSPACE TAB file.

    Reads alignments from `inhandle` in consecutive mate pairs, filters by
    mapping quality (`mapqTh`), drops pairs aligned to the same contig, and
    writes "ref1 start1 end1 ref2 start2 end2" rows to `outhandle`.
    A filtering summary is written to stderr.
    """
    # i: pairs read, j: pairs passing quality filter, k: inter-contig pairs.
    i = j = k = 0
    sam = parse_sam(inhandle)
    while True:
        try:
            # Mates are expected on consecutive lines (name-sorted input).
            # Builtin next() replaces the Python 2-only .next() method.
            sam1 = next(sam)
            sam2 = next(sam)
        except StopIteration:
            break
        # Unpack the SAM fields used here; field 9 is the read sequence.
        q1, flag1, ref1, start1, mapq1, len1 = sam1[0], int(sam1[1]), sam1[2], int(sam1[3]), int(sam1[4]), len(sam1[9])
        q2, flag2, ref2, start2, mapq2, len2 = sam2[0], int(sam2[1]), sam2[2], int(sam2[3]), int(sam2[4]), len(sam2[9])
        i += 1
        # gem uses entire fasta header as seq name
        ref1 = ref1.split()[0]
        ref2 = ref2.split()[0]
        # Skip pairs below the mapping-quality threshold.
        if mapqTh and (mapq1 < mapqTh or mapq2 < mapqTh):
            continue
        if q1 != q2:
            sys.stderr.write("Warning: Queries has different names: %s vs %s\n" % (q1, q2))
            continue
        j += 1
        # Skip self matches: same-contig pairs carry no scaffolding signal.
        if ref1 == ref2:
            continue
        k += 1
        # Define start-stop ranges (reversed pair encodes orientation).
        start1, end1 = get_start_stop(start1, len1, flag1)
        start2, end2 = get_start_stop(start2, len2, flag2)
        outhandle.write("%s\t%s\t%s\t%s\t%s\t%s\n" % (ref1, start1, end1, ref2, start2, end2))
    # Guard against division by zero on empty input (previously crashed).
    if i:
        sys.stderr.write(" %s pairs. %s passed filtering [%.2f%s]. %s in different contigs [%.2f%s].\n" % (i, j, j*100.0/i, '%', k, k*100.0/i, '%'))
    else:
        sys.stderr.write(" No read pairs found.\n")
def _get_bowtie2_proc(fn1, fn2, ref, maxins, cores, upto, verbose, bufsize=-1):
    """Spawn bowtie2 aligning the read pair fn1/fn2 against index `ref` and
    return the subprocess (stdout is a pipe of SAM output).

    bufsize: 0 no buffer; 1 buffer one line; -1 set system default.
    """
    # Pass -f for FASTA input, -q (FASTQ) otherwise.
    if fn1.endswith(('.fa', '.fasta', '.fa.gz', '.fasta.gz')):
        fformat = "-f"
    else:
        fformat = "-q"
    args = ['bowtie2', '--quiet', '--very-fast-local', '-p', str(cores),
            '-x', ref, fformat, "-1", fn1, "-2", fn2, "--maxins", str(maxins)]
    if upto:
        args += ["--upto", str(upto)]
    if verbose:
        sys.stderr.write(" %s\n" % " ".join(args))
    return subprocess.Popen(args, bufsize=bufsize, stdout=subprocess.PIPE)
def _get_gem_proc( fn1,fn2,ref,maxins,upto,cores,verbose,bufsize=-1):
    """Return the tail process of a gem-mapper alignment pipeline.

    Builds a four-stage pipeline: interleave the read pair, map with
    gem-mapper, convert to SAM with gem-2-sam, then keep unique hits via
    samgem2unique.py. The returned process' stdout carries the final SAM.

    bufsize: 0 no buffer; 1 buffer one line; -1 set system default.
    """
    preArgs = [ 'fastq2shuffledFastQ.py', fn1, fn2 ]
    if upto:
        preArgs += [ '-u',str(upto) ]
    gemArgs = [ 'gem-mapper','-I',ref+'.gem','--unique-mapping','--threads',str(cores),'-q','offset-33','--max-insert-size',str(maxins)]#,'2>','/dev/null' ]
    gem2samArgs = [ 'gem-2-sam','-I',ref,'--expect-paired-end-reads','-q','offset-33']#,'2>','/dev/null']
    sam2uniqArgs = [ 'samgem2unique.py', ]
    if verbose:
        sys.stderr.write( "%s | %s | %s | %s\n" % (" ".join(preArgs)," ".join(gemArgs)," ".join(gem2samArgs)," ".join(sam2uniqArgs)) )
    # Chain the processes: each stage's stdin is the previous stage's stdout,
    # like a shell pipeline. Only the final process is returned.
    preProc = subprocess.Popen( preArgs,bufsize=bufsize,stdout=subprocess.PIPE )
    gemProc = subprocess.Popen( gemArgs,bufsize=bufsize,stdout=subprocess.PIPE,stdin=preProc.stdout )
    gem2samProc = subprocess.Popen( gem2samArgs,bufsize=bufsize,stdout=subprocess.PIPE,stdin=gemProc.stdout )
    sam2uniqProc = subprocess.Popen( sam2uniqArgs,bufsize=bufsize,stdout=subprocess.PIPE,stdin=gem2samProc.stdout )
    return sam2uniqProc
def get_tab_files( outdir,reffile,libNames,fReadsFnames,rReadsFnames,inserts,iBounds,cores,mapqTh,upto,verbose ):
    """Prepare genome index, align all libs and save TAB file.

    For each library, aligns the mate files with bowtie2 and converts the
    SAM stream into an SSPACE TAB file named "<outdir>.<libName>.tab".
    Existing TAB files are reused, not regenerated. Returns the list of
    TAB file names.
    """
    #create genome index
    ref = reffile.name
    # The following two alternative indexing branches are toggled by moving
    # the ``'''`` markers: the bowtie2 branch is active; the gem branch is
    # commented out as a dead string literal. Keep the markers as-is.
    #'''
    idxfn = ref + ".1.bt2"
    if not os.path.isfile( idxfn ):
        cmd = "bowtie2-build %s %s" % (ref,ref)
        if verbose:
            sys.stderr.write( " Creating index...\n %s\n" % cmd )
        bwtmessage = commands.getoutput( cmd )
    '''
    idxfn = ref + ".gem"
    if not os.path.isfile( idxfn ):
        cmd = "gem-indexer -i %s -o %s" % (ref,ref)
        if verbose:
            sys.stderr.write( " Creating index...\n %s\n" % cmd )
        bwtmessage = commands.getoutput( cmd )#'''
    tabFnames = []
    #process all libs
    for libName,f1,f2,iSize,iFrac in zip( libNames,fReadsFnames,rReadsFnames,inserts,iBounds ):
        if verbose:
            sys.stderr.write( "[%s] [lib] %s\n" % (datetime.ctime(datetime.now()),libName) )
        #define tab output
        outfn = "%s.%s.tab" % ( outdir,libName )
        #skip if file exists
        if os.path.isfile( outfn ):
            sys.stderr.write( " File exists: %s\n" % outfn )
            tabFnames.append( outfn )
            continue
        out = open( outfn,"w" )
        #define max insert size allowed: insert size plus its tolerance
        maxins = ( 1.0+iFrac ) * iSize
        #run bowtie2 for all libs (gem pipeline kept as disabled alternative)
        proc = _get_bowtie2_proc( f1.name,f2.name,ref,maxins,cores,upto,verbose )
        #proc = _get_gem_proc( f1.name,f2.name,ref,maxins,upto,cores,verbose )
        #parse botwie output
        sam2sspace_tab( proc.stdout,out,mapqTh )
        #close file
        out.close()
        tabFnames.append( outfn )
    return tabFnames
def get_libs(outdir, libFn, libNames, tabFnames, inserts, iBounds,
             orientations, verbose):
    """Write the SSPACE library definition file and return its path.

    Starts from the lines of an existing libs file (if `libFn` is given)
    and appends one TAB-format entry per processed library. The result is
    saved as "<outdir>.libs.txt".
    """
    lines = []
    # Load libs from an existing file when one was provided.
    if libFn:
        if verbose:
            sys.stderr.write(" Reading libs from %s\n" % libFn)
        lines = open(libFn).readlines()
    # Add one TAB entry per library: name, TAB keyword, tab file, insert
    # size, insert-size tolerance, orientation.
    lines += ["%s\tTAB\t%s\t%s\t%s\t%s\n" % entry
              for entry in zip(libNames, tabFnames, inserts, iBounds,
                               orientations)]
    outfn = "%s.libs.txt" % outdir
    if verbose:
        sys.stderr.write(" Updated libs saved to: %s\n" % outfn)
    with open(outfn, "w") as out:
        out.write("".join(lines))
    return outfn
def main():
    """Parse command line options, align every library, build the SSPACE
    libs file and launch the SSPACE scaffolder.

    NOTE: this is a Python 2 script (``type=file`` argparse arguments and a
    ``print`` statement below).
    """
    usage = "%(prog)s [options]"
    parser = argparse.ArgumentParser( usage=usage,description=desc,epilog=epilog )

    parser.add_argument("-v", dest="verbose", default=False, action="store_true")
    parser.add_argument("-f", dest="fasta", required=True, type=file,
                        help="genome fasta [mandatory]")
    parser.add_argument("-k", dest="minlinks", default=5, type=int,
                        help="min number of links [%(default)s]")
    parser.add_argument("-l", dest="lib", default="",
                        help="lib file [No libs]")
    parser.add_argument("-o", dest="out", default="sspace_out",
                        help="output basename [%(default)s]")
    parser.add_argument("-n", dest="libnames", nargs="+",
                        help="libraries names [%(default)s]")
    parser.add_argument("-1", dest="libFs", nargs="+", type=file,
                        help="libs forward reads [%(default)s]")
    parser.add_argument("-2", dest="libRs", nargs="+", type=file,
                        help="libs reverse reads [%(default)s]")
    parser.add_argument("-i", dest="libIS", nargs="+", type=int,
                        help="libs insert sizes [%(default)s]")
    parser.add_argument("-s", dest="libISStDev", nargs="+", type=float,
                        help="libs IS StDev [%(default)s]")
    parser.add_argument("-t", dest="orientations", nargs="+", #type=float,
                        help="libs orientations [%(default)s]")
    parser.add_argument("-c", dest="cores", default=2, type=int,
                        help="no. of cpus [%(default)s]")
    parser.add_argument("-q", dest="mapq", default=10, type=int,
                        help="min map quality [%(default)s]")
    parser.add_argument("-u", dest="upto", default=0, type=int,
                        help="process up to pairs [all]")

    o = parser.parse_args()
    if o.verbose:
        sys.stderr.write( "Options: %s\n" % str(o) )
    # Every per-library option list must have one entry per library name.
    if len(o.libnames)*6 != len(o.libnames)+len(o.libFs)+len(o.libRs)+len(o.libIS)+len(o.libISStDev)+len(o.orientations):
        parser.error("Wrong number of arguments!")
    #generate outdirs if out contain dir and dir not exists
    if os.path.dirname(o.out):
        if not os.path.isdir( os.path.dirname(o.out) ):
            os.makedirs( os.path.dirname(o.out) )
    #get tab files
    if o.verbose:
        sys.stderr.write("[%s] Generating TAB file(s) for %s library/ies...\n" % (datetime.ctime(datetime.now()),len(o.libnames)) )
    tabFnames = get_tab_files( o.out,o.fasta,o.libnames,o.libFs,o.libRs,o.libIS,o.libISStDev,o.cores,o.mapq,o.upto,o.verbose )
    #generate lib file
    if o.verbose:
        sys.stderr.write("[%s] Generating libraries file...\n" % datetime.ctime(datetime.now()) )
    libFn = get_libs( o.out,o.lib,o.libnames,tabFnames,o.libIS,o.libISStDev,o.orientations,o.verbose )
    # Build and run the SSPACE command; the scaffolder path is hard-coded.
    cmd = "perl /users/tg/lpryszcz/src/SSPACE-BASIC-2.0_linux-x86_64/SSPACE_Basic_v2.0.pl -l %s -a 0.7 -k %s -s %s -b %s > %s.sspace.log" % ( libFn,o.minlinks,o.fasta.name,o.out,o.out ); print cmd
    os.system( cmd )
# Script entry point: run main(), tolerate Ctrl-C, and always report the
# elapsed wall-clock time on stderr.
if __name__=='__main__':
    t0 = datetime.now()
    try:
        main()
    except KeyboardInterrupt:
        sys.stderr.write("\nCtrl-C pressed! \n")
    dt = datetime.now()-t0
    sys.stderr.write( "#Time elapsed: %s\n" % dt )
|
lpryszcz/bin
|
fastq2sspace.py
|
Python
|
gpl-3.0
| 10,284
|
[
"Bowtie"
] |
674809718308b439811748c7da6e5751c3c8c43279229fd6a3b3572fe7b4662f
|
""" DataStore is the service for inserting accounting reports (rows) in the Accounting DB
"""
import datetime
from DIRAC import S_OK, S_ERROR, gConfig, gLogger
from DIRAC.AccountingSystem.DB.MultiAccountingDB import MultiAccountingDB
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.DISET.RequestHandler import RequestHandler,getServiceOption
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Core.DISET.RPCClient import RPCClient
__RCSID__ = "$Id$"
class DataStoreHandler( RequestHandler ):
  # Shared MultiAccountingDB handle; set once for the class in
  # initializeHandler and used by all export_* methods.
  __acDB = None

  @classmethod
  def initializeHandler( cls, svcInfoDict ):
    """Connect to the accounting DB(s) and, unless this instance is
    configured read-only, start the background compaction/bucketing tasks.
    """
    multiPath = PathFinder.getDatabaseSection( "Accounting/MultiDB" )
    cls.__acDB = MultiAccountingDB( multiPath )
    #we can run multiple services in read only mode. In that case we do not bucket
    cls.runBucketing = getServiceOption( svcInfoDict, 'RunBucketing', True )
    if cls.runBucketing:
      cls.__acDB.autoCompactDB() #pylint: disable=no-member
      # NOTE(review): presumably releases records left marked as taken by a
      # previous (crashed) instance — confirm against MultiAccountingDB.
      result = cls.__acDB.markAllPendingRecordsAsNotTaken() #pylint: disable=no-member
      if not result[ 'OK' ]:
        return result
      # Re-load pending records every 60 seconds.
      gThreadScheduler.addPeriodicTask( 60, cls.__acDB.loadPendingRecords ) #pylint: disable=no-member
    return S_OK()
  # DISET argument-type signature for export_registerType.
  types_registerType = [ basestring, list, list, list ]

  def export_registerType( self, typeName, definitionKeyFields, definitionAccountingFields, bucketsLength ):
    """
    Register a new type. (Only for all powerful admins)
    (Bow before me for I am admin! :)

    The type is registered once per configured DIRAC setup; per-setup
    failures are collected and reported together instead of aborting on
    the first error.
    """
    retVal = gConfig.getSections( "/DIRAC/Setups" )
    if not retVal[ 'OK' ]:
      return retVal
    errorsList = []
    for setup in retVal[ 'Value' ]:
      retVal = self.__acDB.registerType( setup, typeName, definitionKeyFields, definitionAccountingFields, bucketsLength ) #pylint: disable=too-many-function-args,no-member
      if not retVal[ 'OK' ]:
        errorsList.append( retVal[ 'Message' ] )
    if errorsList:
      return S_ERROR( "Error while registering type:\n %s" % "\n ".join( errorsList ) )
    return S_OK()
  # DISET argument-type signature for export_setBucketsLength.
  types_setBucketsLength = [ basestring, list ]

  def export_setBucketsLength( self, typeName, bucketsLength ):
    """
    Change the buckets Length. (Only for all powerful admins)
    (Bow before me for I am admin! :)

    Applied once per configured DIRAC setup; per-setup failures are
    collected and reported together.
    """
    retVal = gConfig.getSections( "/DIRAC/Setups" )
    if not retVal[ 'OK' ]:
      return retVal
    errorsList = []
    for setup in retVal[ 'Value' ]:
      retVal = self.__acDB.changeBucketsLength( setup, typeName, bucketsLength ) #pylint: disable=too-many-function-args,no-member
      if not retVal[ 'OK' ]:
        errorsList.append( retVal[ 'Message' ] )
    if errorsList:
      return S_ERROR( "Error while changing bucketsLength type:\n %s" % "\n ".join( errorsList ) )
    return S_OK()
types_regenerateBuckets = [ basestring ]
def export_regenerateBuckets( self, typeName ):
"""
Recalculate buckets. (Only for all powerful admins)
(Bow before me for I am admin! :)
"""
retVal = gConfig.getSections( "/DIRAC/Setups" )
if not retVal[ 'OK' ]:
return retVal
errorsList = []
for setup in retVal[ 'Value' ]:
retVal = self.__acDB.regenerateBuckets( setup, typeName ) #pylint: disable=too-many-function-args,no-member
if not retVal[ 'OK' ]:
errorsList.append( retVal[ 'Message' ] )
if errorsList:
return S_ERROR( "Error while recalculating buckets for type:\n %s" % "\n ".join( errorsList ) )
return S_OK()
  types_getRegisteredTypes = []
  def export_getRegisteredTypes( self ):
    """
    Get a list of registered types (Only for all powerful admins)
    (Bow before me for I am admin! :)

    :return: S_OK with the list of registered accounting types
    """
    return self.__acDB.getRegisteredTypes() #pylint: disable=no-member
types_deleteType = [ basestring ]
def export_deleteType( self, typeName ):
"""
Delete accounting type and ALL its contents. VERY DANGEROUS! (Only for all powerful admins)
(Bow before me for I am admin! :)
"""
retVal = gConfig.getSections( "/DIRAC/Setups" )
if not retVal[ 'OK' ]:
return retVal
errorsList = []
for setup in retVal[ 'Value' ]:
retVal = self.__acDB.deleteType( setup, typeName ) #pylint: disable=too-many-function-args,no-member
if not retVal[ 'OK' ]:
errorsList.append( retVal[ 'Message' ] )
if errorsList:
return S_ERROR( "Error while deleting type:\n %s" % "\n ".join( errorsList ) )
return S_OK()
types_commit = [ basestring, datetime.datetime, datetime.datetime, list ]
def export_commit( self, typeName, startTime, endTime, valuesList ):
"""
Add a record for a type
"""
setup = self.serviceInfoDict[ 'clientSetup' ]
startTime = int( Time.toEpoch( startTime ) )
endTime = int( Time.toEpoch( endTime ) )
return self.__acDB.insertRecordThroughQueue( setup, typeName, startTime, endTime, valuesList ) #pylint: disable=too-many-function-args,no-member
  types_commitRegisters = [ list ]
  def export_commitRegisters( self, entriesList ):
    """
    Insert a bundle of accounting records.

    Each entry must be a 4-element sequence of
    ( typeName, startTime, endTime, valuesList ). All entries are
    validated first, so either the whole bundle is queued or none of it.
    """
    setup = self.serviceInfoDict[ 'clientSetup' ]
    expectedTypes = [ basestring, datetime.datetime, datetime.datetime, list ]
    # First pass: validate shape and field types of every entry.
    for entry in entriesList:
      if len( entry ) != 4:
        return S_ERROR( "Invalid records" )
      for i in range( len( entry ) ):
        if not isinstance(entry[i], expectedTypes[i]):
          gLogger.error( "Unexpected type in report",
                         ": field %d in the records should be %s (and it is %s)" % ( i, expectedTypes[i], type(entry[i])) )
          return S_ERROR( "Unexpected type in report" )
    # Second pass: convert times to epoch seconds and queue the bundle.
    records = []
    for entry in entriesList:
      startTime = int( Time.toEpoch( entry[1] ) )
      endTime = int( Time.toEpoch( entry[2] ) )
      records.append( ( setup, entry[0], startTime, endTime, entry[3] ) )
    return self.__acDB.insertRecordBundleThroughQueue( records )
  types_compactDB = []
  def export_compactDB( self ):
    """
    Compact the db by grouping buckets
    """
    #if we are running slaves (not only one service) we can redirect the request to the master
    #For more information please read the Administrative guide Accounting part!
    #ADVICE: If you want to trigger the bucketing, please make sure the bucketing is not running!!!!
    if self.runBucketing:
      return self.__acDB.compactBuckets() #pylint: disable=no-member
    else:
      # Read-only slave: forward the request to the master service.
      return RPCClient('Accounting/DataStoreMaster').compactDB()
types_remove = [ basestring, datetime.datetime, datetime.datetime, list ]
def export_remove( self, typeName, startTime, endTime, valuesList ):
"""
Remove a record for a type
"""
setup = self.serviceInfoDict[ 'clientSetup' ]
startTime = int( Time.toEpoch( startTime ) )
endTime = int( Time.toEpoch( endTime ) )
return self.__acDB.deleteRecord( setup, typeName, startTime, endTime, valuesList ) #pylint: disable=too-many-function-args,no-member
  types_removeRegisters = [ list ]
  def export_removeRegisters( self, entriesList ):
    """
    Remove a bundle of accounting records.

    Entries have the same 4-element shape as in export_commitRegisters.
    Returns S_OK with the number of successfully removed records.
    """
    setup = self.serviceInfoDict[ 'clientSetup' ]
    expectedTypes = [ basestring, datetime.datetime, datetime.datetime, list ]
    # Validate every entry before attempting any deletion.
    for entry in entriesList:
      if len( entry ) != 4:
        return S_ERROR( "Invalid records" )
      for i in range( len( entry ) ):
        if not isinstance( entry[i], expectedTypes[i] ):
          return S_ERROR( "%s field in the records should be %s" % ( i, expectedTypes[i] ) )
    ok = 0
    for entry in entriesList:
      startTime = int( Time.toEpoch( entry[1] ) )
      endTime = int( Time.toEpoch( entry[2] ) )
      record = entry[3]
      result = self.__acDB.deleteRecord( setup, entry[0], startTime, endTime, record ) #pylint: disable=too-many-function-args,no-member
      if not result[ 'OK' ]:
        # NOTE(review): a failure is reported as S_OK with the partial count,
        # so callers cannot distinguish it from full success — confirm intent.
        return S_OK( ok )
      ok += 1
    return S_OK( ok )
|
Andrew-McNab-UK/DIRAC
|
AccountingSystem/Service/DataStoreHandler.py
|
Python
|
gpl-3.0
| 7,935
|
[
"DIRAC"
] |
cd8241852eb39d50790718bd801bc3992423d8fb839f8c3f33e15257d675381d
|
"""
.. module:: polar
:platform: Windows
:synopsis: Implementation of a class for optical
pumping simulations.
.. moduleauthor:: Wouter Gins <wouter.gins@fys.kuleuven.be>
"""
import numpy as np
from satlas.profiles import Voigt, Lorentzian
from satlas.basemodel import BaseModel
from scipy import integrate
from sympy.physics.wigner import wigner_6j, wigner_3j
import scipy.constants as csts
import lmfit
import itertools
# Short aliases for the sympy Wigner symbols used in the line strengths.
W6J = wigner_6j
W3J = wigner_3j
# NOTE(review): 'Polar' is not defined in this section of the module;
# verify the public API list against the actual definitions below.
__all__ = ['Polar']
# Define constants
C = csts.physical_constants['speed of light in vacuum'][0]  # Speed of light, m/s
H = csts.physical_constants['Planck constant'][0]  # Planck's constant, Js
PI = np.pi  # pi...
GL = 1.0  # Orbital g-factor
GS = 2.00232  # Spin g-factor
MUB = csts.physical_constants['Bohr magneton'][0]  # Bohr magneton
EV_TO_MHZ = csts.physical_constants['electron volt-hertz relationship'][0] * 1e-6  # eV to MHz conversion factor
#######################
# CALCULATION OBJECTS #
#######################
class BxRho_Voigt(Voigt):
    """Voigt lineshape scaled to an optical-pumping rate.

    The profile is multiplied by A * laser * c^2 / (8 pi h nu^2) and by an
    extra 1/nu on evaluation, giving the stimulated rate B * rho(nu) for one
    hyperfine transition. Frequencies are handled in MHz at the interface and
    converted to Hz internally.
    """

    def __init__(self, A=None, fwhmG=None, mu=None, laser=None, fwhmL=None):
        # A: Einstein A coefficient; fwhmG/fwhmL: Gaussian/Lorentzian widths;
        # mu: transition frequency (MHz); laser: laser power density.
        self._fwhmG = fwhmG
        self._fwhmL = fwhmL
        self._laser = laser
        self._A = A
        # NOTE(review): '_gaussian' is never initialized here although the
        # 'gaussian' getter reads it — presumably safe because callers set
        # it before reading; confirm against the parent Voigt class.
        self._lorentzian = 0
        super(BxRho_Voigt, self).__init__(mu=mu, fwhm=[fwhmG, fwhmL], ampIsArea=True, amp=1.0)

    @property
    def A(self):
        # Einstein A coefficient; changing it rescales the profile.
        return self._A

    @A.setter
    def A(self, value):
        self._A = value
        self.set_factor()

    @property
    def gaussian(self):
        return self._gaussian

    @gaussian.setter
    def gaussian(self, value):
        self._gaussian = value
        # 'fwhmL'/'fwhmG' attributes are assumed to be provided by the parent
        # Voigt class via its 'fwhm' property — TODO confirm.
        self.fwhm = [value, self.fwhmL]

    @property
    def lorentzian(self):
        return self._lorentzian

    @lorentzian.setter
    def lorentzian(self, value):
        self._lorentzian = value
        self.fwhm = [self.fwhmG, value]

    @property
    def mu(self):
        # Transition frequency; stored internally in Hz (input is MHz).
        return self._mu

    @mu.setter
    def mu(self, value):
        self._mu = value * 1e6
        self.set_factor()

    @property
    def laser(self):
        return self._laser

    @laser.setter
    def laser(self, value):
        self._laser = value
        self.set_factor()

    def set_factor(self):
        # Prefactor converting the unit-area profile to a pumping rate.
        self._factor = self.A * self.laser * C * C / (8 * PI * H * self.mu * self.mu)

    def __call__(self, x):
        # x is in MHz; the extra division by (x*1e6) supplies the 1/nu^3
        # frequency dependence of B together with set_factor's 1/nu^2.
        return super(BxRho_Voigt, self).__call__(x*1e6) * self._factor / (x*1e6)
class BxRho_Lorentzian(Lorentzian):
    """Lorentzian lineshape scaled to an optical-pumping rate.

    Same scaling as BxRho_Voigt (A * laser * c^2 / (8 pi h nu^2) with an
    extra 1/nu at evaluation time) but with a pure Lorentzian profile.
    Frequencies are MHz at the interface, Hz internally.
    """

    def __init__(self, A=None, mu=None, laser=None, fwhm=None):
        # A: Einstein A coefficient; mu: transition frequency (MHz);
        # laser: laser power density; fwhm: Lorentzian FWHM.
        self._laser = laser
        self._A = A
        super(BxRho_Lorentzian, self).__init__(mu=mu, fwhm=fwhm, ampIsArea=True, amp=1.0)

    @property
    def A(self):
        return self._A

    @A.setter
    def A(self, value):
        self._A = value
        self.set_factor()

    @property
    def mu(self):
        # Transition frequency; stored internally in Hz (input is MHz).
        return self._mu

    @mu.setter
    def mu(self, value):
        self._mu = value * 1e6
        self.set_factor()

    @property
    def laser(self):
        return self._laser

    @laser.setter
    def laser(self, value):
        self._laser = value
        self.set_factor()

    def set_factor(self):
        # Prefactor converting the unit-area profile to a pumping rate.
        self._factor = self.A * self.laser * C * C / (8 * PI * H * self.mu * self.mu)

    def __call__(self, x):
        # x is in MHz; division by (x*1e6) adds the remaining 1/nu factor.
        return super(BxRho_Lorentzian, self).__call__(x * 1e6) * self._factor / (x * 1e6)
##############
# MAIN CLASS #
##############
class RateModel(BaseModel):
    """Rate-equation model of optical pumping in a hyperfine level scheme.

    Builds the hyperfine substates from (I, J, L), the spontaneous-decay
    matrix from the Einstein A coefficients and the laser-driven matrices
    from the lineshape parameters, then integrates the resulting rate
    equations as a function of laser frequency.
    """

    def __init__(self, I, J, L, ABC, centroids, energies, A_array, scale=1.0, shape='Voigt', laser_intensity=80, laser_mode=None, interaction_time=1e-6, fwhmG=0.1, fwhmL=None, background_params=None, field=0, fixed_frequencies=None, frequency_mode='fixed', purity=1.0):
        """Set up all derived quantities and the lmfit parameters.

        :param I: nuclear spin.
        :param J: iterable of electronic J values of the fine-structure levels.
        :param L: iterable of orbital L values, parallel to *J*.
        :param ABC: per-level (A, B, C) hyperfine constants.
        :param centroids: per-level centroid positions.
        :param energies: per-level energies in eV.
        :param A_array: matrix of Einstein A coefficients between levels.
        :param background_params: polynomial background coefficients
            (defaults to a flat zero background).
        """
        super(RateModel, self).__init__()
        self.I = I
        self.J = J
        self.L = L
        self.A_array = A_array
        self.shape = shape
        # Fix: the original used a mutable default argument ([0]) which is
        # shared between calls; use None as sentinel and build a fresh list.
        if background_params is None:
            background_params = [0]
        # Accept a scalar intensity/mode as shorthand for a single laser.
        try:
            len(laser_intensity)
        except TypeError:
            laser_intensity = [laser_intensity]
            laser_mode = [laser_mode]
        if fixed_frequencies is not None:
            self.fixed_frequencies = fixed_frequencies
        else:
            self.fixed_frequencies = []
        # Lasers beyond the first vary_freqs ones keep a fixed frequency.
        self.vary_freqs = len(laser_intensity) - len(self.fixed_frequencies)
        self.frequency_mode = frequency_mode
        self.laser_intensity = laser_intensity
        self.mode = laser_mode
        self._calculate_F_levels()
        self._set_energies(energies)
        self._calculate_energy_coefficients()
        self._params = self._populate_params(laser_intensity, ABC, centroids, shape, scale, fwhmG, fwhmL, interaction_time, background_params, field, purity)
        self._set_population()
        self._calculate_A_partial()
        self._calculate_energy_changes()
        self._create_D_matrix()
        self.params = self._params
    @property
    def params(self):
        """lmfit.Parameters of the model; variation flags are re-validated
        on every access."""
        return self._check_variation(self._params)

    @params.setter
    def params(self, params):
        # Store the validated parameters, then rebuild everything that
        # depends on them: substate energies, the spontaneous-decay matrix
        # and the laser-driven matrices.
        self._params = self._check_variation(params)
        self._calculate_energy_changes()
        A = np.zeros((self.level_counts_cs[-1], self.level_counts_cs[-1]))
        for key in self.transition_indices:
            for x, y in self.transition_indices[key]:
                A[x, y] = params[key].value
        A = A * self.partial_A
        # Transpose gives the gain direction; the diagonal holds the total
        # loss rate of each substate so columns sum to zero.
        A = np.transpose(A) - np.eye(A.shape[0]) * A.sum(axis=1)
        self.A_array_used = A
        self._edit_D_matrix()
def _set_energies(self, energies):
N = self.level_counts.sum()
# Pre-allocate the energy and population vectors.
E = np.zeros(N)
Nlevcs = self.level_counts.cumsum()
for i, (n, ncs) in enumerate(zip(self.level_counts, Nlevcs)):
E[ncs - n:ncs] = energies[i]
self.energies = E * EV_TO_MHZ
    def _populate_params(self, laser_intensity, ABC, centroids, shape, scale, fwhmG, FWHML, interaction_time, background_params, field, purity):
        """Build the lmfit.Parameters object holding every fit parameter:
        laser intensities, hyperfine constants and centroids per level,
        transition strengths and linewidths per transition, plus global
        scale, interaction time, background polynomial, field and purity."""
        p = lmfit.Parameters()
        for i, val in enumerate(laser_intensity):
            p.add('Laser_intensity_' + str(i), value=val, min=0, max=None)
        for i, j in enumerate(self.Jlist):
            p.add('A_level_' + str(i), value=ABC[i][0])
            p.add('B_level_' + str(i), value=ABC[i][1])
            p.add('C_level_' + str(i), value=ABC[i][2])
            # The last level defines the zero of the energy scale, so its
            # centroid is pinned to 0.
            if not i == len(self.Jlist)-1:
                p.add('Centroid_level_' + str(i), value=centroids[i])
            else:
                p.add('Centroid_level_' + str(i), value=0, vary=False)
        for i, _ in enumerate(self.level_counts):
            for j, _ in enumerate(self.level_counts):
                if i < j and np.isfinite(self.A_array[i, j]):
                    p.add('Transition_strength_' + str(i) + '_to_' + str(j), value=self.A_array[i, j], min=0, vary=False)
                    # Default Lorentzian width: natural linewidth A/(2*pi), in MHz.
                    fwhmL = self.A_array[i, j]/(2*PI)*1e-6 if FWHML is None else FWHML
                    p.add('FWHML_' + str(i) + '_to_' + str(j), value=fwhmL, min=0)
                    if shape.lower() == 'voigt':
                        par_lor_name = 'FWHML_' + str(i) + '_to_' + str(j)
                        par_gauss_name = 'FWHMG_' + str(i) + '_to_' + str(j)
                        # Olivero-Longbothum approximation of the Voigt FWHM:
                        # 0.5346*fL + sqrt(0.2166*fL^2 + fG^2).
                        expr = '0.5346*{0}+(0.2166*{0}**2+{1}**2)**0.5'
                        p.add('FWHMG_' + str(i) + '_to_' + str(j), value=fwhmG, min=0.0001)
                        p.add('TotalFWHM_' + str(i) + '_to_' + str(j), value=0, vary=False, expr=expr.format(par_lor_name, par_gauss_name))
                    else:
                        # Non-Voigt shapes share one global Gaussian width.
                        p.add('FWHMG', value=fwhmG, vary=fwhmG > 0, min=0)
        p.add('Scale', value=scale)
        p.add('Interaction_time', value=interaction_time, min=0)
        # Background polynomial coefficients, highest order first
        # (consumed by np.polyval in __call__).
        for i, value in enumerate(background_params):
            p.add('Background' + str(i), value=value)
        p.add('Field', value=field)
        p.add('Purity', value=purity, min=0, max=1)
        return self._check_variation(p)
def _check_variation(self, p):
for key in self._vary:
if key in p:
p[key].vary = self._vary[key]
for i, j in enumerate(self.Jlist):
if j[0] < 1.5 or self.I < 1.5:
p['C_level_' + str(i)].value = 0
p['C_level_' + str(i)].vary = False
if j[0] < 1.0 or self.I < 1.0:
p['B_level_' + str(i)].value = 0
p['B_level_' + str(i)].vary = False
if j[0] < 0.5 or self.I < 0.5:
p['A_level_' + str(i)].value = 0
p['A_level_' + str(i)].vary = False
return p
def _calculate_F_levels(self):
I = self.I
J = self.J
L = self.L
self.Flist = []
self.MFlist = []
self.Jlist = []
self.Llist = []
dummyJ = np.array([])
dummyF = np.array([])
dummyFz = np.array([])
dummy = np.array([])
dummyL = np.array([])
for i, (j, l) in enumerate(zip(J, L)):
F = np.arange(np.abs(j - I), j + I + 1) # Values of F
Flen = (2 * F + 1).astype('int') # Lengths of F_z
starts = np.cumsum(np.append([0], Flen[:-1])) # Index for different F states
# Pre-allocate
f = np.zeros(int((2 * F + 1).sum())) # F-states
mz = np.zeros(int((2 * F + 1).sum())) # F_z-states
# Fill the pre-allocated arrays
for i, (entry, start) in enumerate(zip(Flen, starts)):
mz[start:start + entry] = np.arange(-F[i], F[i] + 1)
f[start:start + entry] = F[i]
self.Flist.append(f)
self.MFlist.append(mz)
self.Jlist.append([j]*len(f))
self.Llist.append([l]*len(f))
dummyF = np.append(dummyF, f)
dummyFz = np.append(dummyFz, mz)
dummyJ = np.append(dummyJ, np.ones(len(f))*j)
dummyL = np.append(dummyL, np.ones(len(f))*l)
dummy = np.append(dummy, np.array([len(f)]))
self.F = dummyF
self.Mf = dummyFz
self.J = dummyJ
self.L = dummyL
self.level_counts = dummy.astype('int')
self.level_counts_cs = self.level_counts.cumsum()
    def _calculate_energy_coefficients(self):
        """Precompute the hyperfine (A, B, C) coefficients and the linear
        Zeeman coefficient for every substate.

        I, J and F never change, so these factors are computed once and
        stored. Divisions by zero for low I or J produce inf/nan which are
        deliberately replaced by 0 through np.where (the corresponding
        constants are pinned to zero in _check_variation anyway).
        """
        S = 0.5
        L = self.L
        # Since I, J and F do not change, these factors can be calculated once
        # and then stored.
        I, J, F = self.I, self.J, self.F
        C = (F*(F+1) - I*(I+1) - J*(J + 1)) * (J/J) if I > 0 else 0 * J #*(J/J) is a dirty trick to avoid checking for J=0
        D = (3*C*(C+1) - 4*I*(I+1)*J*(J+1)) / (2*I*(2*I-1)*J*(2*J-1))
        E = (10*(0.5*C)**3 + 20*(0.5*C)**2 + C*(-3*I*(I+1)*J*(J+1) + I*(I+1) + J*(J+1) + 3) - 5*I*(I+1)*J*(J+1)) / (I*(I-1)*(2*I-1)*J*(J-1)*(2*J-1))
        C = np.where(np.isfinite(C), 0.5 * C, 0)
        D = np.where(np.isfinite(D), 0.25 * D, 0)
        E = np.where(np.isfinite(E), E, 0)
        # Lande g-factor of the electronic state.
        gJ = GL * (J * (J + 1) + L * (L + 1) - S * (S + 1)) / \
            (2 * J * (J + 1)) + GS * (J * (J + 1) - L * (L + 1) + S *
                                      (S + 1)) / (2 * J * (J + 1))
        gJ = np.where(np.isfinite(gJ), gJ, 0)
        # Lande g-factor of the hyperfine state (nuclear part neglected).
        gF = gJ * (F * (F + 1) + J * (J + 1) - I * (I + 1)) / \
            (2 * F * (F + 1))
        gF = np.where(np.isfinite(gF), -gF, 0)
        # field_coeff converts an applied field to a shift in MHz.
        self.A_coeff, self.B_coeff, self.C_coeff, self.field_coeff = C, D, E, gF * MUB * self.Mf * ((10 ** (-6)) / H)
def _calculate_energy_changes(self):
field = self._params['Field'].value
A = np.zeros(self.level_counts_cs[-1])
B = np.zeros(self.level_counts_cs[-1])
C = np.zeros(self.level_counts_cs[-1])
centr = np.zeros(self.level_counts_cs[-1])
for i, (ncs, n) in enumerate(zip(self.level_counts_cs, self.level_counts)):
A[ncs-n:ncs] = self._params['A_level_' + str(i)].value
B[ncs-n:ncs] = self._params['B_level_' + str(i)].value
C[ncs-n:ncs] = self._params['C_level_' + str(i)].value
centr[ncs-n:ncs] = self._params['Centroid_level_' + str(i)].value
self.energy_change = centr + self.A_coeff * A + self.B_coeff * B + self.C_coeff * C + self.field_coeff * field
def _set_population(self, level=-1):
try:
levels = len(level)
except:
levels = 1
level = [level]
total_number = sum(self.level_counts[level])
P = np.zeros(self.level_counts_cs[-1])
for lev in level:
N = self.level_counts_cs[lev]
P[N - self.level_counts[lev]:N] = 1.0 / total_number
self.P = P
    def _calculate_A_partial(self):
        """Compute the relative line strength of every substate-to-substate
        transition from Wigner 3j/6j symbols, and remember which (excited,
        ground) index pairs belong to each level-to-level transition."""
        I = self.I
        J = self.Jlist
        F = self.Flist
        Mf = self.MFlist
        N = self.level_counts_cs[-1]
        self.partial_A = np.zeros((N, N))
        self.transition_indices = {}
        for i, _ in enumerate(self.level_counts):
            for j, _ in enumerate(self.level_counts):
                # Only level pairs with a non-zero Einstein coefficient couple.
                if i < j and not np.isclose(self.A_array[i, j], 0):
                    indices_ex = []
                    indices_gr = []
                    for k, (Jex, Fe, Mze) in enumerate(zip(J[i], F[i], Mf[i])):
                        for l, (Jgr, Fg, Mzg) in enumerate(zip(J[j], F[j], Mf[j])):
                            # Relative strength: multiplicities times the
                            # squared 3j (geometry) and 6j (recoupling) symbols.
                            A = float((2 * Jex + 1) * (2 * Fe + 1) * (2 * Fg + 1))
                            W3 = W3J(Fg, 1.0, Fe, -Mzg, Mzg - Mze, Mze)
                            W6 = W6J(Jgr, Fg, I, Fe, Jex, 1.0)
                            A = A * (W3 ** 2)
                            A = A * (W6 ** 2)
                            # Global substate indices of this pair.
                            x = self.level_counts_cs[i] - self.level_counts[i] + k
                            y = self.level_counts_cs[j] - self.level_counts[j] + l
                            self.partial_A[x, y] = A
                            indices_ex.append(x)
                            indices_gr.append(y)
                    self.transition_indices['Transition_strength_' + str(i) + '_to_' + str(j)] = list(zip(indices_ex, indices_gr))
    def _create_D_matrix(self):
        """Create the matrix of laser-driven (stimulated) rate objects.

        D[x, y, laser] holds a BxRho profile for every allowed substate pair
        driven by each laser; 'indices' remembers which entries are filled so
        they can be updated in place by _edit_D_matrix.
        """
        N = self.level_counts_cs[-1]
        D = np.zeros((N, N, len(self.laser_intensity)), dtype='object')
        bxrho = BxRho_Voigt if self.shape.lower() == 'voigt' else BxRho_Lorentzian
        self.indices = []
        for laser_index, laser in enumerate(self.laser_intensity):
            for i, j in itertools.combinations(range(len(self.level_counts)), 2):
                for k, (fe, mze) in enumerate(zip(self.Flist[i], self.MFlist[i])):
                    for l, (fg, mzg) in enumerate(zip(self.Flist[j], self.MFlist[j])):
                        x = self.level_counts_cs[i] - self.level_counts[i] + k
                        y = self.level_counts_cs[j] - self.level_counts[j] + l
                        if np.isclose(self.A_array[i, j], 0) or np.isclose(self.partial_A[x, y], 0):
                            continue
                        # Polarization purity splits the intensity between
                        # the sigma+ and sigma- components.
                        purity = self._params['Purity'].value
                        frac = purity if self.mode[laser_index] == (mze - mzg) else (1.0 - purity) if self.mode[laser_index] == -(mze - mzg) else 0
                        if frac == 0:
                            pass
                        else:
                            intensity = frac * self._params['Laser_intensity_' + str(laser_index)].value
                            A = self._params['Transition_strength_' + str(i) + '_to_' + str(j)].value
                            # NOTE(review): this indexes energies/energy_change
                            # with the per-level indices k and l, whereas
                            # _edit_D_matrix uses the global indices x and y for
                            # the same quantity — one of the two looks wrong;
                            # confirm which indexing is intended.
                            mu = (self.energies[k] + self.energy_change[k]) - (self.energies[l] + self.energy_change[l])
                            kwargs = {'A': A, 'mu': mu, 'laser': intensity}
                            if self.shape.lower() == 'voigt':
                                kwargs['fwhmG'] = self._params['FWHMG_' + str(i) + '_to_' + str(j)].value * 1e6
                                kwargs['fwhmL'] = self._params['FWHML_' + str(i) + '_to_' + str(j)].value * 1e6
                            else:
                                kwargs['fwhm'] = self._params['FWHML_' + str(i) + '_to_' + str(j)].value * 1e6
                            D[x, y, laser_index] = bxrho(**kwargs)
                            self.indices.append((x, y, laser_index, i, j, mze, mzg))
        self.D = D
    def _edit_D_matrix(self):
        """Update the existing BxRho objects in D with the current parameter
        values (positions, strengths, widths, intensities) and collect the
        unique transition locations for plotting/bookkeeping."""
        self.locations = []
        self.transitions = []
        for x, y, laser_index, i, j, mze, mzg in self.indices:
            purity = self._params['Purity'].value
            frac = purity if self.mode[laser_index] == (mze - mzg) else (1.0 - purity) if self.mode[laser_index] == -(mze - mzg) else 0
            intensity = frac * self._params['Laser_intensity_' + str(laser_index)].value
            A = self.A_array_used[y, x]
            # NOTE(review): global indices x/y are used here, while
            # _create_D_matrix uses the per-level indices k/l for the same
            # expression — confirm which one is correct.
            mu = (self.energies[x] + self.energy_change[x]) - (self.energies[y] + self.energy_change[y])
            self.D[x, y, laser_index].mu = mu
            self.D[x, y, laser_index].A = A
            if self.shape.lower() == 'voigt':
                self.D[x, y, laser_index].gaussian = self._params['FWHMG_' + str(i) + '_to_' + str(j)].value * 1e6
                self.D[x, y, laser_index].lorentzian = self._params['FWHML_' + str(i) + '_to_' + str(j)].value * 1e6
            else:
                self.D[x, y, laser_index].fwhm = self._params['FWHML_' + str(i) + '_to_' + str(j)].value * 1e6
            self.D[x, y, laser_index].laser = intensity
            self.locations.append(mu)
            self.transitions.append((self.F[x], self.F[y]))
        # Deduplicate the transition positions, keeping matching F-pairs.
        self.locations, indices = np.unique(np.array(self.locations), return_index=True)
        self.transitions = np.array(self.transitions)[indices]
    def _evaluate_matrices(self, f):
        """Evaluate every stimulated-rate profile at the scanning frequency
        *f* and assemble the full rate matrix M = A + D.

        Lasers with index >= vary_freqs use their fixed frequency instead of
        *f*; in 'offset' mode *f* is added to the fixed frequency.
        """
        D = np.zeros(self.D.shape)
        for i, j, laser_index, _, _, _, _ in self.indices:
            if laser_index < self.vary_freqs:
                freq = f
            else:
                freq = self.fixed_frequencies[laser_index - self.vary_freqs]
                if self.frequency_mode.lower() == 'offset':
                    freq += f
            D[i, j, laser_index] = self.D[i, j, laser_index](freq)
        D = D.sum(axis=2)
        # Stimulated rates act both up and down; the diagonal carries the
        # total loss of each substate so columns sum to zero.
        D = np.transpose(D) + D
        D = D - np.eye(D.shape[0]) * D.sum(axis=1)
        self.M = self.A_array_used + D
        # Spontaneous-decay part only, used by decay-type observables.
        self.decay_matrix = np.abs(np.diag(np.diag(self.M)))
def _rhsint(self, y, t):
"""Define the system of ODE's for use in the odeint method from SciPy.
Note that the input is (y, t)."""
return np.dot(self.M, y)
    def _process_population(self, y):
        """Reduce the integrated populations *y* to the observable signal.
        Must be overridden by concrete subclasses (e.g. decay rate or
        nuclear polarization)."""
        raise NotImplementedError('Function should be implemented in child classes!')
    def __call__(self, x):
        """Evaluate the model response at frequency/frequencies *x*.

        For array input each frequency is handled separately; scalar input
        falls through to the except-branch. The result is scaled and the
        polynomial background (Background0..N, highest order first) added.
        """
        # NOTE(review): this bare 'except:' is meant to catch scalar input
        # (no .size/.flatten), but it will also silently swallow any error
        # raised inside the loop and retry in scalar mode — confirm intent.
        try:
            response = np.zeros(x.size)
            for i, f in enumerate(x.flatten()):
                self._evaluate_matrices(f)
                dt = self._params['Interaction_time'].value / 400
                y = integrate.odeint(self._rhsint, self.P, np.arange(0, self._params['Interaction_time'].value, dt))
                # Only the population at the end of the interaction matters.
                response[i] = self._process_population(y)[-1]
            response = response.reshape(x.shape)
        except:
            self._evaluate_matrices(x)
            dt = self._params['Interaction_time'].value / 400
            y = integrate.odeint(self._rhsint, self.P, np.arange(0, self._params['Interaction_time'].value, dt))
            response = self._process_population(y)[-1]
        response = self._params['Scale'].value * response
        background_params = [self._params[par_name].value for par_name in self._params if par_name.startswith('Background')]
        return response + np.polyval(background_params, x)
def integrate_with_time(self, x, beginning, duration, steps=401, mode='integral'):
backup = self._params['Interaction_time'].value
self._params['Interaction_time'].value = beginning
time_vector = np.linspace(beginning, beginning + duration, steps)
response = np.zeros(x.size)
try:
for i, f in enumerate(x.flatten()):
self._evaluate_matrices(f)
dt = self._params['Interaction_time'].value / 400
if True:
y = integrate.odeint(self._rhsint, self.P, np.arange(0, self._params['Interaction_time'].value, dt))
y = integrate.odeint(self._rhsint, y[-1, :], time_vector)
else:
y = integrate.odeint(self._rhsint, self.P, time_vector)
y = self._process_population(y)
if mode == 'integral':
response[i] = integrate.simps(y, time_vector)
elif mode == 'mean' or mode == 'average':
response[i] = np.mean(y)
response = response.reshape(x.shape)
except:
self._evaluate_matrices(x)
dt = self._params['Interaction_time'].value / 400
if not np.isclose(dt, 0):
y = integrate.odeint(self._rhsint, self.P, np.arange(0, self._params['Interaction_time'].value, dt))
y = integrate.odeint(self._rhsint, y[-1, :], time_vector)
else:
y = integrate.odeint(self._rhsint, self.P, time_vector)
y = self._process_population(y)
if mode == 'integral':
response = integrate.simps(y, time_vector)
elif mode == 'mean' or mode == 'average':
response = np.mean(y)
response = self._params['Scale'].value * response + self._params['Background'].value
self._params['Interaction_time'].value = backup
return response
def convolve_with_gaussian(function, x, sigma=1):
    """Convolve *function* with a zero-mean Gaussian of width *sigma*.

    For every point X in *x* the integral
    ``int N(y; 0, sigma) * function(X - y) dy`` is evaluated over the full
    real axis with :func:`scipy.integrate.quad`.

    :param function: callable accepting a single float.
    :param x: iterable of evaluation points.
    :param sigma: standard deviation of the Gaussian kernel.
    :returns: list with the convolved value for every entry of *x*.
    """
    # (Removed the dead, commented-out grid-based approximation that was
    # superseded by the quad-based implementation below.)
    sqrt2pi = np.sqrt(2*np.pi)
    return [integrate.quad(lambda y: np.exp(-0.5*(y/sigma)**2)/(sigma*sqrt2pi) * function(X-y), -np.inf, np.inf)[0] for X in x]
class RateModelDecay(RateModel):
    """Rate model whose observable is the total fluorescence (decay) rate."""
    def _process_population(self, y):
        # Total decay rate at each time step: sum_ij decay[i, j] * y[t, j].
        return np.einsum('ij,kj->k', self.decay_matrix, y)
class RateModelPolar(RateModel):
    """Rate model whose observable is the nuclear polarization <m_I>/I."""

    def __init__(self, *args, **kwargs):
        super(RateModelPolar, self).__init__(*args, **kwargs)
        self._convertFMftoMIMJ()

    def _convertFMftoMIMJ(self):
        """Assign an (m_I, m_J) pair to every |F, m_F> substate.

        In the strong-field limit each |F, m_F> state maps onto a definite
        (m_I, m_J); the mapping depends on the sign of the hyperfine A
        constant of the level.

        NOTE(review): the inner loop variable 'i' shadows the outer
        enumerate index, and 'f'/'mf' are rebound several times within one
        iteration; this appears harmless because the shadowed names are
        re-assigned before reuse, but verify when modifying this method.
        """
        self.MIlist = []
        self.MJlist = []
        self.MI = np.array([])
        self.MJ = np.array([])
        for i, (J, F, Mf) in enumerate(zip(self.Jlist, self.Flist, self.MFlist)):
            I = self.I
            A = self._params['A_level_' + str(i)].value
            J = J[0]
            # Create the array of possible F-values.
            f = np.arange(np.abs(I - J), I + J + 1)
            # Create grids of MI and MJ
            I = np.arange(-I, I + 1)
            J = np.arange(-J, J + 1)
            I, J = np.meshgrid(I, J)
            # Calculate the total projection
            mf = I + J
            # Create an equal-size matrix with the correct
            # F-numbers in each place, depending on the sign of A
            M = np.zeros(I.shape)
            for i, val in enumerate(reversed(f)):
                if np.sign(A) == 1:
                    if i != 0:
                        M[0:-i, i] = val
                        M[-i - 1, i:] = val
                    else:
                        M[:, 0] = val
                        M[-1, :] = val
                else:
                    M[i, 0:- 1 - i] = val
                    M[i:, - 1 - i] = val
            # Select, for each |F, m_F> substate, the grid cell with
            # matching F and total projection.
            f_select = []
            m_select = []
            for f, m in zip(F, Mf):
                f_select.append(np.isclose(M, f))
                m_select.append(np.isclose(mf, m))
            MI = []
            MJ = []
            for f, mf in zip(f_select, m_select):
                MI.append(I[np.bitwise_and(f, mf)][0])
                MJ.append(J[np.bitwise_and(f, mf)][0])
            self.MIlist.append(MI)
            self.MJlist.append(MJ)
            self.MI = np.append(self.MI, np.array(MI))
            self.MJ = np.append(self.MJ, np.array(MJ))

    def _process_population(self, y):
        # Polarization at each time step: sum_j m_I[j] * y[t, j] / I.
        return np.einsum('j,kj->k', self.MI, y) / self.I
|
woutergins/polarization
|
polarization/satlasaddon.py
|
Python
|
mit
| 24,328
|
[
"Gaussian"
] |
f59d9f9d7ab8a6292aacd4d524c74aab5c77b947823ae57f4c71a62afd1cc6f8
|
# Copyright 2007 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
'''
Support for asynchronous execution.
'''
import os
import thread
class Async(object):
    '''Abstract Asynchronous execution class.

    This is the top abstract class.
    Concrete classes must implement the _run_program method.
    '''

    def __init__(self):
        '''Async constructor.

        Initializes the queues, among other things.
        Of notice, is the access_ds lock for controlling exclusive
        access to this object.
        '''
        self.running = {}
        self.waiting = []
        self.done = {}
        self.id = 0
        self.hooks = {}
        self.access_ds = thread.allocate_lock()

    def run_program(self, program, parameters, input_files):
        '''Runs a program.

        Real _run_program to be implemented by concrete classes.

        parameters:
        program       String identifying program.
        parameters    List of String parameters.
        input_files   Hash of Input file descriptors.

        returns:
        Task Id, or None if the program is not registered in self.hooks.

        The input_files hash key is the path that is passed
        to the program. It should always be relative.
        Value is a stream.
        '''
        if program in self.hooks:
            # The id counter is shared state; guard it with the lock.
            self.access_ds.acquire()
            self.id += 1
            id = self.id
            self.access_ds.release()
            self._run_program(id, self.hooks[program], parameters, input_files)
            return id

    def _run_program(self, id, program, parameters, input_files):
        """Actually run the program, handled by a subclass (PRIVATE).

        This method should be replaced by any derived class to do
        something useful. It will be called by the run_program method.
        """
        raise NotImplementedError("This object should be subclassed")

    def get_result(self, id):
        ''' Returns the results for a certain Id, the info for that Id is
        forgotten.

        parameters:
        id Id of the task.

        returns:
        (return_code, output_files) return code and file access
        object, or None if the task is not finished (or unknown).

        The output_files hash key is a relative file name, and the value a
        output stream.
        '''
        self.access_ds.acquire()
        if id in self.done:
            returnCode, fileObject = self.done[id]
            del self.done[id]
            self.access_ds.release()
            # Bug fix: the result was retrieved but never returned, so
            # callers always received None even for finished tasks.
            return returnCode, fileObject
        else:
            self.access_ds.release()
            return None
class FileRetriever(object):
    '''Abstract support class to retrieve files.'''

    def __init__(self):
        # File names that can be served; populated by subclasses.
        self.file_list = []

    def get_File_list(self):
        '''Return the list of available file names.'''
        return self.file_list

    def get_file(self, name):
        '''Return a stream for the named file; implemented by subclasses.'''
        raise NotImplementedError('Abstract method')
class DirectoryRetriever(FileRetriever):
    '''Retrieves a directory content (recursively).'''

    def __init__(self, directory):
        FileRetriever.__init__(self)
        self.directory = directory
        for dirpath, _subdirs, filenames in os.walk(directory):
            for fname in filenames:
                # Store the path relative to the root directory. Bug fix:
                # the original sliced the bare file name
                # (file[len(directory)+1:]), which truncated the name
                # instead of stripping the directory prefix from the path.
                self.file_list.append(os.path.join(dirpath, fname)[len(directory) + 1:])

    def get_file(self, name):
        # NOTE: the caller is responsible for closing the returned stream.
        return open(self.directory + os.sep + name)
|
bryback/quickseq
|
genescript/Bio/PopGen/Async/__init__.py
|
Python
|
mit
| 3,538
|
[
"Biopython"
] |
86dcd9efb502dc2e47335e6a7ba3b9ca35596e70220ee1e3250dd1142a071047
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Drops the Visit model: its foreign-key fields ('site', 'visitor')
    # must be removed before the model itself can be deleted.

    dependencies = [
        ('whsites', '0002_auto_20150102_1905'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='visit',
            name='site',
        ),
        migrations.RemoveField(
            model_name='visit',
            name='visitor',
        ),
        migrations.DeleteModel(
            name='Visit',
        ),
    ]
|
robjordan/unesco_project
|
unesco/whsites/migrations/0003_auto_20150207_1347.py
|
Python
|
apache-2.0
| 521
|
[
"VisIt"
] |
7d8c25ea03b76af8085a00e2a8cfdd7443d48028693f58bbf467b251646c790a
|
# coding=utf-8
import logging
from deprecated import deprecated
from requests import HTTPError
from .base import BitbucketBase
from atlassian.bitbucket.cloud import Cloud
log = logging.getLogger(__name__)
class Bitbucket(BitbucketBase):
    """Bitbucket Server / Cloud REST client.

    Cloud mode is auto-detected from a bitbucket.org URL; the API version
    defaults to 2.0 for Cloud and 1.0 for Server.
    """

    def __init__(self, url, *args, **kwargs):
        if "bitbucket.org" in url and "cloud" not in kwargs:
            kwargs["cloud"] = True
        if "api_version" not in kwargs:
            kwargs["api_version"] = "2.0" if kwargs.get("cloud") else "1.0"
        if "cloud" in kwargs:
            kwargs["api_root"] = "" if "api.bitbucket.org" in url else "rest/api"
        super(Bitbucket, self).__init__(url, *args, **kwargs)
def markup_preview(self, data):
"""
Preview generated HTML for the given markdown content.
Only authenticated users may call this resource.
:param data:
:return:
"""
url = self.resource_url("markup/preview")
return self.post(url, data=data)
    ################################################################################################
    # Administrative functions
    ################################################################################################

    def _url_admin(self, api_version=None):
        """Base URL of the 'admin' REST resource."""
        return self.resource_url("admin", api_version=api_version)
def group_members(self, group, start=0, limit=None):
"""
Get group of members
:param group: The group name to query
:param start:
:param limit:
:return: A list of group members
"""
url = "{}/groups/more-members".format(self._url_admin())
params = {}
if start:
params["start"] = start
if limit:
params["limit"] = limit
if group:
params["context"] = group
return self._get_paged(url, params=params)
    def all_project_administrators(self):
        """
        Get the list of project administrators

        :return: A generator object containing a map with the project_key, project_name and project_administrators
        """
        # Lazily yields one dictionary per project so huge instances do not
        # require materializing the whole result.
        for project in self.project_list():
            log.info("Processing project: {0} - {1}".format(project.get("key"), project.get("name")))
            yield {
                "project_key": project.get("key"),
                "project_name": project.get("name"),
                "project_administrators": [
                    {"email": x["emailAddress"], "name": x["displayName"]}
                    for x in self.project_users_with_administrator_permissions(project["key"])
                ],
            }
    def reindex(self):
        """
        Rebuild the bundled Elasticsearch indexes for Bitbucket Server

        :return: response of the POST to the indexing sync resource
        """
        url = self.resource_url("sync", api_root="rest/indexing", api_version="latest")
        return self.post(url)

    def check_reindexing_status(self):
        """
        Check reindexing status

        :return: current status of the indexing operation
        """
        url = self.resource_url("status", api_root="rest/indexing", api_version="latest")
        return self.get(url)
def get_users(self, user_filter=None, limit=25, start=0):
"""
Get list of bitbucket users.
Use 'user_filter' for get specific users or get all users if necessary.
:param user_filter: str - username, displayname or email
:param limit: int - paginated limit to retrieve
:param start: int - paginated point to start retreiving
:return: The collection as JSON with all relevant information about the licensed user
"""
url = self.resource_url("users", api_version="1.0")
params = {}
if user_filter:
params["filter"] = user_filter
if limit:
params["limit"] = limit
if start:
params["start"] = start
return self.get(url, params=params)
def get_users_info(self, user_filter=None, start=0, limit=25):
"""
The authenticated user must have the LICENSED_USER permission to call this resource.
:param user_filter: if specified only users with usernames, display name or email addresses
containing the supplied string will be returned
:param limit:
:param start:
:return:
"""
url = "{}/users".format(self._url_admin(api_version="1.0"))
params = {}
if limit:
params["limit"] = limit
if start:
params["start"] = start
if user_filter:
params["filter"] = user_filter
return self._get_paged(url, params=params)
    def get_current_license(self):
        """
        Retrieves details about the current license, as well as the current status of the system with
        regards to the installed license. The status includes the current number of users applied
        toward the license limit, as well as any status messages about the license (warnings about expiry
        or user counts exceeding license limits).
        The authenticated user must have ADMIN permission. Unauthenticated users, and non-administrators,
        are not permitted to access license details.

        :return: license details as JSON
        """
        url = "{}/license".format(self._url_admin())
        return self.get(url)
def _url_mail_server(self):
return "{}/mail-server".format(self._url_admin())
def get_mail_configuration(self):
"""
Retrieves the current mail configuration.
The authenticated user must have the SYS_ADMIN permission to call this resource.
:return:
"""
url = self._url_mail_server()
return self.get(url)
def _url_mail_server_sender_address(self):
return "{}/sender-address".format(self._url_mail_server())
def get_mail_sender_address(self):
"""
Retrieves the server email address
:return:
"""
url = self._url_mail_server_sender_address()
return self.get(url)
def remove_mail_sender_address(self):
"""
Clears the server email address.
The authenticated user must have the ADMIN permission to call this resource.
:return:
"""
url = self._url_mail_server_sender_address()
return self.delete(url)
def get_ssh_settings(self):
"""
Retrieve ssh settings for user
:return:
"""
url = self.resource_url("settings", api_root="rest/ssh")
return self.get(url)
def health_check(self):
"""
Get health status
https://confluence.atlassian.com/jirakb/how-to-retrieve-health-check-results-using-rest-api-867195158.html
:return:
"""
# check as Troubleshooting & Support Tools Plugin
response = self.get("rest/troubleshooting/1.0/check/")
if not response:
# check as support tools
response = self.get("rest/supportHealthCheck/1.0/check/")
return response
def get_associated_build_statuses(self, commit):
"""
To get the build statuses associated with a commit.
:commit: str- commit id
:return:
"""
url = self.resource_url("commits/{commitId}".format(commitId=commit), api_root="rest/build-status")
return self.get(url)
def _url_announcement_banner(self):
return "{}/banner".format(self._url_admin())
def get_announcement_banner(self):
"""
Gets the announcement banner, if one exists and is available to the user
:return:
"""
url = self._url_announcement_banner()
return self.get(url)
def set_announcement_banner(self, body):
"""
Sets the announcement banner with the provided JSON.
Only users authenticated as Admins may call this resource
:param body
{
"id": "https://docs.atlassian.com/jira/REST/schema/rest-announcement-banner#",
"title": "Rest Announcement Banner",
"type": "object"
}
:return:
"""
url = self._url_announcement_banner()
return self.put(url, data=body)
def delete_announcement_banner(self):
"""
Gets the announcement banner, if one exists and is available to the user
:return:
"""
url = self._url_announcement_banner()
return self.delete(url)
def upload_plugin(self, plugin_path):
"""
Provide plugin path for upload into BitBucket e.g. useful for auto deploy
:param plugin_path:
:return:
"""
upm_token = self.request(
method="GET",
path="rest/plugins/1.0/",
headers=self.no_check_headers,
trailing=True,
).headers["upm-token"]
url = "rest/plugins/1.0/?token={}".format(upm_token)
files = {"plugin": open(plugin_path, "rb")}
return self.post(url, files=files, headers=self.no_check_headers)
################################################################################################
# Functions related to projects
################################################################################################
def _url_projects(self, api_root=None, api_version=None):
return self.resource_url("projects", api_root, api_version)
def project_list(self, start=0, limit=None):
"""
Provide the project list
:return: A list of projects
"""
url = self._url_projects()
params = {}
if start:
params["start"] = start
if limit:
params["limit"] = limit
return self._get_paged(url, params=params)
def create_project(self, key, name, description=""):
"""
Create project
:param key: The project key
:param name: The project name
:param description: The project description
:return: The value of the post request.
"""
url = self._url_projects()
data = {"key": key, "name": name, "description": description}
return self.post(url, data=data)
################################################################################################
# Functions related to a specific project
################################################################################################
def _url_project(self, project_key, api_root=None, api_version=None):
return "{}/{}".format(self._url_projects(api_root, api_version), project_key)
def project(self, key):
"""
Provide project info
:param key: The project key
:return:
"""
url = self._url_project(key)
return self.get(url) or {}
def project_exists(self, project_key):
"""
Check if project with the provided project key exists and available.
:param project_key: Key of the project where to check for repository.
:return: False is requested repository doesn't exist in the project or not accessible to the requestor
"""
exists = False
try:
self.project(project_key)
exists = True
except HTTPError as e:
if e.response.status_code in (401, 404):
pass
return exists
def update_project(self, key, **params):
"""
Update project
:param key: The project key
:return: The value of the put request.
"""
url = self._url_project(key)
return self.put(url, data=params)
def _url_project_avatar(self, project_key):
return "{}/avatar.png".format(self._url_project(project_key))
def project_summary(self, key):
"""
Get a project summary
:param key: The project key
:return: Map with the project information
"""
return {
"key": key,
"data": self.project(key),
"users": self.project_users(key),
"groups": self.project_groups(key),
"avatar": self.project_avatar(key),
}
def project_avatar(self, key, content_type="image/png"):
"""
Get project avatar
:param key: The project key
:param content_type: The content type to get
:return: Value of get request
"""
url = self._url_project_avatar(key)
headers = dict(self.default_headers)
headers["Accept"] = content_type
headers["X-Atlassian-Token"] = "no-check"
return self.get(url, not_json_response=True, headers=headers)
def set_project_avatar(self, key, icon, content_type="image/png"):
"""
Set project avatar
:param key: The Project key
:param icon: The icon file
:param content_type: The content type of icon
:return: Value of post request
"""
url = self._url_project_avatar(key)
headers = {"X-Atlassian-Token": "no-check"}
files = {"avatar": ("avatar.png", icon, content_type)}
return self.post(url, files=files, headers=headers)
def project_keys(self, key, start=0, limit=None, filter_str=None):
"""
Get SSH access keys added to the project
:param start:
:param limit:
:param key: The project key
:param filter_str: OPTIONAL: users filter string
:return: The list of SSH access keys
"""
url = "{}/ssh".format(self._url_project(key, api_root="rest/keys"))
params = {}
if start:
params["start"] = start
if limit:
params["limit"] = limit
if filter_str:
params["filter"] = filter_str
return self._get_paged(url, params=params)
def _url_project_users(self, project_key):
return "{}/permissions/users".format(self._url_project(project_key))
def project_users(self, key, start=0, limit=None, filter_str=None):
"""
Get users who has permission in project
:param key: The project key
:param filter_str: OPTIONAL: users filter string
:param start:
:param limit:
:return: The list of project users
"""
url = self._url_project_users(key)
params = {}
if start:
params["start"] = start
if limit:
params["limit"] = limit
if filter_str:
params["filter"] = filter_str
return self._get_paged(url, params=params)
def project_users_with_administrator_permissions(self, key):
"""
Get project administrators for project
:param key: The project key
:return: List of project administrators
"""
project_administrators = [
user["user"] for user in self.project_users(key) if user["permission"] == "PROJECT_ADMIN"
]
for group in self.project_groups_with_administrator_permissions(key):
for user in self.group_members(group):
project_administrators.append(user)
return project_administrators
def project_grant_user_permissions(self, project_key, username, permission):
"""
Grant the specified project permission to an specific user
:param project_key: The project key
:param username: user name to be granted
:param permission: the project permissions available are 'PROJECT_ADMIN', 'PROJECT_WRITE' and 'PROJECT_READ'
:return:
"""
url = self._url_project_users(project_key)
params = {"permission": permission, "name": username}
return self.put(url, params=params)
def project_remove_user_permissions(self, project_key, username):
"""
Revoke all permissions for the specified project for a user.
The authenticated user must have PROJECT_ADMIN permission for
the specified project or a higher global permission to call this resource.
In addition, a user may not revoke their own project permissions if they do not have a higher global permission.
:param project_key: The project key
:param username: user name to be granted
:return:
"""
url = self._url_project_users(project_key)
params = {"name": username}
return self.delete(url, params=params)
def _url_project_groups(self, project_key):
return "{}/permissions/groups".format(self._url_project(project_key))
def project_groups(self, key, start=0, limit=None, filter_str=None):
"""
Get Project Groups
:param limit:
:param limit:
:param start:
:param key: The project key
:param filter_str: OPTIONAL: group filter string
:return:
"""
url = self._url_project_groups(key)
params = {}
if start:
params["start"] = start
if limit:
params["limit"] = limit
if filter_str:
params["filter"] = filter_str
return self._get_paged(url, params=params)
def project_grant_group_permissions(self, project_key, group_name, permission):
"""
Grant the specified project permission to an specific group
:param project_key: The project key
:param group_name: group to be granted
:param permission: the project permissions available are 'PROJECT_ADMIN', 'PROJECT_WRITE' and 'PROJECT_READ'
:return:
"""
url = self._url_project_groups(project_key)
params = {"permission": permission, "name": group_name}
return self.put(url, params=params)
def project_remove_group_permissions(self, project_key, groupname):
"""
Revoke all permissions for the specified project for a group.
The authenticated user must have PROJECT_ADMIN permission for the specified project
or a higher global permission to call this resource.
In addition, a user may not revoke a group's permissions
if it will reduce their own permission level.
:param project_key: The project key
:param groupname: group to be granted
:return:
"""
url = self._url_project_groups(project_key)
params = {"name": groupname}
return self.delete(url, params=params)
def project_default_permissions(self, project_key, permission):
"""
Check if the specified permission is the default permission for a given project
:param project_key: The project key
:param permission: the project permissions available are 'PROJECT_ADMIN', 'PROJECT_WRITE' and 'PROJECT_READ'
:return:
"""
url = "{}/permissions/{}/all".format(self._url_project(project_key), permission)
return self.get(url)
def project_grant_default_permissions(self, project_key, permission):
"""
Grant the specified project permission to all users for a given project
:param project_key: The project key
:param permission: the project permissions available are 'PROJECT_ADMIN', 'PROJECT_WRITE' and 'PROJECT_READ'
:return:
"""
url = "{}/permissions/{}/all".format(self._url_project(project_key), permission)
return self.post(url, params={"allow": True})
def project_remove_default_permissions(self, project_key, permission):
"""
Revoke the specified project permission for all users for a given project
:param project_key: The project key
:param permission: the project permissions available are 'PROJECT_ADMIN', 'PROJECT_WRITE' and 'PROJECT_READ'
:return:
"""
url = "{}/permissions/{}/all".format(self._url_project(project_key), permission)
return self.post(url, params={"allow": False})
def _url_project_repo_hook_settings(self, project_key):
return "{}/settings/hooks".format(self._url_project(project_key))
def all_project_repo_hook_settings(self, project_key, start=0, limit=None, filter_type=None):
"""
Get all repository hooks for a given project
:param project_key: The project key
:param start:
:param limit: OPTIONAL: The limit of the number of changes to return, this may be restricted by
fixed system limits. Default by built-in method: None
:param filter_type: OPTIONAL: PRE_RECEIVE|POST_RECEIVE if present, controls how repository hooks should be filtered.
:return:
"""
url = self._url_project_repo_hook_settings(project_key)
params = {}
if filter_type:
params["type"] = filter_type
if start:
params["start"] = start
if limit:
params["limit"] = limit
return self._get_paged(url, params)
def get_project_repo_hook_settings(self, project_key, hook_key):
"""
Get a repository hook from a given project
:param project_key: The project key
:param hook_key: The repository hook key
:return:
"""
url = "{}/{}".format(self._url_project_repo_hook_settings(project_key), hook_key)
return self.get(url)
def enable_project_repo_hook_settings(self, project_key, hook_key):
"""
Enable a repository hook for a given project
:param project_key: The project key
:param hook_key: The repository hook key
:return:
"""
url = "{}/{}/enabled".format(self._url_project_repo_hook_settings(project_key), hook_key)
return self.put(url)
def disable_project_repo_hook_settings(self, project_key, hook_key):
"""
Disable a repository hook for a given project
:param project_key: The project key
:param hook_key: The repository hook key
:return:
"""
url = "{}/{}/enabled".format(self._url_project_repo_hook_settings(project_key), hook_key)
return self.delete(url)
def _url_project_conditions(self, project_key):
return "{}/conditions".format(
self._url_project(project_key, api_root="rest/default-reviewers", api_version="1.0")
)
def get_project_conditions(self, project_key):
"""
Request type: GET
Return a page of defaults conditions with reviewers list that have been configured for this project.
For further information visit:
https://docs.atlassian.com/bitbucket-server/rest/5.16.0/bitbucket-default-reviewers-rest.html#idm52264904368
:projectKey: str
:return:
"""
url = self._url_project_conditions(project_key)
return self.get(url) or {}
def _url_project_condition(self, project_key, id_condition=None):
url = "{}/condition".format(
self._url_project(project_key, api_root="rest/default-reviewers", api_version="1.0")
)
if id_condition is not None:
url += "/{}".format(id_condition)
return url
def get_project_condition(self, project_key, id_condition):
"""
Request type: GET
Return a specific condition with reviewers list that has been configured for this project.
For further information visit:
https://docs.atlassian.com/bitbucket-server/rest/5.16.0/bitbucket-default-reviewers-rest.html#idm52264901504
:projectKey: str - project key involved
:idCondition: int - condition id involved
:return:
"""
url = self._url_project_condition(project_key, id_condition)
return self.get(url) or {}
def create_project_condition(self, project_key, condition):
"""
Request type: POST
Create a new condition for this project.
For further information visit:
https://docs.atlassian.com/bitbucket-server/rest/5.16.0/bitbucket-default-reviewers-rest.html#idm52264893584
:projectKey: str- project key involved
:data: condition: dictionary object
:example condition: '{"sourceMatcher":
{"id":"any",
"type":{"id":"ANY_REF"}},
"targetMatcher":{"id":"refs/heads/master","type":{"id":"BRANCH"}},
"reviewers":[{"id": 12}],"requiredApprovals":"0"
}'
:return:
"""
url = self._url_project_condition(project_key)
return self.post(url, data=condition) or {}
def update_project_condition(self, project_key, condition, id_condition):
"""
Request type: PUT
Update a new condition for this project.
For further information visit:
https://docs.atlassian.com/bitbucket-server/rest/5.16.0/bitbucket-default-reviewers-rest.html#idm52264927632
:projectKey: str- project key involved
:idCondition: int - condition id involved
:data: condition: dictionary object
:example condition: '{"sourceMatcher":
{"id":"any",
"type":{"id":"ANY_REF"}},
"targetMatcher":{"id":"refs/heads/master","type":{"id":"BRANCH"}},
"reviewers":[{"id": 12}],"requiredApprovals":"0"
}'
:return:
"""
url = self._url_project_condition(project_key, id_condition)
return self.put(url, data=condition) or {}
def delete_project_condition(self, project_key, id_condition):
"""
Request type: DELETE
Delete a specific condition for this repository slug inside project.
For further information visit:
https://docs.atlassian.com/bitbucket-server/rest/5.16.0/bitbucket-default-reviewers-rest.html#idm52264896304
:projectKey: str- project key involved
:idCondition: int - condition id involved
:return:
"""
url = self._url_project_condition(project_key, id_condition)
return self.delete(url) or {}
def _url_project_audit_log(self, project_key):
if self.cloud:
raise Exception("Not supported in Bitbucket Cloud")
return "{}/events".format(self._url_project(project_key, api_root="rest/audit"))
def get_project_audit_log(self, project_key, start=0, limit=None):
"""
Get the audit log of the project
:param start:
:param limit:
:param project_key: The project key
:return: List of events of the audit log
"""
url = self._url_project_audit_log(project_key)
params = {}
if start:
params["start"] = start
if limit:
params["limit"] = limit
return self._get_paged(url, params=params)
def _url_repos(self, project_key, api_root=None, api_version=None):
return "{}/repos".format(self._url_project(project_key, api_root, api_version))
def repo_list(self, project_key, start=0, limit=25):
"""
Get repositories list from project
:param project_key: The project key
:param start:
:param limit:
:return:
"""
url = self._url_repos(project_key)
params = {}
if start:
params["start"] = start
if limit:
params["limit"] = limit
return self._get_paged(url, params=params)
def repo_all_list(self, project_key):
"""
Get all repositories list from project
:param project_key:
:return:
"""
return self.repo_list(project_key, limit=None)
def create_repo(self, project_key, repository_slug, forkable=False, is_private=True):
"""Create a new repository.
Requires an existing project in which this repository will be created. The only parameters which will be used
are name and scmId.
The authenticated user must have PROJECT_ADMIN permission for the context project to call this resource.
:param project_key: The project matching the projectKey supplied in the resource path as shown in URL.
:type project_key: str
:param repository_slug: Name of repository to create (i.e. "My repo").
:param forkable: Set the repository to be forkable or not.
:type forkable: bool
:param is_private: Set the repository to be private or not.
:type is_private: bool
:return:
201 - application/json (repository)
400 - application/json (errors)
401 - application/json (errors)
409 - application/json (errors)
:rtype: requests.Response
"""
url = self._url_repos(project_key)
data = {
"name": repository_slug,
"scmId": "git",
"forkable": forkable,
"is_private": is_private,
}
return self.post(url, data=data)
################################################################################################
# Functions related to a specific repository
################################################################################################
def _url_repo(self, project_key, repo, api_root=None, api_version=None):
return "{}/{}".format(self._url_repos(project_key, api_root, api_version), repo)
def reindex_repo(self, project_key, repository_slug):
"""
Reindex repo
:param project_key:
:param repository_slug:
:return:
"""
url = "{urlRepo}/sync".format(
urlRepo=self._url_repo(
project_key,
repository_slug,
api_root="rest/indexing",
api_version="1.0",
)
)
return self.post(url)
def reindex_repo_dev_panel(self, project_key, repository_slug):
"""
Reindex all of the Jira issues related to this repository_slug, including branches and pull requests.
This automatically happens as part of an upgrade, and calling this manually should only be required
if something unforeseen happens and the index becomes out of sync.
The authenticated user must have REPO_ADMIN permission for the specified repository to call this resource.
:param project_key:
:param repository_slug:
:return:
"""
url = "{}/reindex".format(self._url_repo(project_key, repository_slug, api_root="rest/jira-dev"))
return self.post(url)
def get_repo(self, project_key, repository_slug):
"""
Get a specific repository from a project. This operates based on slug not name which may
be confusing to some users.
:param project_key: Key of the project you wish to look in.
:param repository_slug: url-compatible repository identifier
:return: Dictionary of request response
"""
url = self._url_repo(project_key, repository_slug)
return self.get(url)
def repo_exists(self, project_key, repository_slug):
"""
Check if given combination of project and repository exists and available.
:param project_key: Key of the project where to check for repository.
:param repository_slug: url-compatible repository identifier to look for.
:return: False is requested repository doesn't exist in the project or not accessible to the requestor
"""
exists = False
try:
self.get_repo(project_key, repository_slug)
exists = True
except HTTPError as e:
if e.response.status_code in (401, 404):
pass
return exists
def update_repo(self, project_key, repository_slug, **params):
"""
Update a repository in a project. This operates based on slug not name which may
be confusing to some users.
:param project_key: Key of the project you wish to look in.
:param repository_slug: url-compatible repository identifier
:return: The value of the put request.
"""
url = self._url_repo(project_key, repository_slug)
return self.put(url, data=params)
def delete_repo(self, project_key, repository_slug):
"""
Delete a specific repository from a project. This operates based on slug not name which may
be confusing to some users.
:param project_key: Key of the project you wish to look in.
:param repository_slug: url-compatible repository identifier
:return: Dictionary of request response
"""
url = self._url_repo(project_key, repository_slug)
return self.delete(url)
def fork_repository(self, project_key, repository_slug, new_repository_slug):
"""
Forks a repository within the same project.
:param project_key:
:param repository_slug:
:param new_repository_slug:
:return:
"""
url = self._url_repo(project_key, repository_slug)
body = {}
if new_repository_slug is not None:
body["name"] = new_repository_slug
body["project"] = {"key": project_key}
return self.post(url, data=body)
def fork_repository_new_project(self, project_key, repository_slug, new_project_key, new_repository_slug):
"""
Forks a repository to a separate project.
:param project_key: Origin Project Key
:param repository_slug: Origin repository slug
:param new_project_key: Project Key of target project
:param new_repository_slug: Target Repository slug
:return:
"""
url = self._url_repo(project_key, repository_slug)
body = {}
if new_repository_slug is not None and new_project_key is not None:
body["name"] = new_repository_slug
body["project"] = {"key": new_project_key}
return self.post(url, data=body)
def repo_keys(self, project_key, repo_key, start=0, limit=None, filter_str=None):
"""
Get SSH access keys added to the repository
:param start:
:param limit:
:param project_key: The project key
:param repo_key: The repository key
:param filter_str: OPTIONAL: users filter string
:return:
"""
url = "{}/ssh".format(self._url_repo(project_key, repo_key, api_root="rest/keys"))
params = {}
if start:
params["start"] = start
if limit:
params["limit"] = limit
if filter_str:
params["filter"] = filter_str
return self._get_paged(url, params=params)
def _url_repo_users(self, project_key, repo):
return "{}/permissions/users".format(self._url_repo(project_key, repo))
def repo_users(self, project_key, repo_key, start=0, limit=None, filter_str=None):
"""
Get users who has permission in repository
:param start:
:param limit:
:param project_key: The project key
:param repo_key: The repository key
:param filter_str: OPTIONAL: Users filter string
:return:
"""
url = self._url_repo_users(project_key, repo_key)
params = {}
if start:
params["start"] = start
if limit:
params["limit"] = limit
if filter_str:
params["filter"] = filter_str
return self._get_paged(url, params=params)
def repo_grant_user_permissions(self, project_key, repo_key, username, permission):
"""
Grant the specified repository permission to an specific user
:param project_key: The project key
:param repo_key: The repository key (slug)
:param username: user name to be granted
:param permission: the repository permissions available are 'REPO_ADMIN', 'REPO_WRITE' and 'REPO_READ'
:return:
"""
url = self._url_repo_users(project_key, repo_key)
params = {"permission": permission, "name": username}
return self.put(url, params=params)
def repo_remove_user_permissions(self, project_key, repo_key, username):
"""
Revoke all permissions for the specified repository for a user.
The authenticated user must have REPO_ADMIN permission for the specified repository
or a higher project or global permission to call this resource.
In addition, a user may not revoke their own repository permissions
if they do not have a higher project or global permission.
:param project_key: The project key
:param repo_key: The repository key (slug)
:param username: user name to be granted
:return:
"""
url = self._url_repo_users(project_key, repo_key)
params = {"name": username}
return self.delete(url, params=params)
def _url_repo_groups(self, project_key, repo):
return "{}/permissions/groups".format(self._url_repo(project_key, repo))
def repo_groups(self, project_key, repo_key, start=0, limit=None, filter_str=None):
"""
Get repository Groups
:param start:
:param limit:
:param project_key: The project key
:param repo_key: The repository key
:param filter_str: OPTIONAL: group filter string
:return:
"""
url = self._url_repo_groups(project_key, repo_key)
params = {}
if start:
params["start"] = start
if limit:
params["limit"] = limit
if filter_str:
params["filter"] = filter_str
return self._get_paged(url, params=params)
def project_groups_with_administrator_permissions(self, key):
"""
Get groups with admin permissions
:param key:
:return:
"""
return [group["group"]["name"] for group in self.project_groups(key) if group["permission"] == "PROJECT_ADMIN"]
def repo_grant_group_permissions(self, project_key, repo_key, groupname, permission):
"""
Grant the specified repository permission to an specific group
Promote or demote a group's permission level for the specified repository. Available repository permissions are:
REPO_READ
REPO_WRITE
REPO_ADMIN
See the Bitbucket Server documentation for a detailed explanation of what each permission entails.
The authenticated user must have REPO_ADMIN permission for the specified repository or a higher project
or global permission to call this resource.
In addition, a user may not demote a group's permission level
if their own permission level would be reduced as a result.
:param project_key: The project key
:param repo_key: The repository key (slug)
:param groupname: group to be granted
:param permission: the repository permissions available are 'REPO_ADMIN', 'REPO_WRITE' and 'REPO_READ'
:return:
"""
url = self._url_repo_groups(project_key, repo_key)
params = {"permission": permission, "name": groupname}
return self.put(url, params=params)
def repo_remove_group_permissions(self, project_key, repo_key, groupname, permission):
"""
Revoke all permissions for the specified repository for a group.
The authenticated user must have REPO_ADMIN permission for the specified repository
or a higher project or global permission to call this resource.
In addition, a user may not revoke a group's permissions if it will reduce their own permission level.
:param project_key: The project key
:param repo_key: The repository key (slug)
:param groupname: group to be granted
:param permission: the repository permissions available are 'REPO_ADMIN', 'REPO_WRITE' and 'REPO_READ'
:return:
"""
url = self._url_repo_groups(project_key, repo_key)
params = {"name": groupname}
if permission:
params["permission"] = permission
return self.delete(url, params=params)
def _url_repo_labels(self, project_key, repository_slug):
if self.cloud:
raise Exception("Not supported in Bitbucket Cloud")
return "{}/labels".format(self._url_repo(project_key, repository_slug))
def get_repo_labels(self, project_key, repository_slug):
"""
Get labels for a specific repository from a project. This operates based on slug not name which may
be confusing to some users. (BitBucket Server only)
:param project_key: Key of the project you wish to look in.
:param repository_slug: url-compatible repository identifier
:return: Dictionary of request response
"""
url = self._url_repo_labels(project_key, repository_slug)
return self.get(url)
def set_repo_label(self, project_key, repository_slug, label_name):
"""
Sets a label on a repository. (BitBucket Server only)
The authenticated user must have REPO_ADMIN permission for the specified repository to call this resource.
:param project_key: Key of the project you wish to look in.
:param repository_slug: url-compatible repository identifier
:param label_name: label name to apply
:return:
"""
url = self._url_repo_labels(project_key, repository_slug)
data = {"name": label_name}
return self.post(url, data=data)
def _url_repo_audit_log(self, project_key, repository_slug):
if self.cloud:
raise Exception("Not supported in Bitbucket Cloud")
return "{}/events".format(self._url_repo(project_key, repository_slug, api_root="rest/audit"))
def get_repo_audit_log(self, project_key, repository_slug, start=0, limit=None):
"""
Get the audit log of the repository
:param start:
:param limit:
:param project_key: Key of the project you wish to look in.
:param repository_slug: url-compatible repository identifier
:return: List of events of the audit log
"""
url = self._url_repo_audit_log(project_key, repository_slug)
params = {}
if start:
params["start"] = start
if limit:
params["limit"] = limit
return self._get_paged(url, params=params)
def _url_repo_branches(self, project_key, repository_slug, api_root=None):
return "{}/branches".format(self._url_repo(project_key, repository_slug, api_root=api_root))
def get_branches(
self,
project_key,
repository_slug,
base=None,
filter=None,
start=0,
limit=None,
details=True,
order_by="MODIFICATION",
):
"""
Retrieve the branches matching the supplied filterText param.
The authenticated user must have REPO_READ permission for the specified repository to call this resource.
:param start:
:param project_key:
:param repository_slug:
:param base: base branch/tag to compare each branch to (for the metadata providers that uses that information)
:param filter:
:param limit: OPTIONAL: The limit of the number of branches to return, this may be restricted by
fixed system limits. Default by built-in method: None
:param details: whether to retrieve plugin-provided metadata about each branch
:param order_by: OPTIONAL: ordering of refs either ALPHABETICAL (by name) or MODIFICATION (last updated)
:return:
"""
url = self._url_repo_branches(project_key, repository_slug)
params = {}
if start:
params["start"] = start
if limit:
params["limit"] = limit
if filter:
params["filterText"] = filter
if base:
params["base"] = base
if order_by:
params["orderBy"] = order_by
params["details"] = details
return self._get_paged(url, params=params)
def _url_repo_default_branche(self, project_key, repository_slug):
return "{}/default".format(self._url_repo_branches(project_key, repository_slug))
def get_default_branch(self, project_key, repository_slug):
"""
Get the default branch of the repository.
The authenticated user must have REPO_READ permission for the specified repository to call this resource.
:param project_key: The project key
:param repository_slug: The repository key
:return:
"""
url = self._url_repo_default_branche(project_key, repository_slug)
return self.get(url)
def set_default_branch(self, project_key, repository_slug, ref_branch_name):
    """
    Update the default branch of a repository.
    The authenticated user must have REPO_ADMIN permission for the specified repository.
    :param project_key: The project key
    :param repository_slug: The repository key (slug)
    :param ref_branch_name: fully-qualified ref name, e.g. refs/heads/master
    :return: response of the PUT request
    """
    url = self._url_repo_default_branche(project_key, repository_slug)
    return self.put(url, data={"id": ref_branch_name})
def create_branch(self, project_key, repository_slug, name, start_point, message=""):
    """Create a branch using the information provided in the request.
    The authenticated user must have REPO_WRITE permission for the context repository.
    :param project_key: The project matching the projectKey supplied in the resource path as shown in URL.
    :type project_key: str
    :param repository_slug: Name of repository where branch is created (i.e. "my_repo").
    :param name: Name of branch to create (i.e. "my_branch").
    :type name: str
    :param start_point: Name of branch to branch from.
    :type start_point: str
    :param message: Branch message.
    :type message: str
    :return:
        200 - application/json (repository)
        401 - application/json (errors)
        404 - application/json (errors)
    :rtype: requests.Response
    """
    payload = {
        "name": name,
        "startPoint": start_point,
        "message": message,
    }
    return self.post(self._url_repo_branches(project_key, repository_slug), data=payload)
def delete_branch(self, project_key, repository_slug, name, end_point=None):
    """
    Delete a branch from the related repository (uses the branch-utils plugin API).
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param name: name of the branch to delete
    :param end_point: optional commit the branch is expected to point at
    :return: response of the DELETE request
    """
    url = self._url_repo_branches(project_key, repository_slug, api_root="rest/branch-utils")
    payload = {"name": str(name)}
    if end_point:
        payload["endPoint"] = end_point
    return self.delete(url, data=payload)
def _url_repo_tags(self, project_key, repository_slug, api_root=None):
    # Cloud and Server expose tags under different sub-paths.
    base = self._url_repo(project_key, repository_slug, api_root=api_root)
    suffix = "/refs/tags" if self.cloud else "/tags"
    return base + suffix
def get_tags(
    self,
    project_key,
    repository_slug,
    filter="",
    limit=1000,
    order_by=None,
    start=0,
):
    """
    Retrieve the tags matching the supplied filter text.
    The authenticated user must have REPO_READ permission for the context repository.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param filter: text used to filter tag names (sent as filterText)
    :param start: index of the first result to return
    :param limit: OPTIONAL: maximum number of tags to return; may be restricted by
            fixed system limits. Default by built-in method: 1000
    :param order_by: OPTIONAL: ordering of refs, either ALPHABETICAL (by name) or MODIFICATION (last updated)
    :return: paged response of tags
    """
    url = self._url_repo_tags(project_key, repository_slug)
    params = {}
    # Falsy values (0, None, "") are deliberately left out of the query string.
    for key, value in (
        ("start", start),
        ("limit", limit),
        ("filterText", filter),
        ("orderBy", order_by),
    ):
        if value:
            params[key] = value
    return self._get_paged(url, params=params)
def get_project_tags(self, project_key, repository_slug, tag_name=None):
    """
    Retrieve one tag (when ``tag_name`` is given) or all tags of a repository.
    The authenticated user must have REPO_READ permission for the context repository.
    Search uri is api/1.0/projects/{projectKey}/repos/{repositorySlug}/tags/{name:.*}
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param tag_name: OPTIONAL: name of a single tag to fetch directly
    :return: the tag, or a paged response of all tags
    """
    url = self._url_repo_tags(project_key, repository_slug)
    if tag_name is None:
        return self._get_paged(url)
    return self.get("{}/{}".format(url, tag_name))
def set_tag(self, project_key, repository_slug, tag_name, commit_revision, description=None):
    """
    Create a tag pointing at the given commit.
    The authenticated user must have REPO_WRITE permission for the context repository.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param tag_name: name of the tag to create
    :param commit_revision: commit hash the tag points at
    :param description: OPTIONAL: tag message
    :return: response of the POST request
    """
    payload = {"name": tag_name, "startPoint": commit_revision}
    if description is not None:
        payload["message"] = description
    return self.post(self._url_repo_tags(project_key, repository_slug), data=payload)
def delete_tag(self, project_key, repository_slug, tag_name):
    """
    Delete a tag (uses the git plugin API).
    The authenticated user must have REPO_WRITE permission for the context repository.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param tag_name: name of the tag to delete
    :return: response of the DELETE request
    """
    base = self._url_repo_tags(project_key, repository_slug, api_root="rest/git")
    return self.delete("{}/{}".format(base, tag_name))
def _url_repo_hook_settings(self, project_key, repository_slug):
    # Hook-settings resource of the repository.
    return self._url_repo(project_key, repository_slug) + "/settings/hooks"
def all_repo_hook_settings(self, project_key, repository_slug, start=0, limit=None, filter_type=None):
    """
    Get all repository hooks for a given repo.
    :param project_key: The project key
    :param repository_slug: The repository key
    :param start: index of the first result to return
    :param limit: OPTIONAL: The limit of the number of changes to return, this may be restricted by
            fixed system limits. Default by built-in method: None
    :param filter_type: OPTIONAL: PRE_RECEIVE|POST_RECEIVE if present, controls how repository hooks
            should be filtered.
    :return: paged response of hooks
    """
    url = self._url_repo_hook_settings(project_key, repository_slug)
    params = {}
    if filter_type:
        params["type"] = filter_type
    if start:
        params["start"] = start
    if limit:
        params["limit"] = limit
    # Pass params by keyword for consistency with the other paged getters.
    return self._get_paged(url, params=params)
def get_repo_hook_settings(self, project_key, repository_slug, hook_key):
    """
    Get a single repository hook from a given repo.
    :param project_key: The project key
    :param repository_slug: The repository key
    :param hook_key: The repository hook key
    :return: the hook settings
    """
    base = self._url_repo_hook_settings(project_key, repository_slug)
    return self.get("{}/{}".format(base, hook_key))
def enable_repo_hook_settings(self, project_key, repository_slug, hook_key):
    """
    Enable a repository hook for a given repo.
    :param project_key: The project key
    :param repository_slug: The repository key
    :param hook_key: The repository hook key
    :return: response of the PUT request
    """
    base = self._url_repo_hook_settings(project_key, repository_slug)
    return self.put("{}/{}/enabled".format(base, hook_key))
def disable_repo_hook_settings(self, project_key, repository_slug, hook_key):
    """
    Disable a repository hook for a given repo.
    :param project_key: The project key
    :param repository_slug: The repository key
    :param hook_key: The repository hook key
    :return: response of the DELETE request
    """
    base = self._url_repo_hook_settings(project_key, repository_slug)
    # Disabling is a DELETE on the same "enabled" sub-resource used to enable.
    return self.delete("{}/{}/enabled".format(base, hook_key))
def _url_webhooks(self, project_key, repository_slug):
    # Webhook collection resource of the repository.
    return self._url_repo(project_key, repository_slug) + "/webhooks"
def get_webhooks(
    self,
    project_key,
    repository_slug,
    event=None,
    statistics=False,
):
    """
    List the webhooks of a repository.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param event: OPTIONAL: event to filter by (defaults to None)
    :param statistics: OPTIONAL: whether to include statistics (defaults to False)
    :return: paged response of webhooks
    """
    params = {}
    if event:
        params["event"] = event
    if statistics:
        params["statistics"] = statistics
    return self._get_paged(self._url_webhooks(project_key, repository_slug), params=params)
def create_webhook(
    self,
    project_key,
    repository_slug,
    name,
    events,
    webhook_url,
    active,
    secret=None,
):
    """Create a webhook using the information provided in the request.
    The authenticated user must have REPO_ADMIN permission for the context repository.
    :param project_key: The project matching the projectKey supplied in the resource path as shown in URL.
    :param repository_slug: slug of the repository
    :param name: Name of webhook to create.
    :param events: List of event. (i.e. ["repo:refs_changed", "pr:merged", "pr:opened"])
    :param webhook_url: URL the webhook posts to
    :param active: whether the webhook is enabled
    :param secret: The string is used to verify data integrity between Bitbucket and your endpoint.
    :return: response of the POST request
    """
    payload = {
        "name": name,
        "events": events,
        "url": webhook_url,
        "active": active,
    }
    if secret:
        payload["configuration"] = {"secret": secret}
    return self.post(self._url_webhooks(project_key, repository_slug), data=payload)
def _url_webhook(self, project_key, repository_slug, webhook_id):
    # Single-webhook resource.
    base = self._url_webhooks(project_key, repository_slug)
    return "{}/{}".format(base, webhook_id)
def get_webhook(self, project_key, repository_slug, webhook_id):
    """
    Retrieve a webhook.
    The authenticated user must have REPO_ADMIN permission for the context repository.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param webhook_id: the ID of the webhook within the repository
    :return: the webhook
    """
    return self.get(self._url_webhook(project_key, repository_slug, webhook_id))
def update_webhook(self, project_key, repository_slug, webhook_id, **params):
    """
    Update a webhook.
    The authenticated user must have REPO_ADMIN permission for the context repository.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param webhook_id: the ID of the webhook within the repository
    :param params: fields to update, sent as the request body
    :return: response of the PUT request
    """
    return self.put(self._url_webhook(project_key, repository_slug, webhook_id), data=params)
def delete_webhook(self, project_key, repository_slug, webhook_id):
    """
    Delete a webhook.
    The authenticated user must have REPO_ADMIN permission for the context repository.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param webhook_id: the ID of the webhook within the repository
    :return: response of the DELETE request
    """
    return self.delete(self._url_webhook(project_key, repository_slug, webhook_id))
def _url_pull_request_settings(self, project_key, repository_slug):
    # Pull-request settings resource of the repository.
    return self._url_repo(project_key, repository_slug) + "/settings/pull-requests"
def get_pull_request_settings(self, project_key, repository_slug):
    """
    Get the pull request settings of a repository.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :return: the pull request settings
    """
    return self.get(self._url_pull_request_settings(project_key, repository_slug))
def set_pull_request_settings(self, project_key, repository_slug, data):
    """
    Set the pull request settings of a repository.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param data: json body with the settings to apply
    :return: response of the POST request
    """
    return self.post(self._url_pull_request_settings(project_key, repository_slug), data=data)
def _url_pull_requests(self, project_key, repository_slug):
    # Cloud and Server expose pull requests under different endpoints.
    if self.cloud:
        path = "repositories/{}/{}/pullrequests".format(project_key, repository_slug)
        return self.resource_url(path)
    return self._url_repo(project_key, repository_slug) + "/pull-requests"
def get_pull_requests(
    self,
    project_key,
    repository_slug,
    state="OPEN",
    order="newest",
    limit=100,
    start=0,
    at=None,
):
    """
    Get pull requests of a repository.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param state: pull request state filter (default "OPEN")
    :param order: OPTIONAL: defaults to NEWEST) the order to return pull requests in, either OLDEST
            (as in: "oldest first") or NEWEST.
    :param limit: maximum number of pull requests per page
    :param start: index of the first result to return
    :param at: optional ref to filter by
    :return: paged response of pull requests
    """
    url = self._url_pull_requests(project_key, repository_slug)
    params = {}
    # Falsy values are deliberately left out of the query string.
    for key, value in (
        ("state", state),
        ("limit", limit),
        ("start", start),
        ("order", order),
        ("at", at),
    ):
        if value:
            params[key] = value
    return self._get_paged(url, params=params)
def open_pull_request(
    self,
    source_project,
    source_repo,
    dest_project,
    dest_repo,
    source_branch,
    destination_branch,
    title,
    description,
    reviewers=None,
):
    """
    Create a new pull request between two branches.
    The branches may be in the same repository_slug, or different ones.
    When using different repositories, they must still be in the same {@link Repository#getHierarchyId() hierarchy}.
    The authenticated user must have REPO_READ permission for the "from" and "to" repositories to call this resource.
    :param source_project: the project that the PR source is from
    :param source_repo: the repository that the PR source is from
    :param source_branch: the branch name of the PR
    :param dest_project: the project that the PR destination is from
    :param dest_repo: the repository that the PR destination is from
    :param destination_branch: where the PR is being merged into
    :param title: the title of the PR
    :param description: the description of what the PR does
    :param reviewers: a single reviewer name, or any iterable of reviewer names
    :return: response of the pull-request creation request
    """
    body = {
        "title": title,
        "description": description,
        "fromRef": {
            "id": source_branch,
            "repository": {
                "slug": source_repo,
                "name": source_repo,
                "project": {"key": source_project},
            },
        },
        "toRef": {
            "id": destination_branch,
            "repository": {
                "slug": dest_repo,
                "name": dest_repo,
                "project": {"key": dest_project},
            },
        },
        "reviewers": [],
    }
    if reviewers is not None:
        # Accept a single name or any iterable of names. (Previously only str
        # and list were honored; tuples/generators were silently dropped.)
        names = [reviewers] if isinstance(reviewers, str) else reviewers
        body["reviewers"] = [{"user": {"name": name}} for name in names]
    return self.create_pull_request(dest_project, dest_repo, body)
def create_pull_request(self, project_key, repository_slug, data):
    """
    Create a pull request from a prepared JSON body.
    :param project_key: key of the destination project
    :param repository_slug: slug of the destination repository
    :param data: json body
    :return: response of the POST request
    """
    return self.post(self._url_pull_requests(project_key, repository_slug), data=data)
def _url_pull_request(self, project_key, repository_slug, pull_request_id):
    # Single pull-request resource.
    base = self._url_pull_requests(project_key, repository_slug)
    return "{}/{}".format(base, pull_request_id)
def get_pull_request(self, project_key, repository_slug, pull_request_id):
    """
    Retrieve a pull request.
    The authenticated user must have REPO_READ permission
    for the repository that this pull request targets.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param pull_request_id: the ID of the pull request within the repository
    :return: the pull request
    """
    return self.get(self._url_pull_request(project_key, repository_slug, pull_request_id))
@deprecated(version="1.15.1", reason="Use get_pull_request()")
def get_pullrequest(self, *args, **kwargs):
    """
    Deprecated name since 1.15.1. Use :func:`get_pull_request` instead.
    """
    return self.get_pull_request(*args, **kwargs)
def delete_pull_request(self, project_key, repository_slug, pull_request_id, pull_request_version):
    """
    Delete a pull request.
    :param project_key: the project key
    :param repository_slug: the repository slug
    :param pull_request_id: the ID of the pull request within the repository
    :param pull_request_version: the version of the pull request
    :return: response of the DELETE request
    """
    url = self._url_pull_request(project_key, repository_slug, pull_request_id)
    # The expected version is sent in the request body.
    return self.delete(url, data={"version": pull_request_version})
def get_pull_requests_activities(self, project_key, repository_slug, pull_request_id, start=0, limit=None):
    """
    Get pull request activities.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param pull_request_id: the ID of the pull request within the repository
    :param start: index of the first result to return
    :param limit: maximum number of activities to return (None = server default)
    :return: paged response of activities
    """
    url = "{}/activities".format(self._url_pull_request(project_key, repository_slug, pull_request_id))
    params = {}
    if start:
        params["start"] = start
    if limit:
        params["limit"] = limit
    # Pass params by keyword for consistency with the other paged getters.
    return self._get_paged(url, params=params)
def get_pull_requests_changes(self, project_key, repository_slug, pull_request_id, start=0, limit=None):
    """
    Get pull request changes.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param pull_request_id: the ID of the pull request within the repository
    :param start: index of the first result to return
    :param limit: maximum number of changes to return (None = server default)
    :return: paged response of changes
    """
    url = "{}/changes".format(self._url_pull_request(project_key, repository_slug, pull_request_id))
    params = {}
    if start:
        params["start"] = start
    if limit:
        params["limit"] = limit
    # Pass params by keyword for consistency with the other paged getters.
    return self._get_paged(url, params=params)
def get_pull_requests_commits(self, project_key, repository_slug, pull_request_id, start=0, limit=None):
    """
    Get pull request commits.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param pull_request_id: the ID of the pull request within the repository
    :param start: index of the first result to return
    :param limit: maximum number of commits to return (None = server default)
    :return: paged response of commits
    """
    url = "{}/commits".format(self._url_pull_request(project_key, repository_slug, pull_request_id))
    params = {}
    if start:
        params["start"] = start
    if limit:
        params["limit"] = limit
    # Pass params by keyword for consistency with the other paged getters.
    return self._get_paged(url, params=params)
def _url_pull_request_participants(self, project_key, repository_slug, pull_request_id):
    # Participants collection of a pull request.
    base = self._url_pull_requests(project_key, repository_slug)
    return "{}/{}/participants".format(base, pull_request_id)
def get_pull_requests_participants(self, project_key, repository_slug, pull_request_id, start=0, limit=None):
    """
    Get all participants of a pull request.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param pull_request_id: the ID of the pull request within the repository
    :param start: index of the first result to return
    :param limit: maximum number of participants to return (None = server default)
    :return: paged response of participants
    """
    url = self._url_pull_request_participants(project_key, repository_slug, pull_request_id)
    params = {}
    if start:
        params["start"] = start
    if limit:
        params["limit"] = limit
    # Pass params by keyword for consistency with the other paged getters.
    return self._get_paged(url, params=params)
def change_reviewed_status(self, project_key, repository_slug, pull_request_id, status, user_slug):
    """
    Change the current user's status for a pull request.
    Implicitly adds the user as a participant if they are not already.
    If the current user is the author, this method will fail.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param pull_request_id: the ID of the pull request within the repository
    :param status: participant status string, e.g. "APPROVED"
    :param user_slug: slug of the user whose status is changed
    :return: response of the PUT request
    """
    url = "{}/{}".format(
        self._url_pull_request_participants(project_key, repository_slug, pull_request_id),
        user_slug,
    )
    # "approved" simply mirrors whether the new status is APPROVED.
    approved = status == "APPROVED"
    data = {"user": {"name": user_slug}, "approved": approved, "status": status}
    return self.put(url, data)
def _url_pull_request_comments(self, project_key, repository_slug, pull_request_id):
    # Comment collection of a pull request.
    return self._url_pull_request(project_key, repository_slug, pull_request_id) + "/comments"
def add_pull_request_comment(self, project_key, repository_slug, pull_request_id, text, parent_id=None):
    """
    Add a comment (or a reply, when ``parent_id`` is given) to a pull request.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param pull_request_id: the ID of the pull request within the repository
    :param text: comment text
    :param parent_id: optional ID of the parent comment to reply to
    :return: response of the POST request
    """
    url = self._url_pull_request_comments(project_key, repository_slug, pull_request_id)
    payload = {"text": text}
    if parent_id:
        payload["parent"] = {"id": parent_id}
    return self.post(url, data=payload)
def _url_pull_request_comment(self, project_key, repository_slug, pull_request_id, comment_id):
    # Single-comment resource of a pull request.
    base = self._url_pull_request_comments(project_key, repository_slug, pull_request_id)
    return "{}/{}".format(base, comment_id)
def get_pull_request_comment(self, project_key, repository_slug, pull_request_id, comment_id):
    """
    Retrieve a pull request comment.
    The authenticated user must have REPO_READ permission
    for the repository that this pull request targets.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param pull_request_id: the ID of the pull request within the repository
    :param comment_id: the ID of the comment to retrieve
    :return: the comment
    """
    return self.get(self._url_pull_request_comment(project_key, repository_slug, pull_request_id, comment_id))
def update_pull_request_comment(
    self,
    project_key,
    repository_slug,
    pull_request_id,
    comment_id,
    comment,
    comment_version,
):
    """
    Update the text of a comment.
    Only the user who created a comment may update it.
    Note: the supplied JSON object must contain a version that matches
    the server's version of the comment, or the update will fail.
    """
    url = self._url_pull_request_comment(project_key, repository_slug, pull_request_id, comment_id)
    payload = {"version": comment_version, "text": comment}
    return self.put(url, data=payload)
@deprecated(version="2.4.2", reason="Use delete_pull_request_comment()")
def delete_pull_reques_comment(self, project_key, repository_slug, pull_request_id, comment_id, comment_version):
    """
    Deprecated name since 2.4.2. Use :func:`delete_pull_request_comment` instead.
    """
    return self.delete_pull_request_comment(
        project_key, repository_slug, pull_request_id, comment_id, comment_version
    )
def delete_pull_request_comment(self, project_key, repository_slug, pull_request_id, comment_id, comment_version):
    """
    Delete a comment.
    Only the repository admin or the user who created a comment may delete it.
    Note: the supplied version must match the server's version of the comment,
    or the delete will fail.
    """
    url = self._url_pull_request_comment(project_key, repository_slug, pull_request_id, comment_id)
    # The expected version is sent as a query parameter, not in the body.
    return self.delete(url, params={"version": comment_version})
def decline_pull_request(self, project_key, repository_slug, pr_id, pr_version):
    """
    Decline a pull request.
    The authenticated user must have REPO_READ permission for the repository
    that this pull request targets.
    :param project_key: PROJECT
    :param repository_slug: my_shiny_repo
    :param pr_id: 2341
    :param pr_version: 12 (not sent on Bitbucket Cloud)
    :return: response of the POST request
    """
    url = "{}/decline".format(self._url_pull_request(project_key, repository_slug, pr_id))
    # Bitbucket Server requires the expected PR version; Cloud does not.
    params = {} if self.cloud else {"version": pr_version}
    return self.post(url, params=params)
def get_tasks(self, project_key, repository_slug, pull_request_id):
    """
    Get all tasks for the pull request.
    Not supported on Bitbucket Cloud.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param pull_request_id: the ID of the pull request within the repository
    :return: the tasks of the pull request
    """
    if self.cloud:
        raise Exception("Not supported in Bitbucket Cloud")
    url = "{}/tasks".format(self._url_pull_request(project_key, repository_slug, pull_request_id))
    return self.get(url)
def _url_tasks(self):
    # Task resources exist only on Bitbucket Server, not on Cloud.
    if self.cloud:
        raise Exception("Not supported in Bitbucket Cloud")
    return self.resource_url("tasks")
def add_task(self, anchor, text):
    """
    Add a task to a comment.
    :param anchor: ID of the comment the task is anchored to
    :param text: task text
    :return: response of the POST request
    """
    payload = {"anchor": {"id": anchor, "type": "COMMENT"}, "text": text}
    return self.post(self._url_tasks(), data=payload)
def _url_task(self, task_id):
    # Single-task resource.
    return self._url_tasks() + "/{}".format(task_id)
def get_task(self, task_id):
    """
    Get task information by ID.
    :param task_id: ID of the task
    :return: the task
    """
    return self.get(self._url_task(task_id))
def delete_task(self, task_id):
    """
    Delete a task by ID.
    :param task_id: ID of the task
    :return: response of the DELETE request
    """
    return self.delete(self._url_task(task_id))
def update_task(self, task_id, text=None, state=None):
    """
    Update a task by ID. It is possible to update the state and/or the text of the task.
    :param task_id: ID of the task
    :param text: new task text (left unchanged when None)
    :param state: new state, OPEN or RESOLVED (left unchanged when None)
    :return: response of the PUT request
    """
    payload = {"id": task_id}
    if text:
        payload["text"] = text
    if state:
        payload["state"] = state
    return self.put(self._url_task(task_id), data=payload)
def is_pull_request_can_be_merged(self, project_key, repository_slug, pr_id):
    """
    Test whether a pull request can be merged.
    A pull request may not be merged if:
    - there are conflicts that need to be manually resolved before merging; and/or
    - one or more merge checks have vetoed the merge.
    The authenticated user must have REPO_READ permission for the repository
    that this pull request targets.
    :param project_key: PROJECT
    :param repository_slug: my_shiny_repo
    :param pr_id: 2341
    :return: merge status of the pull request
    """
    return self.get("{}/merge".format(self._url_pull_request(project_key, repository_slug, pr_id)))
def merge_pull_request(self, project_key, repository_slug, pr_id, pr_version):
    """
    Merge a pull request.
    The authenticated user must have REPO_READ permission for the repository
    that this pull request targets.
    :param project_key: PROJECT
    :param repository_slug: my_shiny_repo
    :param pr_id: 2341
    :param pr_version: expected PR version (not sent on Bitbucket Cloud)
    :return: response of the POST request
    """
    url = "{}/merge".format(self._url_pull_request(project_key, repository_slug, pr_id))
    # Bitbucket Server requires the expected PR version; Cloud does not.
    params = {} if self.cloud else {"version": pr_version}
    return self.post(url, params=params)
def reopen_pull_request(self, project_key, repository_slug, pr_id, pr_version):
    """
    Re-open a declined pull request.
    The authenticated user must have REPO_READ permission for the repository
    that this pull request targets.
    :param project_key: PROJECT
    :param repository_slug: my_shiny_repo
    :param pr_id: 2341
    :param pr_version: 12
    :return: response of the POST request
    """
    url = "{}/reopen".format(self._url_pull_request(project_key, repository_slug, pr_id))
    return self.post(url, params={"version": pr_version})
def _url_inbox_pull_requests(self):
return "inbox/pull-requests"
def check_inbox_pull_requests_count(self):
    """Return the count resource of the current user's pull-request inbox."""
    return self.get("{}/count".format(self._url_inbox_pull_requests()))
def check_inbox_pull_requests(self, start=0, limit=None, role=None):
    """
    Get the pull requests in your inbox.
    :param start: index of the first result to return (always sent)
    :param limit: maximum number of pull requests to return
    :param role: optional role filter
    :return: response of the GET request
    """
    params = {"start": start}
    if limit:
        params["limit"] = limit
    if role:
        params["role"] = role
    return self.get(self._url_inbox_pull_requests(), params=params)
def _url_repo_compare(self, project_key, repository_slug):
    # Compare resource of the repository.
    return self._url_repo(project_key, repository_slug) + "/compare"
def get_diff(self, project_key, repository_slug, path, hash_oldest, hash_newest):
    """
    Get a diff of the changes available in the {@code from} commit but not in the {@code to} commit.
    If either the {@code from} or {@code to} commit are not specified,
    they will be replaced by the default branch of their containing repository.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param path: path to diff, appended to the compare endpoint
    :param hash_oldest: the source commit (can be a partial/full commit ID or qualified/unqualified ref name)
    :param hash_newest: the target commit (can be a partial/full commit ID or qualified/unqualified ref name)
    :return: the "diffs" part of the response, if any
    """
    url = "{}/diff/{}".format(self._url_repo_compare(project_key, repository_slug), path)
    params = {}
    if hash_oldest:
        params["from"] = hash_oldest
    if hash_newest:
        params["to"] = hash_newest
    response = self.get(url, params=params) or {}
    return response.get("diffs")
def _url_commits(self, project_key, repository_slug, api_root=None, api_version=None):
    # Commit collection resource of the repository.
    base = self._url_repo(project_key, repository_slug, api_root=api_root, api_version=api_version)
    return base + "/commits"
def get_commits(
    self,
    project_key,
    repository_slug,
    hash_oldest=None,
    hash_newest=None,
    follow_renames=False,
    ignore_missing=False,
    merges="include",
    with_counts=False,
    avatar_size=None,
    avatar_scheme=None,
    limit=None,
):
    """
    Get the commit list of a repository.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param hash_oldest: start commit, sent as "since"
    :param hash_newest: end commit, sent as "until"
    :param merges: OPTIONAL: include|exclude|only - controls how merge commits are filtered
    :param follow_renames: OPTIONAL: if true, the commit history of the specified file is followed past renames
    :param ignore_missing: OPTIONAL: true to ignore missing commits, false otherwise
    :param with_counts: OPTIONAL: include the total number of commits and of unique authors
    :param avatar_size: OPTIONAL: if present the service adds avatar URLs for commit authors
    :param avatar_scheme: OPTIONAL: the desired scheme for the avatar URL
    :param limit: OPTIONAL: maximum number of commits to return; may be restricted by
            fixed system limits. Default by built-in method: None
    :return: the "values" part of the response, if any
    """
    url = self._url_commits(project_key, repository_slug)
    params = {"merges": merges}
    # Optional query parameters; falsy values are left out.
    for key, value in (
        ("since", hash_oldest),
        ("until", hash_newest),
        ("followRenames", follow_renames),
        ("ignoreMissing", ignore_missing),
        ("withCounts", with_counts),
        ("avatarSize", avatar_size),
        ("avatarScheme", avatar_scheme),
        ("limit", limit),
    ):
        if value:
            params[key] = value
    response = self.get(url, params=params) or {}
    return response.get("values")
def _url_commit(self, project_key, repository_slug, commit_id, api_root=None, api_version=None):
    # Single-commit resource.
    base = self._url_commits(project_key, repository_slug, api_root=api_root, api_version=api_version)
    return "{}/{}".format(base, commit_id)
def get_commit_info(self, project_key, repository_slug, commit, path=None):
    """
    Retrieve a single commit identified by its ID. In general, that ID is a SHA1.
    From 2.11, ref names like "refs/heads/master" are no longer accepted by this resource.
    The authenticated user must have REPO_READ permission for the specified repository.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param commit: the commit ID to retrieve
    :param path: OPTIONAL: an optional path to filter the commit by.
        If supplied the details returned may not be for the specified commit.
        Instead, starting from the specified commit, they will be the details
        for the first commit affecting the specified path.
    :return: the commit
    """
    url = self._url_commit(project_key, repository_slug, commit)
    params = {"path": path} if path else {}
    return self.get(url, params=params)
def _url_commit_pull_requests(self, project_key, repository_slug, commit_id):
    # Pull-requests collection associated with a commit.
    return self._url_commit(project_key, repository_slug, commit_id) + "/pull-requests"
def get_pull_requests_contain_commit(self, project_key, repository_slug, commit):
    """
    Get the pull requests that contain the given commit.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param commit: the commit ID
    :return: the "values" part of the response, if any
    """
    url = self._url_commit_pull_requests(project_key, repository_slug, commit)
    return (self.get(url) or {}).get("values")
def get_changelog(self, project_key, repository_slug, ref_from, ref_to, start=0, limit=None):
    """
    Get the change log between two refs.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param ref_from: source ref
    :param ref_to: target ref
    :param start: index of the first result to return
    :param limit: OPTIONAL: maximum number of changes to return; may be restricted by
            fixed system limits. Default by built-in method: None
    :return: paged response of commits
    """
    url = "{}/compare/commits".format(self._url_repo(project_key, repository_slug))
    params = {}
    # Falsy values are deliberately left out of the query string.
    for key, value in (("from", ref_from), ("to", ref_to), ("start", start), ("limit", limit)):
        if value:
            params[key] = value
    return self._get_paged(url, params=params)
def _url_code_insights_annotations(self, project_key, repository_slug, commit_id, report_key):
    # Annotations collection of a code-insights report (insights plugin API).
    commit_url = self._url_commit(
        project_key,
        repository_slug,
        commit_id,
        api_root="rest/insights",
        api_version="1.0",
    )
    return "{}/reports/{}/annotations".format(commit_url, report_key)
def add_code_insights_annotations_to_report(self, project_key, repository_slug, commit_id, report_key, annotations):
    """
    Add annotations to an existing code-insights report.
    For further information visit:
    https://docs.atlassian.com/bitbucket-server/rest/6.6.1/bitbucket-code-insights-rest.html
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param commit_id: the commit the report is attached to
    :param report_key: key of the report
    :param annotations: list of annotation objects
    :return: response of the POST request
    """
    url = self._url_code_insights_annotations(project_key, repository_slug, commit_id, report_key)
    return self.post(url, data={"annotations": annotations})
def _url_code_insights_report(self, project_key, repository_slug, commit_id, report_key):
    # Single code-insights report resource (insights plugin API).
    commit_url = self._url_commit(
        project_key,
        repository_slug,
        commit_id,
        api_root="rest/insights",
        api_version="1.0",
    )
    return "{}/reports/{}".format(commit_url, report_key)
def get_code_insights_report(self, project_key, repository_slug, commit_id, report_key):
    """
    Retrieve the specified code-insights report.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param commit_id: the commit the report is attached to
    :param report_key: key of the report
    :return: the report
    """
    return self.get(self._url_code_insights_report(project_key, repository_slug, commit_id, report_key))
def delete_code_insights_report(self, project_key, repository_slug, commit_id, report_key):
    """
    Delete a report for the given commit. Also deletes any annotations associated with this report.
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param commit_id: the commit the report is attached to
    :param report_key: key of the report
    :return: response of the DELETE request
    """
    return self.delete(self._url_code_insights_report(project_key, repository_slug, commit_id, report_key))
def create_code_insights_report(
    self, project_key, repository_slug, commit_id, report_key, report_title, **report_params
):
    """
    Create a new code-insights report, or replace the existing one
    if a report already exists for the given repository_slug, commit, and report key.
    A request to replace an existing report will be rejected
    if the authenticated user was not the creator of the specified report.
    For further information visit:
    https://docs.atlassian.com/bitbucket-server/rest/6.6.1/bitbucket-code-insights-rest.html
    :param project_key: key of the project
    :param repository_slug: slug of the repository
    :param commit_id: the commit the report is attached to
    :param report_key: key of the report
    :param report_title: title of the report
    :param report_params: additional report fields, merged into the request body
    :return: response of the PUT request
    """
    url = self._url_code_insights_report(project_key, repository_slug, commit_id, report_key)
    # report_params may deliberately override "title", matching the original update order.
    data = {"title": report_title, **report_params}
    return self.put(url, data=data)
def get_file_list(self, project_key, repository_slug, query=None, start=0, limit=None):
    """
    Retrieve a page of files from particular directory of a repository.
    The search is done recursively, so all files from any sub-directory of the specified directory will be returned.
    The authenticated user must have REPO_READ permission for the specified repository to call this resource.

    :param project_key:
    :param repository_slug:
    :param query: the commit ID or ref (e.g. a branch or tag) to list the files at.
        If not specified the default branch will be used instead.
    :param start:
    :param limit: OPTIONAL
    :return: paged file listing
    """
    url = "{}/files".format(self._url_repo(project_key, repository_slug))
    # Truthiness check matches the original: start=0 / limit=None are omitted.
    params = {key: value for key, value in (("at", query), ("start", start), ("limit", limit)) if value}
    return self._get_paged(url, params=params)
def get_content_of_file(self, project_key, repository_slug, filename, at=None, markup=None):
    """
    Retrieve the raw content for a file path at a specified revision.
    The authenticated user must have REPO_READ permission for the specified repository to call this resource.

    :param project_key:
    :param repository_slug:
    :param filename:
    :param at: OPTIONAL ref string
    :param markup: if present or "true", triggers the raw content to be markup-rendered and returned as HTML;
        otherwise, if not specified, or any value other than "true" the content is streamed without markup.
    :return: raw (non-JSON) response content
    """
    url = "{}/raw/{}".format(self._url_repo(project_key, repository_slug), filename)
    params = {key: value for key, value in (("at", at), ("markup", markup)) if value is not None}
    return self.get(url, params=params, not_json_response=True, headers=self.form_token_headers)
def _url_branches_permissions(self, project_key, permission_id=None, repository_slug=None):
if repository_slug is None:
base = self._url_project(project_key, api_root="rest/branch-permissions", api_version="2.0")
else:
base = self._url_repo(
project_key,
repository_slug,
api_root="rest/branch-permissions",
api_version="2.0",
)
return "{}/restrictions/{}".format(base, "" if permission_id is None else str(permission_id))
def get_branches_permissions(self, project_key, permission_id, repository_slug=None, start=0, limit=25):
    """
    Get branches permissions from a given repo.

    :param project_key:
    :param permission_id:
    :param repository_slug:
    :param start:
    :param limit:
    :return:
    """
    url = self._url_branches_permissions(project_key, permission_id, repository_slug)
    # Falsy start/limit are omitted, matching the original behavior.
    params = {key: value for key, value in (("limit", limit), ("start", start)) if value}
    return self.get(url, params=params)
def set_branches_permissions(
    self,
    project_key,
    multiple_permissions=False,
    matcher_type=None,
    matcher_value=None,
    permission_type=None,
    repository_slug=None,
    except_users=None,
    except_groups=None,
    except_access_keys=None,
    start=0,
    limit=25,
):
    """
    Create a restriction for the supplied branch or set of branches to be applied to the given repository.
    Allows creating multiple restrictions at once.
    To use multiple restrictions you should format payload manually -
    see the bitbucket-branch-restrictions.py example.
    Reference: https://docs.atlassian.com/bitbucket-server/rest/6.8.0/bitbucket-ref-restriction-rest.html

    :param project_key:
    :param multiple_permissions: falsy for a single restriction; otherwise a
        pre-built payload that is POSTed as-is with bulk headers.
    :param matcher_type: NOTE(review): when multiple_permissions is falsy this
        must be a non-None string — ``.upper()``/``.capitalize()`` are called
        on it and None would raise AttributeError.
    :param matcher_value:
    :param permission_type:
    :param repository_slug: if None, the restriction is created at project scope.
    :param except_users:
    :param except_groups:
    :param except_access_keys:
    :param start:
    :param limit:
    :return:
    """
    url = self._url_branches_permissions(project_key=project_key, repository_slug=repository_slug)
    # Normalize exemption lists; None defaults avoid the shared-mutable-default pitfall.
    if except_users is None:
        except_users = []
    if except_groups is None:
        except_groups = []
    if except_access_keys is None:
        except_access_keys = []
    headers = self.default_headers
    if multiple_permissions:
        # Caller supplied the whole payload; switch to the bulk content type.
        headers = self.bulk_headers
        restriction = multiple_permissions
    else:
        # Build a single-restriction payload from the individual arguments.
        restriction = {
            "type": permission_type,
            "matcher": {
                "id": matcher_value,
                "displayId": matcher_value,
                "type": {
                    "id": matcher_type.upper(),
                    "name": matcher_type.capitalize(),
                },
                "active": True,
            },
            "users": except_users,
            "groups": except_groups,
            "accessKeys": except_access_keys,
        }
    params = {"start": start, "limit": limit}
    return self.post(url, data=restriction, params=params, headers=headers)
def delete_branch_permission(self, project_key, permission_id, repository_slug=None):
    """
    Deletes a restriction as specified by a restriction id.
    The authenticated user must have REPO_ADMIN permission or higher to call this resource.

    :param project_key:
    :param permission_id:
    :param repository_slug:
    :return:
    """
    return self.delete(self._url_branches_permissions(project_key, permission_id, repository_slug))
def get_branch_permission(self, project_key, permission_id, repository_slug=None):
    """
    Returns a restriction as specified by a restriction id.
    The authenticated user must have REPO_ADMIN permission or higher to call this resource.

    :param project_key:
    :param permission_id:
    :param repository_slug:
    :return:
    """
    return self.get(self._url_branches_permissions(project_key, permission_id, repository_slug))
def all_branches_permissions(self, project_key, permission_id, repository_slug=None):
    """
    Get all branches permissions from a given repo (paged iteration).

    :param project_key:
    :param permission_id:
    :param repository_slug:
    :return:
    """
    return self._get_paged(self._url_branches_permissions(project_key, permission_id, repository_slug))
def _url_branching_model(self, project_key, repository_slug):
return "{}/branchmodel/configuration".format(
self._url_repo(
project_key,
repository_slug,
api_root="rest/branch-utils",
api_version="1.0",
)
)
def get_branching_model(self, project_key, repository_slug):
    """
    Get branching model.

    :param project_key:
    :param repository_slug:
    :return:
    """
    return self.get(self._url_branching_model(project_key, repository_slug))
def set_branching_model(self, project_key, repository_slug, data):
    """
    Set branching model.

    :param project_key:
    :param repository_slug:
    :param data: branching-model configuration payload
    :return:
    """
    return self.put(self._url_branching_model(project_key, repository_slug), data=data)
def enable_branching_model(self, project_key, repository_slug):
    """
    Enable branching model by setting it with default configuration.

    :param project_key:
    :param repository_slug:
    :return:
    """
    # Standard Bitbucket branch types, each enabled with its default prefix.
    branch_types = [
        ("Bugfix", "BUGFIX", "bugfix/"),
        ("Feature", "FEATURE", "feature/"),
        ("Hotfix", "HOTFIX", "hotfix/"),
        ("Release", "RELEASE", "release/"),
    ]
    default_model_data = {
        "development": {"refId": None, "useDefault": True},
        "types": [
            {"displayName": display, "enabled": True, "id": type_id, "prefix": prefix}
            for display, type_id, prefix in branch_types
        ],
    }
    return self.set_branching_model(project_key, repository_slug, default_model_data)
def disable_branching_model(self, project_key, repository_slug):
    """
    Disable branching model.

    :param project_key:
    :param repository_slug:
    :return:
    """
    url = self._url_branching_model(project_key, repository_slug)
    return self.delete(url)
def _url_file(self, project_key, repository_slug, filename):
return "{}/browse/{}".format(self._url_repo(project_key, repository_slug), filename)
def upload_file(self, project_key, repository_slug, content, message, branch, filename):
    """
    Upload new file for given branch.

    :param project_key:
    :param repository_slug:
    :param content:
    :param message: commit message
    :param branch:
    :param filename:
    :return:
    """
    # Sent as multipart form data (files=), as the Bitbucket endpoint expects.
    form = {"content": content, "message": message, "branch": branch}
    return self.put(self._url_file(project_key, repository_slug, filename), files=form)
def update_file(
    self,
    project_key,
    repository_slug,
    content,
    message,
    branch,
    filename,
    source_commit_id,
):
    """
    Update existing file for given branch.

    :param project_key:
    :param repository_slug:
    :param content:
    :param message: commit message
    :param branch:
    :param filename:
    :param source_commit_id: the commit the edit is based on (optimistic lock)
    :return:
    """
    # Multipart form data; sourceCommitId lets the server detect conflicting edits.
    form = {
        "content": content,
        "message": message,
        "branch": branch,
        "sourceCommitId": source_commit_id,
    }
    return self.put(self._url_file(project_key, repository_slug, filename), files=form)
def search_code(self, team, search_query, page=1, limit=10):
    """
    Search repositories for matching code.

    :param team: str
    :param search_query: str
    :param page: result page number
    :param limit: page size (``pagelen``)
    """
    params = {"search_query": search_query, "page": page, "pagelen": limit}
    return self.get(self.resource_url("teams/{}/search/code".format(team)), params=params)
def get_lfs_repo_status(self, project_key, repo):
    """Return whether Git LFS is enabled for the given repository."""
    url = "rest/git-lfs/admin/projects/{}/repos/{}/enabled".format(project_key, repo)
    return self.get(url)
def set_lfs_repo_status(self, project_key, repo, enable=True):
    """Enable (PUT) or disable (DELETE) Git LFS for the given repository."""
    url = "rest/git-lfs/admin/projects/{}/repos/{}/enabled".format(project_key, repo)
    return self.put(url) if enable else self.delete(url)
def _url_repo_conditions(self, project_key, repo_key):
return "{}/conditions".format(
self._url_repo(
project_key,
repo_key,
api_root="rest/default-reviewers",
api_version="1.0",
)
)
def get_repo_conditions(self, project_key, repo_key):
    """
    Request type: GET
    Return a page of defaults conditions with reviewers list (type REPOSITORY or PROJECT)
    that have been configured for this repository slug inside project specified.
    For further information visit:
        https://docs.atlassian.com/bitbucket-server/rest/5.16.0/bitbucket-default-reviewers-rest.html#idm52264928992

    :param project_key: str - project key involved
    :param repo_key: str - repo key involved
    :return: conditions list, or {} when the response is empty
    """
    return self.get(self._url_repo_conditions(project_key, repo_key)) or {}
def get_repo_project_conditions(self, project_key, repo_key):
    """
    Request type: GET
    Return a page of repository conditions (only type PROJECT) with reviewers list associated
    that have been configured for this repository slug inside project specified.
    For further information visit:
        https://docs.atlassian.com/bitbucket-server/rest/5.16.0/bitbucket-default-reviewers-rest.html#idm52264928992

    :param project_key: str - project key involved
    :param repo_key: str - repo key involved
    :return: list of PROJECT-scoped conditions
    """
    conditions = self.get_repo_conditions(project_key, repo_key)
    # Bug fix: the previous implementation deleted items from the list while
    # iterating it (with an index that only advanced on deletion), so adjacent
    # REPOSITORY-scoped entries were skipped. Build a filtered list instead.
    return [condition for condition in conditions if condition["scope"]["type"] != "REPOSITORY"]
def get_repo_repo_conditions(self, project_key, repo_key):
    """
    Request type: GET
    Return a page of repository conditions (only type REPOSITORY) with reviewers list associated
    that have been configured for this repository slug inside project specified.
    For further information visit:
        https://docs.atlassian.com/bitbucket-server/rest/5.16.0/bitbucket-default-reviewers-rest.html#idm52264928992

    :param project_key: str - project key involved
    :param repo_key: str - repo key involved
    :return: list of REPOSITORY-scoped conditions
    """
    conditions = self.get_repo_conditions(project_key, repo_key)
    # Bug fix: the previous implementation deleted items from the list while
    # iterating it (with an index that only advanced on deletion), so adjacent
    # PROJECT-scoped entries were skipped. Build a filtered list instead.
    return [condition for condition in conditions if condition["scope"]["type"] != "PROJECT"]
def _url_repo_condition(self, project_key, repo_key, id_condition=None):
return "{}/condition/{}".format(
self._url_repo(
project_key,
repo_key,
api_root="rest/default-reviewers",
api_version="1.0",
),
"" if id_condition is None else str(id_condition),
)
def get_repo_condition(self, project_key, repo_key, id_condition):
    """
    Request type: GET
    Return a specific condition with reviewers list
    that have been configured for this repository slug inside project specified.
    For further information visit:
        https://docs.atlassian.com/bitbucket-server/rest/5.16.0/bitbucket-default-reviewers-rest.html#idm52264927632

    :param project_key: str - project key involved
    :param repo_key: str - repo key involved
    :param id_condition: int - condition id involved
    :return: condition dict, or {} when the response is empty
    """
    return self.get(self._url_repo_condition(project_key, repo_key, id_condition)) or {}
def create_repo_condition(self, project_key, repo_key, condition):
    """
    Request type: POST
    Create a new condition for this repository slug inside project specified.
    For further information visit:
        https://docs.atlassian.com/bitbucket-server/rest/5.16.0/bitbucket-default-reviewers-rest.html#idm52264908128

    :param project_key: str - project key involved
    :param repo_key: str - repo key involved
    :param condition: dict - condition payload, e.g.
        '{"sourceMatcher":
            {"id":"any",
            "type":{"id":"ANY_REF"}},
            "targetMatcher":{"id":"refs/heads/master","type":{"id":"BRANCH"}},
            "reviewers":[{"id": 12}],"requiredApprovals":"0"
        }'
    :return: created condition, or {} when the response is empty
    """
    return self.post(self._url_repo_condition(project_key, repo_key), data=condition) or {}
def update_repo_condition(self, project_key, repo_key, condition, id_condition):
    """
    Request type: PUT
    Update a specific condition for this repository slug inside project.
    For further information visit:
        https://docs.atlassian.com/bitbucket-server/rest/5.16.0/bitbucket-default-reviewers-rest.html#idm52264927632

    :param project_key: str - project key involved
    :param repo_key: str - repo key involved
    :param condition: dict - condition payload, e.g.
        '{"sourceMatcher":
            {"id":"any",
            "type":{"id":"ANY_REF"}},
            "targetMatcher":{"id":"refs/heads/master","type":{"id":"BRANCH"}},
            "reviewers":[{"id": 12}],"requiredApprovals":"0"
        }'
    :param id_condition: int - condition id involved
    :return: updated condition, or {} when the response is empty
    """
    return self.put(self._url_repo_condition(project_key, repo_key, id_condition), data=condition) or {}
def delete_repo_condition(self, project_key, repo_key, id_condition):
    """
    Request type: DELETE
    Delete a specific condition for this repository slug inside project.
    For further information visit:
        https://docs.atlassian.com/bitbucket-server/rest/5.16.0/bitbucket-default-reviewers-rest.html#idm8287339888

    :param project_key: str - project key involved
    :param repo_key: str - repo key involved
    :param id_condition: int - condition id involved
    :return: {} on empty response
    """
    return self.delete(self._url_repo_condition(project_key, repo_key, id_condition)) or {}
def download_repo_archive(
    self,
    project_key,
    repository_slug,
    dest_fd,
    at=None,
    filename=None,
    format=None,
    path=None,
    prefix=None,
    chunk_size=128,
):
    """
    Downloads a repository archive.
    Note that the data is written to the specified file-like object,
    rather than simply being returned.
    For further information visit:
        https://docs.atlassian.com/bitbucket-server/rest/7.13.0/bitbucket-rest.html#idp199

    :param project_key:
    :param repository_slug:
    :param dest_fd: a file-like object to which the archive will be written
    :param at: string: Optional, the commit to download an archive of; if not supplied,
        an archive of the default branch is downloaded
    :param filename: string: Optional, a filename to include the "Content-Disposition" header
    :param format: string: Optional, the format to stream the archive in; must be one of: zip,
        tar, tar.gz or tgz. If not specified, then the archive will be in zip format.
    :param path: string: Optional, path to include in the streamed archive
    :param prefix: string: Optional, a prefix to apply to all entries in the streamed archive;
        if the supplied prefix does not end with a trailing /, one will be added automatically
    :param chunk_size: int: Optional, download chunk size. Default is 128
    """
    # Doc fix: the docstring previously documented a nonexistent ``paths``
    # parameter (the real one is ``path``) and misspelled "Default".
    url = "{}/archive".format(self._url_repo(project_key, repository_slug))
    # Only send query parameters the caller actually supplied.
    params = {
        key: value
        for key, value in (
            ("at", at),
            ("filename", filename),
            ("format", format),
            ("path", path),
            ("prefix", prefix),
        )
        if value is not None
    }
    headers = {"Accept": "*/*"}
    # advanced_mode=True returns the raw response so the body can be streamed.
    response = self.get(url, params=params, headers=headers, advanced_mode=True)
    for chunk in response.iter_content(chunk_size=chunk_size):
        dest_fd.write(chunk)
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def get_repositories(self, workspace, role=None, query=None, sort=None):
    """
    Get all repositories in a workspace.

    :param workspace:
    :param role: Filters the result based on the authenticated user's role on each repository.
        One of: member, contributor, admin, owner
    :param query: Query string to narrow down the response.
    :param sort: Field by which the results should be sorted.
    :return: list of raw repository data dicts.
    """
    # Deprecated shim over the Cloud client: materializes the paged iterator
    # into a list of plain data dicts.
    return [
        r.data
        for r in Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.each(role=role, q=query, sort=sort)
    ]
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def get_pipelines(self, workspace, repository_slug, number=10, sort_by="-created_on"):
    """
    Get information about latest pipelines runs.

    :param workspace:
    :param repository_slug:
    :param number: number of pipelines to fetch
    :param sort_by: optional key to sort available pipelines by
    :return: List of pipeline data
    """
    # Deprecated shim over the Cloud client; stops iterating once `number`
    # pipelines have been collected.
    values = []
    for p in (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .pipelines.each(sort=sort_by)
    ):
        values.append(p.data)
        if len(values) == number:
            break
    return values
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def trigger_pipeline(self, workspace, repository_slug, branch="master", revision=None, name=None):
    """
    Trigger a new pipeline. The following options are possible (1 and 2
    trigger the pipeline that the branch is associated with in the Pipelines
    configuration):
    1. Latest revision of a branch (specify ``branch``)
    2. Specific revision on a branch (additionally specify ``revision``)
    3. Specific pipeline (additionally specify ``name``)

    :return: the initiated pipeline; or error information
    """
    # Deprecated shim: delegates to the Cloud client's trigger call and
    # returns the raw data dict of the created pipeline.
    return (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .pipelines.trigger(branch=branch, commit=revision, pattern=name)
        .data
    )
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def get_pipeline(self, workspace, repository_slug, uuid):
    """
    Get information about the pipeline specified by ``uuid``.

    :param workspace:
    :param repository_slug:
    :param uuid: Pipeline identifier (with surrounding {}; NOT the build number)
    :return: raw pipeline data dict.
    """
    # Deprecated shim over the Cloud client.
    return (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .pipelines.get(uuid)
        .data
    )
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def stop_pipeline(self, workspace, repository_slug, uuid):
    """
    Stop the pipeline specified by ``uuid``.

    :param workspace:
    :param repository_slug:
    :param uuid: Pipeline identifier (with surrounding {}; NOT the build number)

    See the documentation for the meaning of response status codes.
    """
    # Deprecated shim over the Cloud client; returns the stop() call's result.
    return (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .pipelines.get(uuid)
        .stop()
    )
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def get_pipeline_steps(self, workspace, repository_slug, uuid):
    """
    Get information about the steps of the pipeline specified by ``uuid``.

    :param workspace:
    :param repository_slug:
    :param uuid: Pipeline identifier (with surrounding {}; NOT the build number)
    :return: list of raw step data dicts.
    """
    # Deprecated shim: materializes the Cloud client's step iterator.
    values = []
    for s in (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .pipelines.get(uuid)
        .steps()
    ):
        values.append(s.data)
    return values
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def get_pipeline_step(self, workspace, repository_slug, pipeline_uuid, step_uuid):
    """
    Get information about a step of a pipeline, specified by respective UUIDs.

    :param workspace:
    :param repository_slug:
    :param pipeline_uuid: Pipeline identifier (with surrounding {}; NOT the build number)
    :param step_uuid: Step identifier (with surrounding {})
    :return: raw step data dict.
    """
    # Deprecated shim over the Cloud client.
    return (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .pipelines.get(pipeline_uuid)
        .step(step_uuid)
        .data
    )
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def get_pipeline_step_log(self, workspace, repository_slug, pipeline_uuid, step_uuid):
    """
    Get log of a step of a pipeline, specified by respective UUIDs.

    :param workspace:
    :param repository_slug:
    :param pipeline_uuid: Pipeline identifier (with surrounding {}; NOT the build number)
    :param step_uuid: Step identifier (with surrounding {})
    :return: byte string log
    """
    # Deprecated shim over the Cloud client; returns the raw log bytes.
    return (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .pipelines.get(pipeline_uuid)
        .step(step_uuid)
        .log()
    )
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def create_issue(
    self,
    workspace,
    repository_slug,
    title,
    description="",
    kind="bug",
    priority="major",
):
    """
    Create a new issue in the issue tracker of the given repository.

    :param workspace:
    :param repository_slug:
    :param title:
    :param description:
    :param kind: one of: bug, enhancement, proposal, task
    :param priority: one of: trivial, minor, major, critical, blocker
    :return: raw data dict of the created issue.
    """
    # Deprecated shim over the Cloud client.
    return (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .issues.create(title=title, description=description, kind=kind, priority=priority)
        .data
    )
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def get_issues(self, workspace, repository_slug, sort_by=None, query=None):
    """
    Get information about the issues tracked in the given repository. By
    default, the issues are sorted by ID in descending order.

    :param workspace:
    :param repository_slug:
    :param sort_by: optional key to sort available issues for
    :param query: optional query to filter available issues for. See
        https://developer.atlassian.com/bitbucket/api/2/reference/meta/filtering
        for an overview
    :return: List of issues (direct, i.e. without the 'values' key)
    """
    # Deprecated shim: materializes the Cloud client's issue iterator.
    values = []
    for p in (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .issues.each(q=query, sort=sort_by)
    ):
        values.append(p.data)
    return values
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def get_issue(self, workspace, repository_slug, id):
    """
    Get the issue specified by ``id``.

    :param workspace:
    :param repository_slug:
    :param id: issue id
    :return: raw issue data dict.
    """
    # Deprecated shim over the Cloud client.
    return (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .issues.get(id)
        .data
    )
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def update_issue(self, workspace, repository_slug, id, **fields):
    """
    Update the ``fields`` of the issue specified by ``id``.
    Consult the official API documentation for valid fields.

    :param workspace:
    :param repository_slug:
    :param id: issue id
    :param fields: keyword fields to update on the issue
    :return: raw data dict of the updated issue.
    """
    # Deprecated shim over the Cloud client.
    return (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .issues.get(id)
        .update(**fields)
        .data
    )
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def delete_issue(self, workspace, repository_slug, id):
    """
    Delete the issue specified by ``id``.

    :param workspace:
    :param repository_slug:
    :param id: issue id
    """
    # Deprecated shim over the Cloud client.
    return (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .issues.get(id)
        .delete()
    )
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def add_branch_restriction(
    self,
    workspace,
    repository_slug,
    kind,
    branch_match_kind="glob",
    branch_pattern="*",
    branch_type=None,
    users=None,
    groups=None,
    value=None,
):
    """
    Add a new branch restriction.

    :param workspace:
    :param repository_slug:
    :param value:
    :param kind: One of require_tasks_to_be_completed, force, restrict_merges,
        enforce_merge_checks, require_approvals_to_merge, delete,
        require_all_dependencies_merged, push, require_passing_builds_to_merge,
        reset_pullrequest_approvals_on_change, require_default_reviewer_approvals_to_merge
    :param branch_match_kind: branching_model or glob, if branching_model use
        param branch_type otherwise branch_pattern.
    :param branch_pattern: A glob specifying the branch this restriction should
        apply to (supports * as wildcard).
    :param branch_type: The branch type specifies the branches this restriction
        should apply to. One of: feature, bugfix, release, hotfix, development, production.
    :param users: List of user objects that are excluded from the restriction.
        Minimal: {"username": "<username>"}
    :param groups: List of group objects that are excluded from the restriction.
        Minimal: {"owner": {"username": "<teamname>"}, "slug": "<groupslug>"}
    :return: raw data dict of the created restriction.
    """
    # Deprecated shim over the Cloud client.
    return (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .branch_restrictions.create(
            kind,
            branch_match_kind=branch_match_kind,
            branch_pattern=branch_pattern,
            branch_type=branch_type,
            users=users,
            groups=groups,
            value=value,
        )
        .data
    )
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def get_branch_restrictions(self, workspace, repository_slug, kind=None, pattern=None, number=10):
    """
    Get all branch permissions.

    :param workspace:
    :param repository_slug:
    :param kind: optional restriction kind filter
    :param pattern: optional branch pattern filter
    :param number: maximum number of restrictions to return
    :return: list of raw restriction data dicts.
    """
    # Deprecated shim; stops iterating once `number` items were collected.
    values = []
    for p in (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .branch_restrictions.each(kind=kind, pattern=pattern)
    ):
        values.append(p.data)
        if len(values) == number:
            break
    return values
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def update_branch_restriction(self, workspace, repository_slug, id, **fields):
    """
    Update an existing branch restriction identified by ``id``.
    Consult the official API documentation for valid fields.

    :param workspace:
    :param repository_slug:
    :param id: restriction id
    :param fields: keyword fields to update
    :return: raw data dict of the updated restriction.
    """
    # Deprecated shim over the Cloud client.
    return (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .branch_restrictions.get(id)
        .update(**fields)
        .data
    )
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def delete_branch_restriction(self, workspace, repository_slug, id):
    """
    Delete an existing branch restriction identified by ``id``.

    :param workspace:
    :param repository_slug:
    :param id: restriction id
    """
    # Deprecated shim over the Cloud client.
    return (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .branch_restrictions.get(id)
        .delete()
    )
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def add_default_reviewer(self, workspace, repository_slug, user):
    """
    Add user as default reviewer to the repository.
    Can safely be called multiple times with the same user, only adds once.

    :param workspace:
    :param repository_slug:
    :param user: The username or account UUID to add as default_reviewer.
    :return: raw data dict of the added reviewer.
    """
    # Deprecated shim over the Cloud client.
    return (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .default_reviewers.add(user)
        .data
    )
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def get_default_reviewers(self, workspace, repository_slug, number=10):
    """
    Get all default reviewers for the repository.

    :param workspace:
    :param repository_slug:
    :param number: maximum number of reviewers to return
    :return: list of raw reviewer data dicts.
    """
    # Deprecated shim; stops iterating once `number` items were collected.
    values = []
    for p in (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .default_reviewers.each()
    ):
        values.append(p.data)
        if len(values) == number:
            break
    return values
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def is_default_reviewer(self, workspace, repository_slug, user):
    """
    Check if the user is a default reviewer of the repository.

    :param workspace:
    :param repository_slug:
    :param user: The username or account UUID to check.
    :return: True if present, False if not.
    """
    reviewer = (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .default_reviewers.get(user)
    )
    return reviewer is not None
@deprecated(
    version="2.0.2",
    reason="Use atlassian.bitbucket.cloud instead of atlassian.bitbucket",
)
def delete_default_reviewer(self, workspace, repository_slug, user):
    """
    Remove user as default reviewer from the repository.

    :param workspace:
    :param repository_slug:
    :param user: The username or account UUID to delete as default reviewer.
    """
    # Deprecated shim over the Cloud client.
    return (
        Cloud(self.url, **self._new_session_args)
        .workspaces.get(workspace)
        .repositories.get(repository_slug)
        .default_reviewers.get(user)
        .delete()
    )
|
AstroTech/atlassian-python-api
|
atlassian/bitbucket/__init__.py
|
Python
|
apache-2.0
| 121,356
|
[
"VisIt"
] |
000e57782fa72ae3238387610935d09ca4fa67b154909d8828cb3e818b3c95eb
|
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .posterior import PosteriorExact as Posterior
from ...util.linalg import pdinv, dpotrs, tdot
from ...util import diag
import numpy as np
from . import LatentFunctionInference
log_2_pi = np.log(2*np.pi)
class ExactGaussianInference(LatentFunctionInference):
    """
    An object for inference when the likelihood is Gaussian.

    The function self.inference returns a Posterior object, which summarizes
    the posterior.

    For efficiency, we sometimes work with the cholesky of Y*Y.T. To save repeatedly recomputing this, we cache it.
    """

    def __init__(self):
        pass#self._YYTfactor_cache = caching.cache()

    def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, K=None, variance=None, Z_tilde=None):
        """
        Returns a Posterior class containing essential quantities of the posterior
        """
        # Residual targets after subtracting the (optional) mean function.
        if mean_function is None:
            m = 0
        else:
            m = mean_function.f(X)

        # Noise variance comes from the likelihood unless supplied explicitly.
        if variance is None:
            variance = likelihood.gaussian_variance(Y_metadata)

        YYT_factor = Y-m

        # Prior covariance; only computed when the caller did not pass K in.
        if K is None:
            K = kern.K(X)

        # Ky = K + (variance + 1e-8) * I; the small jitter aids numerical
        # stability of the Cholesky factorization.
        Ky = K.copy()
        diag.add(Ky, variance+1e-8)

        # Wi = Ky^-1, LW = chol(Ky), LWi = LW^-1, W_logdet = log|Ky|.
        Wi, LW, LWi, W_logdet = pdinv(Ky)

        # alpha = Ky^-1 (Y - m), via triangular solves against LW.
        alpha, _ = dpotrs(LW, YYT_factor, lower=1)

        # Gaussian log marginal likelihood, summed over output dimensions.
        log_marginal = 0.5*(-Y.size * log_2_pi - Y.shape[1] * W_logdet - np.sum(alpha * YYT_factor))

        if Z_tilde is not None:
            # This is a correction term for the log marginal likelihood
            # In EP this is log Z_tilde, which is the difference between the
            # Gaussian marginal and Z_EP
            log_marginal += Z_tilde

        # Gradient of the log marginal w.r.t. the prior covariance K.
        dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi)

        dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK), Y_metadata)

        return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL, 'dL_dm':alpha}

    def LOO(self, kern, X, Y, likelihood, posterior, Y_metadata=None, K=None):
        """
        Leave one out error as found in
        "Bayesian leave-one-out cross-validation approximations for Gaussian latent variable models"
        Vehtari et al. 2014.
        """
        g = posterior.woodbury_vector
        c = posterior.woodbury_inv
        c_diag = np.diag(c)[:, None]
        # Per-point negative LOO log predictive density (column vector).
        neg_log_marginal_LOO = 0.5*np.log(2*np.pi) - 0.5*np.log(c_diag) + 0.5*(g**2)/c_diag
        #believe from Predictive Approaches for Choosing Hyperparameters in Gaussian Processes
        #this is the negative marginal LOO
        return -neg_log_marginal_LOO
|
mikecroucher/GPy
|
GPy/inference/latent_function_inference/exact_gaussian_inference.py
|
Python
|
bsd-3-clause
| 2,705
|
[
"Gaussian"
] |
a0472f9307602036d832c22c9aac892e4de4ba198191dc44e454f21e8711b143
|
from __future__ import absolute_import, division, print_function
import ast
from jaspyx.visitor import BaseVisitor
class AugAssign(BaseVisitor):
    """Visitor mixin that emits augmented assignments (``x += y`` etc.)."""

    def visit_AugAssign(self, node):
        # Dispatch to a specialized emitter (AugAssign_Add, AugAssign_Sub, ...)
        # named after the AST operator class, if one exists.
        attr = getattr(self, 'AugAssign_%s' % node.op.__class__.__name__, None)
        if attr is None:
            # Rewrite the expression as an assignment using a BinOp:
            # `target op= value` becomes `target = target op value`.
            # NOTE(review): assumes node.target is an ast.Name (uses .id);
            # attribute/subscript targets would raise here — confirm upstream
            # guarantees.
            self.visit(ast.Assign(
                [node.target],
                ast.BinOp(
                    ast.Name(node.target.id, ast.Load()),
                    node.op,
                    node.value
                )
            ))
        else:
            attr(node.target, node.value)
# Generate one AugAssign_<Op> emitter per simple in-place operator, mapping
# the AST operator class name to the JavaScript operator text.
for key, value in {
    'Add': ' += ',
    'Sub': ' -= ',
    'Mult': ' *= ',
    'Div': ' /= ',
    'Mod': ' %= ',
    'BitAnd': ' &= ',
    'BitOr': ' |= ',
    'BitXor': ' ^= ',
}.items():
    def gen_op(op):
        # Factory that binds `op` immediately, avoiding the late-binding
        # closure pitfall of defining f_op directly in the loop.
        def f_op(self, target, value):
            self.indent()
            self.group(
                [target, value],
                prefix='',
                infix=op,
                suffix='',
            )
            self.finish()
        return f_op
    # NOTE(review): indentation was lost in this view; exec defines
    # AugAssign_<Op> in the scope enclosing this loop — confirm the loop sits
    # inside the class body so the generated functions become methods of
    # AugAssign rather than module-level names.
    exec('AugAssign_%s = gen_op("%s")' % (key, value))
|
ztane/jaspyx
|
jaspyx/visitor/augassign.py
|
Python
|
mit
| 1,275
|
[
"VisIt"
] |
a06302d2f72adc7bfe55117eabd948376421f7b5d4824b63c7b547723e212dfa
|
#!/usr/bin/python
# Filename: basic.py
#
# Code by Martin Jucker, distributed under an MIT License
# Any publication benefitting from this piece of code should cite
# Jucker, M 2014. Scientific Visualisation of Atmospheric Data with ParaView.
# Journal of Open Research Software 2(1):e4, DOI: http://dx.doi.org/10.5334/jors.al
#
# Python interface for ParaView (www.paraview.org). Reads netCDF file on an arbitrary grid, including logarithmic coordinates and time evolution (if present). netCDF file needs to loosely correspond to Climate and Forecast (FC) conventions (https://en.wikipedia.org/wiki/Climate_and_Forecast_Metadata_Conventions).
# Also provides helper functions for common operations.
##### needed modules: paraview.simple, math #########################
from paraview.simple import *
from math import pi,log10
# some global constants
# strPi: pi truncated to 7 characters ('3.14159'), embedded verbatim in
# the ParaView Calculator expression strings built by the functions below.
strPi = str(pi)[0:7]
##### define auxiliary functions ##################################
# define logarithmic coordinate conversion
def ConvertLogCoordString(pString, basis=1e3):
    """Build the Calculator-filter expression for a log-pressure coordinate.

    pString -- the coordinate to convert
    basis   -- basis (surface) pressure to normalize
    Returns the string '-log10(abs(<pString>)/<basis>)' for use inside a
    ParaView Calculator filter.
    """
    return '-log10(abs(%s)/%s)' % (pString, basis)
# do the coordinate conversion inside a Calculator
def Cart2Log(src=None, ratios=[1, 1, 1], logCoords=[], basis=[]):
    """Convert selected coordinates to log form and apply aspect ratios.

    Adds a Calculator filter to the pipeline.
    src       -- filter in pipeline to attach to; defaults to the currently
                 active source at call time
    ratios    -- multiplicative factor per coordinate; same length as # dims
    logCoords -- indices (0 based) of coordinates to be converted
    basis     -- basis to normalize the log argument; length 1 (shared by
                 all log coordinates) or same length as logCoords
    Returns the new Calculator filter.
    """
    # BUG FIX: the default used to be src=GetActiveSource(), which is
    # evaluated once at import time and freezes whatever source was active
    # then. Resolve the active source at call time instead.
    if src is None:
        src = GetActiveSource()
    nVec = ['iHat*', 'jHat*', 'kHat*']
    coords = ['coordsX', 'coordsY', 'coordsZ']
    cFun = coords[:]
    for pp in range(len(logCoords)):
        ci = logCoords[pp]
        # a single basis value is shared by all log coordinates
        if len(basis) == 1:
            bas = basis[0]
        else:
            bas = basis[pp]
        cFun[ci] = ConvertLogCoordString(coords[ci], bas)
    pFun = ''
    for ii in range(len(ratios)):
        if ratios[ii] != 1.0:
            pFun += nVec[ii] + cFun[ii] + '*' + str(ratios[ii]) + ' + '
        else:
            pFun += nVec[ii] + cFun[ii] + ' + '
    calc = Calculator(src)
    calc.Function = pFun[:-3]  # drop the trailing ' + '
    calc.CoordinateResults = 1
    return calc
# convert cartesian coordinates to spherical coordinates
def Cart2Spherical(radius=1.0, src=None):
    """Convert Cartesian to spherical coordinates.

    Assumes X coordinate is longitude, Y coordinate latitude, Z coordinate
    vertical. Adds a Calculator filter to the pipeline.
    radius -- radius of the sphere, where coordZ = basis
    src    -- filter in pipeline to attach to; defaults to the currently
              active source at call time
    Returns the new Calculator filter.
    """
    # BUG FIX: src=GetActiveSource() used to be evaluated at import time,
    # freezing the then-active source; resolve at call time instead.
    if src is None:
        src = GetActiveSource()
    calc = Calculator(src)
    strRad = str(radius)
    try:
        # 3D data: radius grows with the vertical coordinate
        calc.Function = 'iHat*('+strRad+'+coordsZ)*cos(coordsY*'+strPi+'/180)*cos(coordsX*'+strPi+'/180) + jHat*('+strRad+'+coordsZ)*cos(coordsY*'+strPi+'/180)*sin(coordsX*'+strPi+'/180) + kHat*('+strRad+'+coordsZ)*sin(coordsY*'+strPi+'/180)'
    except Exception:
        # 2D data without coordsZ: project onto a sphere of fixed radius.
        # Narrowed from a bare except; NOTE(review): confirm which exception
        # ParaView raises here when coordsZ is unavailable.
        calc.Function = 'iHat*'+strRad+'*cos(coordsY*'+strPi+'/180)*cos(coordsX*'+strPi+'/180) + jHat*'+strRad+'*cos(coordsY*'+strPi+'/180)*sin(coordsX*'+strPi+'/180) + kHat*'+strRad+'*sin(coordsY*'+strPi+'/180)'
    calc.CoordinateResults = 1
    RenameSource('Cart2Spherical', calc)
    return calc
# apply aspect ratios to grid. This might already be done in Cart2Log
def GridAspectRatio(ratios, src=None):
    """Adjust aspect ratio of Cartesian grid: multiplies ratios x coordinates.

    Adds a Calculator filter to the pipeline.
    ratios -- 1-, 2- or 3-vector with multiplicative factors per coordinate
    src    -- filter in pipeline to attach to; defaults to the currently
              active source at call time
    Returns the new Calculator filter.
    Raises ValueError when len(ratios) is not 1, 2 or 3.
    """
    # BUG FIX: src=GetActiveSource() used to be evaluated at import time;
    # resolve the active source at call time instead.
    if src is None:
        src = GetActiveSource()
    calc = Calculator(src)
    if len(ratios) == 1:
        calc.Function = 'iHat*'+str(ratios[0])+'*coordsX'
    elif len(ratios) == 2:
        calc.Function = 'iHat*'+str(ratios[0])+'*coordsX + jHat*'+str(ratios[1])+'*coordsY'
    elif len(ratios) == 3:
        calc.Function = 'iHat*'+str(ratios[0])+'*coordsX + jHat*'+str(ratios[1])+'*coordsY + kHat*'+str(ratios[2])+'*coordsZ'
    else:
        # BUG FIX: the message used to reference the undefined name
        # 'rations', raising NameError instead of the intended ValueError.
        raise ValueError('Aspect ratios must be length 1,2 or 3, but is '+str(len(ratios)))
    calc.CoordinateResults = 1
    return calc
# transform coordinates: logarithmic, aspect ratio
def TransformCoords(src=None, aspectRatios=[1, 1, 1], logCoords=[], basis=[], reverseCoords=[], revCenter=[]):
    """Transform coordinates: optional axis reversal, then log/aspect-ratio.

    src           -- filter to attach to; defaults to the active source at
                     call time
    aspectRatios  -- multiplicative factor per coordinate
    logCoords     -- indices of coordinates to convert to log form
    basis         -- normalization basis passed to Cart2Log
    reverseCoords -- indices of coordinates to reverse (negative index flips
                     the sign convention of the reversal)
    revCenter     -- center of reversal, one entry per reverseCoords entry
    Returns the final Calculator filter.
    """
    # BUG FIX: src=GetActiveSource() used to be evaluated at import time;
    # resolve the active source at call time instead.
    if src is None:
        src = GetActiveSource()
    if len(reverseCoords) > 0:
        nVec = ['iHat*', 'jHat*', 'kHat*']
        nCoor = ['X', 'Y', 'Z']
        revCoor = Calculator(src)
        rFun = ''
        for dim in range(3):
            if dim in reverseCoords or -dim in reverseCoords:
                # find the reverseCoords entry matching this dimension
                for d in range(len(reverseCoords)):
                    if dim == abs(reverseCoords[d]):
                        rd = d
                # negative index: reflect as center+coord; else center-coord
                if reverseCoords[rd] < 0:
                    coorSign = '+'
                else:
                    coorSign = '-'
                rFun += ' +'+nVec[dim]+'('+str(revCenter[rd])+coorSign+'coords'+nCoor[dim]+')'
            else:
                rFun += ' +'+nVec[dim]+'coords'+nCoor[dim]
        revCoor.Function = rFun[2:]  # strip the leading ' +'
        revCoor.CoordinateResults = 1
        src = revCoor
    if len(logCoords) > 0:
        transCoor = Cart2Log(src=src, ratios=aspectRatios, logCoords=logCoords, basis=basis)
    else:
        transCoor = GridAspectRatio(ratios=aspectRatios, src=src)
    return transCoor
#
def MakeSelectable(src=None):
    """Make filter selectable in pipeline browser, but don't show it.

    src -- filter to register; defaults to the active source at call time.
    """
    # BUG FIX: src=GetActiveSource() used to be evaluated at import time;
    # resolve the active source at call time instead.
    if src is None:
        src = GetActiveSource()
    rep = Show(src)
    rep.Visibility = 0
######### read in data, redefine pressure coordinates and change aspect ratio ###############
def LoadData( fileName, ncDims=['lon','lat','pfull'], aspectRatios=[1,1,1], logCoords=[], basis=[], reverseCoords=[], revCenter=[], replaceNaN=True ):
    """Load a netCDF file and transform its coordinates for plotting.

    Adds a NetCDFReader and a coordinate-transform Calculator to the pipeline.
    INPUTS:
        fileName      -- full path and file name of the data to read
        ncDims        -- dimension names inside the netCDF file, excluding
                         time, ordered [x, y, z]
        aspectRatios  -- coordinate scaling [xscale, yscale, zscale]; the z
                         scale is applied after log10 for logarithmic axes
        logCoords     -- indices of logarithmic dimensions ([] for none)
        basis         -- log-normalization basis, one entry per logCoords
        reverseCoords -- indices of dimensions to reverse ([] for none)
        revCenter     -- reversal centers, one entry per reverseCoords
        replaceNaN    -- replace the netCDF FillValue with NaNs
    OUTPUTS:
        output_nc -- the netCDF reader object
        transCoor -- the Calculator filter with transformed coordinates
    """
    # The reader wants the dimensions in the file's own (reversed) order,
    # e.g. ['pfull','lat','lon'], so invert the user-friendly ordering here.
    outputDimensions = ncDims[::-1]
    output_nc = NetCDFReader( FileName=[fileName] )
    if len(outputDimensions) > 0:
        output_nc.Dimensions = '(' + ', '.join(outputDimensions) + ')'
    output_nc.SphericalCoordinates = 0
    output_nc.OutputType = 'Unstructured'
    output_nc.ReplaceFillValueWithNan = replaceNaN
    MakeSelectable()
    RenameSource(fileName, output_nc)
    transCoor = TransformCoords(src=output_nc, aspectRatios=aspectRatios, logCoords=logCoords, basis=basis, reverseCoords=reverseCoords, revCenter=revCenter)
    MakeSelectable()
    if len(logCoords) > 0:
        RenameSource('LogCoor', transCoor)
    else:
        RenameSource('TransCoor', transCoor)
    return output_nc, transCoor
######## convert 2D into 3D data using a variable as third dimension ##############
def Make3D( expandVar, expandDir='z', aspectRatios=[1,1,1], logCoords=[], basis=[], src=None ):
    """Expand any 2D dataset into 3D with the values of a field.

    Make3D takes a 2D dataset and adds a third dimension corresponding to a
    data field.
    INPUTS:
        expandVar    -- name of the variable to use as third dimension
        expandDir    -- direction in which to expand {'x','y','z'}; prefix
                        with '-' to expand in the opposite direction
        aspectRatios -- coordinate scaling [xscale, yscale, zscale]
        logCoords    -- indices of logarithmic dimensions
        basis        -- log-normalization basis, one entry per logCoords
        src          -- source filter to attach to; defaults to the active
                        source at call time
    OUTPUTS:
        make3d  -- Calculator that injects the variable into the coordinates
        trans3d -- Calculator with the transformed 3D field
    Raises Exception when expandDir does not end in x, y or z.
    """
    # BUG FIX: src=GetActiveSource() used to be evaluated at import time;
    # resolve the active source at call time instead.
    if src is None:
        src = GetActiveSource()
    make3d = Calculator(src)
    sign = '+'
    if expandDir[0] == '-':
        sign = '-'
    axis = expandDir.lower()[-1]
    if axis == 'x':
        make3d.Function = sign+'iHat*'+expandVar+' + jHat*coordsX + kHat*coordsY'
    elif axis == 'y':
        make3d.Function = 'iHat*coordsX '+sign+' jHat*'+expandVar+' + kHat*coordsY'
    elif axis == 'z':
        make3d.Function = 'iHat*coordsX + jHat*coordsY '+sign+' kHat*'+expandVar
    else:
        raise Exception("Make3D: expandDir has to be one of x,y,z, but is "+expandDir)
    make3d.CoordinateResults = 1
    trans3d = TransformCoords(src=make3d, aspectRatios=aspectRatios, logCoords=logCoords, basis=basis)
    return make3d, trans3d
######## some other usefull tools #################################################
# transform winds from SI to plot units
def CartWind2Sphere(src=None, zonalComponentName='ucomp', meridionalComponentName='vcomp', secondsPerTimeStep=86400, verticalComponentName='none', ratios=[1,1,1], vertAsp=1 ):
    """Convert wind components from m/s to degrees (and z units) per time step.

    Stores the converted wind as vector 'W'. Specific to spherical geometry:
    assumes coordsX = longitude [deg], coordsY = latitude [deg], coordsZ =
    vertical coordinate. Works with both pressure and height velocity, as
    long as vertAsp = [initial vertical range]/[present vertical range].
    INPUTS:
        src                     -- filter to attach to; defaults to the
                                   active source at call time
        zonalComponentName      -- name of zonal wind component in pipeline
        meridionalComponentName -- name of meridional wind component
        secondsPerTimeStep      -- seconds per time step: 86400 for daily
        verticalComponentName   -- name of vertical component, or 'none'
        ratios                  -- corrections to actually plotted axes
        vertAsp                 -- vertical unit conversion factor; only
                                   needed when a vertical component exists
    OUTPUTS (also added to the pipeline):
        W     -- wind vector Calculator
        norm  -- wind magnitude Calculator ('normW')
        clipS -- Clip removing the south polar cap (poleward of -80 deg)
        clipN -- Clip removing the north polar cap (poleward of +80 deg)
    """
    # BUG FIX: src=GetActiveSource() used to be evaluated at import time;
    # resolve the active source at call time instead.
    if src is None:
        src = GetActiveSource()
    W = Calculator(src)
    if verticalComponentName != 'none':
        W.Function = '(' + \
            'iHat*'+zonalComponentName+'/(6.28*6.4e6*cos(coordsY/'+str(ratios[1])+'*'+strPi+'/180))*360 +' + \
            'jHat*'+meridionalComponentName+'/('+strPi+'*6.4e6)*180 +' + \
            'kHat*'+verticalComponentName+'/'+str(vertAsp) + \
            ')*'+str(secondsPerTimeStep)
    else:
        W.Function = '(' + \
            'iHat*'+zonalComponentName+'/(6.28*6.4e6*cos(coordsY/'+str(ratios[1])+'*'+strPi+'/180))*360 +' + \
            'jHat*'+meridionalComponentName+'/('+strPi+'*6.4e6)*180' + \
            ')*'+str(secondsPerTimeStep)
    W.ResultArrayName = 'W'
    RenameSource('CartWind2Sphere', W)
    MakeSelectable(W)
    # wind magnitude: handy for color, threshold, glyph filters later on
    norm = Calculator(W)
    norm.Function = 'mag(W)'
    norm.ResultArrayName = 'normW'
    RenameSource('normW', norm)
    MakeSelectable(norm)
    # the conversion divides by cos(latitude), which blows up at the poles:
    # cut away anything poleward of +/-80 degrees
    clipS = Clip(norm)
    clipS.ClipType = 'Plane'
    clipS.ClipType.Normal = [0.0, 1.0, 0.0]
    clipS.ClipType.Origin = [0.0, -80.0*ratios[1], 0.0]
    try:  # paraview v5.5+ (older versions have no Invert property)
        clipS.Invert = 0
    except Exception:
        pass
    RenameSource('clipS', clipS)
    MakeSelectable(clipS)
    clipN = Clip(clipS)
    clipN.ClipType = 'Plane'
    clipN.ClipType.Normal = [0.0, -1.0, 0.0]
    clipN.ClipType.Origin = [0.0, 80.0*ratios[1], 0.0]
    try:  # paraview v5.5+
        clipN.Invert = 0
    except Exception:
        pass
    RenameSource('clipN', clipN)
    MakeSelectable(clipN)
    return W, norm, clipS, clipN
# extract the boundaries of a filter
def ExtractBounds(src=None):
    """Return the axis extremities (bounds) of any source filter.

    Inputs:
        src -- filter to extract bounds of; defaults to the currently
               active source at call time
    Outputs:
        bounds -- list of (xmin, xmax [, ymin, ymax [, zmin, zmax]])
    """
    # BUG FIX: src=GetActiveSource() used to be evaluated at import time;
    # resolve the active source at call time instead.
    if src is None:
        src = GetActiveSource()
    return src.GetDataInformation().GetBounds()
## working with a spherical geometry: conversion functions
def Sphere2xyz(coords, lam=None, phi=None):
    """Compute (x, y, z) from spherical (r, lam, phi).

    lam = 0 at the Equator, -90 <= lam <= 90 (latitude); phi = 0 along the
    x-axis, 0 <= phi <= 360 (longitude). Also computes the outward radial
    normal (useful for placing and orienting the camera).
    INPUTS:
        coords -- list/tuple (radius, lambda, phi), or the radius alone
        lam    -- lambda (declination, latitude) when coords is the radius
        phi    -- phi (azimuth, longitude) when coords is the radius
    OUTPUTS:
        xyzPos -- list of corresponding (x, y, z)
        normal -- list (xn, yn, zn) along the radial direction
    """
    from math import pi, sin, cos
    if isinstance(coords, (list, tuple)):
        if len(coords) != 3:
            raise Exception("Sphere2xyz: coords has to be a list of length 3 (r,lambda,phi), or a scalar")
        rr, lam, phi = coords
    else:
        rr = coords
    xyzPos = [rr*cos(lam*pi/180)*cos(phi*pi/180),
              rr*cos(lam*pi/180)*sin(phi*pi/180),
              rr*sin(lam*pi/180)]
    # a second point one unit further out defines the radial normal
    outer = rr + 1
    p1 = [outer*cos(lam*pi/180)*cos(phi*pi/180),
          outer*cos(lam*pi/180)*sin(phi*pi/180),
          outer*sin(lam*pi/180)]
    normal = [a - b for a, b in zip(p1, xyzPos)]
    return xyzPos, normal
#
def xyz2Sphere(coords, y=None, z=None):
    """Compute (r, lam, phi) from Cartesian (x, y, z).

    lam = 0 at the Equator, -90 <= lam <= 90 (latitude); phi = 0 along the
    x-axis, 0 <= phi < 360 (longitude).
    INPUTS:
        coords -- list/tuple (x, y, z), or x alone
        y      -- y coordinate when coords is x
        z      -- z coordinate when coords is x
    OUTPUTS:
        sphPos -- tuple of corresponding (r, lam, phi) in degrees
    Note: (0, 0, 0) still raises ZeroDivisionError, as before.
    """
    from math import sqrt, pi, asin, atan2
    if isinstance(coords, (list, tuple)):
        if len(coords) != 3:
            raise Exception("xyz2Sphere: coords has to be a list of length 3 (x,y,z), or a scalar")
        x, y, z = coords
    else:
        x = coords
    r = sqrt(x*x + y*y + z*z)
    # BUG FIX: the original if/elif chain left phi undefined (NameError)
    # when x == 0 and y == 0, and returned negative phi for x>0, y<0,
    # contradicting the documented 0 <= phi <= 360. atan2 handles every
    # quadrant; fold its (-pi, pi] result into [0, 2*pi).
    phi = atan2(y, x) % (2*pi)
    lam = asin(z/r)
    return (r, lam*180/pi, phi*180/pi)
## some simple helper functions
#
def DeleteAll():
    """Remove every object from the pipeline browser."""
    for source in GetSources().values():
        Delete(source)
#
def HideAll():
    """Turn off visibility of every object in the pipeline browser."""
    for source in GetSources().values():
        Hide(source)
#
def ShowAll():
    """Turn on visibility of every object in the pipeline browser."""
    for source in GetSources().values():
        Show(source)
|
mjucker/pv_atmos
|
basic.py
|
Python
|
mit
| 16,796
|
[
"NetCDF",
"ParaView"
] |
66e03f37ebde96bd5e77a8005d21f380fde61b92743ad858ad48d52dee621ecc
|
#!/usr/bin/env python
#########################################################################################
#
# Extract spinal levels
#
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Karun Raju, Julien Touati
# Modified: 2016-11-28 by jcohenadad
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sys, io, os, shutil, time
import matplotlib
import nibabel
import numpy as np
#=======================================================================================================================
# main
#=======================================================================================================================
def main():
    """Build probabilistic spinal-level volumes from the PAM50 template.

    For each vertebral level, writes a NIFTI volume in ``spinal_levels/``
    holding a Gaussian distribution of the corresponding spinal (nerve
    rootlet) level along z, plus an ``info_label.txt`` index file.
    Mean/variance data come from Figure 3 of Cadotte et al. 2015
    (https://www.ncbi.nlm.nih.gov/pubmed/25523587).
    """
    # initialization
    folder_out = 'spinal_levels/'
    file_infolabel = 'info_label.txt'
    name_level = ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'T1', 'T2', 'T3', 'T4', 'T5', 'T6', 'T7', 'T8', 'T9', 'T10', 'T11', 'T12', 'L1', 'L2', 'L3', 'L4', 'L5']
    # Use agg backend so figures can be saved without a display
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    # get path of the toolbox
    path_sct = os.environ.get("SCT_DIR", os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
    # Create output folder (and delete previous folder if it exists)
    try:
        shutil.rmtree(folder_out, ignore_errors=True)
    except Exception as e:
        print(e)
    os.mkdir(folder_out)
    # Load template files
    fname_template_vertebral_level = os.path.join(path_sct, 'data', 'PAM50', 'template', 'PAM50_levels.nii.gz')
    fname_template_vertebral_cord = os.path.join(path_sct, 'data', 'PAM50', 'template', 'PAM50_cord.nii.gz')
    vertebral_level_file = nibabel.load(fname_template_vertebral_level)
    vertebral_cord_file = nibabel.load(fname_template_vertebral_cord)
    vertebral_level_image = vertebral_level_file.get_data()
    vertebral_cord_image = vertebral_cord_file.get_data()
    hdr_vertebral_level = vertebral_level_file.get_header()
    hdr_vertebral_cord = vertebral_cord_file.get_header()
    # voxel size; pz converts mm distances into z slices below
    px, py, pz = hdr_vertebral_level.get_zooms()
    # Find the centers of the vertebral levels
    vertebral_levels = find_mid_point_vertebral_level(vertebral_level_image)
    nb_vert = len(vertebral_levels)
    # Mean and variance of vertebral centers with respect to the PMJ (mm).
    # Note: these were obtained from Figure 3 in https://www.ncbi.nlm.nih.gov/pubmed/25523587
    vertebral_Mu = np.zeros(nb_vert)
    vertebral_Sigma = np.zeros(nb_vert)
    vertebral_Mu[2], vertebral_Sigma[2] = 69.99, 4.58  # C3
    vertebral_Mu[3], vertebral_Sigma[3] = 86.11, 5.35  # C4
    vertebral_Mu[4], vertebral_Sigma[4] = 101.16, 6.33  # C5
    vertebral_Mu[5], vertebral_Sigma[5] = 115.54, 7.15  # C6
    vertebral_Mu[6], vertebral_Sigma[6] = 130.08, 8.25  # C7
    # Mean and variance of spinal centers with respect to the PMJ (mm)
    spinal_Mu = np.zeros(nb_vert)
    spinal_Sigma = np.zeros(nb_vert)
    spinal_Mu[2], spinal_Sigma[2] = 51.46, 3.15  # C3
    spinal_Mu[3], spinal_Sigma[3] = 65.68, 3.96  # C4
    spinal_Mu[4], spinal_Sigma[4] = 81.08, 5.15  # C5
    spinal_Mu[5], spinal_Sigma[5] = 95.39, 5.68  # C6
    spinal_Mu[6], spinal_Sigma[6] = 109.33, 6.05  # C7
    spinal_Mu[7], spinal_Sigma[7] = 122.77, 7.03  # C8
    # Distance between the known spinal and vertebral centers; the
    # hard-coded range(5) and "+2" offset correspond to the known values
    # from the graph (levels C3..C7 above).
    spinal_vertebral_dist = np.zeros(5)
    for i in range(5):
        spinal_vertebral_dist[i] = vertebral_Mu[i+2] - spinal_Mu[i+2]
    # Linear fit of the distances, used to extrapolate the unknown levels
    popt_spinal_vertebral = np.polyfit(np.arange(3, 8), spinal_vertebral_dist, 1)
    P_fit_spinal_vertebral_dist = np.poly1d(popt_spinal_vertebral)
    # display
    plt.figure()
    plt.plot(np.arange(0, nb_vert), P_fit_spinal_vertebral_dist(np.arange(0, nb_vert)), marker='+')
    plt.plot(np.arange(3, 8), spinal_vertebral_dist, marker='o', linestyle='None')
    plt.title('Spinal-Vertebral distances')
    plt.savefig('fig_spinal-to-VertebralDistance.png')
    # Compute spinal distance from each vertebral level using the fitted
    # distance; divided by pz to account for voxel size (the variance in
    # Cadotte et al. is given in mm).
    spinal_levels = np.zeros(nb_vert)
    for i in range(0, nb_vert):
        spinal_levels[i] = vertebral_levels[i] + P_fit_spinal_vertebral_dist(i) / pz
    # Linear fit of the known spinal variances (C3..C8)
    popt_spinal_sigma = np.polyfit(np.arange(3, 9), spinal_Sigma[2: 8], 1)
    P_fit_spinal_sigma = np.poly1d(popt_spinal_sigma)
    # display
    plt.figure()
    plt.plot(np.arange(0, nb_vert), P_fit_spinal_sigma(np.arange(0, nb_vert)), marker='+')
    plt.title('Spinal_sigma')
    plt.plot(np.arange(3, 9), spinal_Sigma[2:8], marker='o', linestyle='None')
    plt.savefig('fig_spinalVariance.png')
    # Compute spinal variance using the fitted variance (mm -> slices via pz)
    for i in range(0, nb_vert):
        spinal_Sigma[i] = P_fit_spinal_sigma(i) / pz
    # Display spinal levels
    plt.figure()
    for i in range(nb_vert):
        y = gaussian(np.arange(0, vertebral_level_image.shape[2], 1), spinal_levels[i], spinal_Sigma[i])
        plt.plot(np.arange(0, vertebral_level_image.shape[2], 1), y)
    plt.title('Probabilistic distribution of spinal level')
    plt.savefig('fig_spinalLevels.png')
    # Creating an image for each spinal level
    X, Y, Z = np.where(vertebral_cord_image > 0)
    file_name = [None] * nb_vert
    for i in range(nb_vert):
        template_spinal_image = vertebral_cord_image.copy()
        # weight every in-cord voxel of each axial slice by the Gaussian
        for iz in range(np.amax(Z), np.amin(Z)-1, -1):
            x, y = np.where(vertebral_cord_image[:, :, iz] > 0)
            template_spinal_image[x, y, iz] = gaussian(iz, spinal_levels[i], spinal_Sigma[i])
        # Write NIFTI volumes
        hdr_vertebral_cord.set_data_dtype('uint8')  # set imagetype to uint8
        print('\nWrite NIFTI volumes...')
        img = nibabel.Nifti1Image(template_spinal_image, None, hdr_vertebral_cord)
        file_name[i] = 'spinal_level_'+str(i+1).zfill(2)+'.nii.gz'
        nibabel.save(img, os.path.join(folder_out, file_name[i]))
        print(' File created:' + file_name[i])
    # create info_label.txt file
    # BUG FIX: the original call had a misplaced closing parenthesis --
    # io.open(os.path.join(folder_out, file_infolabel,) 'w') -- which is a
    # SyntaxError; the path argument is now closed before the mode.
    fid_infolabel = io.open(os.path.join(folder_out, file_infolabel), 'w')
    # Write date and time
    fid_infolabel.write('# Spinal levels labels - generated on ' + time.strftime('%Y-%m-%d') + '\n')
    fid_infolabel.write('# Keyword=IndivLabels (Please DO NOT change this line)\n')
    fid_infolabel.write('# ID, name, file\n')
    for i in range(nb_vert):
        fid_infolabel.write('%i, %s, %s\n' % (i, 'Spinal level ' + name_level[i], file_name[i]))
    fid_infolabel.close()
#==================================================================================
def find_mid_point_vertebral_level(data):
    """Return, for each vertebral level, the z index of the level's mid-point.

    data -- labeled 3D volume where voxel value i marks vertebral level i
    Returns an array of length max(data); entry i-1 holds the mid z of level i.
    """
    n_levels = int(np.amax(data))
    mid_points = np.zeros(n_levels)
    for level in range(int(np.amin(data)) + 1, n_levels + 1):
        # z coordinates of every voxel belonging to this level
        z_vals = np.where(data == level)[2]
        z_min = np.amin(z_vals)
        mid_points[level - 1] = z_min + round((np.amax(z_vals) - z_min) / 2)
    return mid_points
#==================================================================================
def gaussian(x, mu, sig):
    """Normalized Gaussian pdf with mean mu and standard deviation sig."""
    deviation = x - mu
    return np.exp(-(deviation * deviation) / (2 * sig * sig)) / np.sqrt(2 * np.pi * sig * sig)
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
# Script entry point: run the spinal-level extraction pipeline.
main()
|
neuropoly/spinalcordtoolbox
|
dev/spinal_level/sct_extract_spinal_levels.py
|
Python
|
mit
| 12,573
|
[
"Gaussian"
] |
59a7f2c6f5445dd0a4fc6b9049a2682437d76db6a468a079bd8d6d6a2886bc39
|
# -*- coding: utf-8 -*-
"""A cool pool of tools for PyBEL."""
from .version import get_version # noqa: F401
|
pybel/pybel-tools
|
src/pybel_tools/__init__.py
|
Python
|
mit
| 111
|
[
"Pybel"
] |
94a7803bf7c9fe74b2f60104dac0439e36394706c26176029d8c522b20700ef8
|
#!/usr/bin/env python
########################################################################
# $Header: /local/reps/dirac/DIRAC3/scripts/dirac-version,v 1.3 2008/03/22 10:39:02 rgracian Exp $
# File : dirac-version
# Author : Ricardo Graciani
########################################################################
"""
print version of current DIRAC installation
"""
# CVS revision identifier kept for traceability of this script version.
__RCSID__ = "$Id: dirac-version,v 1.3 2008/03/22 10:39:02 rgracian Exp $"
import DIRAC
# NOTE(review): Python 2 print statement -- this script will not run under
# Python 3 as written.
print DIRAC.version
|
rajanandakumar/DIRAC
|
Core/scripts/dirac-version.py
|
Python
|
gpl-3.0
| 484
|
[
"DIRAC"
] |
31cca73682ab9fca44a27d8cd6f6178f395375cedfb91fc9e0106cef6c639985
|
#!/usr/bin/env python
'''
IP/EA-RADC calculations for closed-shell N2
'''
from pyscf import gto, scf, adc
# Build the N2 molecule at its experimental bond length (Angstrom)
mol = gto.Mole()
r = 1.098
mol.atom = [
['N', ( 0., 0. , -r/2 )],
['N', ( 0., 0. , r/2)],]
mol.basis = {'N':'aug-cc-pvdz'}
mol.build()
# Restricted Hartree-Fock reference with tight convergence
mf = scf.RHF(mol)
mf.conv_tol = 1e-12
mf.kernel()
# ADC solver built on the converged SCF reference
myadc = adc.ADC(mf)
#IP-RADC(2) for 1 root (default method and method_type)
myadc.verbose = 6
eip,vip,pip,xip = myadc.kernel()
#EA-RADC(2)-x for 1 root
myadc.method = "adc(2)-x"
myadc.method_type = "ea"
eea,vea,pea,xea = myadc.kernel()
#Get EA-RADC(2)-x eigenvector analysis only (skip property computation)
myadc.compute_properties = False
myadc.analyze()
#EA-RADC(3) for 3 roots and properties
myadc.compute_properties = True
myadc.method = "adc(3)"
myadc.method_type = "ea"
eea,vea,pea,xea = myadc.kernel(nroots = 3)
myadc.analyze()
|
sunqm/pyscf
|
examples/adc/01-closed_shell.py
|
Python
|
apache-2.0
| 795
|
[
"PySCF"
] |
03929b5e8986aaaf1901952cdbaf776ec21e272b038feca152b3032d64036faa
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# Root URL configuration: static pages, admin, auth, and app-specific routes.
urlpatterns = [
url(r'^$', TemplateView.as_view(
template_name='pages/home.html'), name='home'),
url(r'^about/$',
TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include('tunza_v2.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
# Patient management
url(r'^patients/', include("register.urls", namespace="patients")),
url(r'^reminders/', include("reminder.urls", namespace="reminders")),
url(r'^services/', include("services.urls", namespace="services")),
url(r'^call_engine/', include("call_engine.urls", namespace="calls")),
# serve user-uploaded media in development
url(r'^reports/', include("reports.urls", namespace="reports")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request,
kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied,
kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found,
kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
|
omwomotieno/tunza_v3
|
config/urls.py
|
Python
|
mit
| 1,872
|
[
"VisIt"
] |
4d72dca396a8a5115cbb407ba98889da4dce04f5369be4b3a68a98cbef846433
|
"""
@name: PyHouse_Install/src/Install/Utility.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2016 by D. Brian Kimmel
@license: MIT License
@note: Created on Oct 13, 2015
@Summary:
"""
# Import system type stuff
import getpass
import os
import subprocess
try:
import pwd
except ImportError:
import Install.test.win_pwd as pwd
class Utilities(object):
    """Installer helpers: user lookup and privileged directory creation."""

    @staticmethod
    def must_not_be_root():
        """Abort the installation when run as root (sudo must not be used)."""
        if getpass.getuser() == 'root':
            exit('You must not be root (no sudo)! - Aborting!')

    @staticmethod
    def get_user_ids(p_user_name):
        """Return the (uid, gid) pair for the given user name."""
        entry = pwd.getpwnam(p_user_name)
        return entry.pw_uid, entry.pw_gid

    @staticmethod
    def is_dir(p_path):
        """Return True when p_path names an existing directory."""
        return os.path.isdir(p_path)

    @staticmethod
    def MakeDir(p_dir_name, p_user_name):
        """Create p_dir_name via sudo and hand ownership to p_user_name."""
        l_uid, l_gid = Utilities.get_user_ids(p_user_name)
        if os.path.isdir(p_dir_name):
            print(' *** Directory {} already exists.'.format(p_dir_name))
            return
        print(' Creating a directory {}'.format(p_dir_name))
        subprocess.call(['sudo', 'mkdir', p_dir_name])
        subprocess.call(['sudo', 'chown', str(l_uid), str(p_dir_name)])
        subprocess.call(['sudo', 'chgrp', str(l_gid), str(p_dir_name)])
def getserial():
    """Return the CPU serial number read from /proc/cpuinfo.

    Scans /proc/cpuinfo for a line starting with 'Serial' (present on
    Raspberry Pi hardware) and returns the 16-character value sliced
    from it.  Returns "0000000000000000" when no Serial line exists and
    "ERROR000000000" when the file cannot be read at all.
    """
    cpuserial = "0000000000000000"
    try:
        # 'with' guarantees the file is closed even if reading raises
        # (the original closed it only on the success path).
        with open('/proc/cpuinfo', 'r') as l_file:
            for line in l_file:
                # A Pi's serial line looks like: "Serial\t\t: 0000000012345678"
                if line[0:6] == 'Serial':
                    cpuserial = line[10:26]
    except (IOError, OSError):
        # Narrowed from a bare 'except:' so programming errors are not
        # silently swallowed; missing/unreadable file yields the sentinel.
        cpuserial = "ERROR000000000"
    return cpuserial
# ## END DBK
|
DBrianKimmel/PyHouse_Install
|
src/Install/Utility.py
|
Python
|
mit
| 1,820
|
[
"Brian"
] |
7550edcd73b1c5ff41c1506b231ee8281dce864694b7fe4948d7c46c37b1df77
|
typo_dict = {
'steele stake': 'steel stake',
'gas mowe': 'gas mower',
'metal plate cover gcfi': 'metal plate cover gfci',
'lawn sprkinler': 'lawn sprinkler',
'ourdoor patio tile': 'outdoor patio tile',
'6 teir shelving': '6 tier shelving',
'storage shelve': 'storage shelf',
'American Standard Bone round toliet': 'American Standard Bone round toilet',
'6 stell': '6 steel',
'fece posts metal': 'fence posts metal',
'cushions outdoorlounge': 'cushions outdoor lounge',
'pricepfister kitchen faucet g135': 'price pfister kitchen faucet g135',
'glaciar bay toiled': 'glacier bay toilet',
'glacie bay dual flush': 'glacier bay dual flush',
'glacier bay tiolet tank lid': 'glacier bay toilet tank lid',
'handycap toilets': 'handicap toilets',
'high boy tolet': 'highboy toilet',
'gas wayer heaters': 'gas water heaters',
'basemetnt window': 'basement window',
'rustollum epoxy': 'rustoleum epoxy',
'air /heaterconditioner window': 'air /heat conditioner window',
'spliter ac unit': 'splitter ac unit',
'berh deck over': 'behr deck over',
'28 snow thower': '28 snow thrower',
'base board molding boundle': 'baseboard molding bundle',
'1 infloor flange': '1 in floor flange',
'10 window sping rod': '10 window spring rod',
'combo powertool kit': 'combo power tool kit',
'desalt impact 18': 'dewalt impact 18',
'rigid lithium ion batteries fuego drill': 'ridgid lithium ion batteries fuego drill',
'fiberglass repir kit': 'fiberglass repair kit',
'portable air condtioners': 'portable air conditioners',
'wall pannels': 'wall panels',
'2X4 SRUDS': '2X4 STUDS',
'frostking window shrink film': 'frost king window shrink film',
'Florescent Light Bulbs': 'Fluorescent Light Bulbs',
'violet flourescent light': 'violet fluorescent light',
'lawn mower- electic': 'lawn mower- electric',
'closetmade': 'closetmaid',
'greecianmarble floor tile': 'grecian marble floor tile',
'join compound wall tile': 'joint compound wall tile',
'montagnia contina floor tile': 'montagna cortina floor tile',
'porcelin floor tile 6x24': 'porcelain floor tile 6x24',
'three wayy': 'three way',
'incide wall heater': 'inside wall heater',
'westminster pedistal combo': 'westminster pedestal combo',
'water softners': 'water softeners',
'miricale': 'miracle',
'sliding windos locks': 'sliding window locks',
'20v dewalt kombo': '20v dewalt combo',
'DEWALT VACCUM': 'DEWALT VACUUM',
'lithium 20 dewalt': 'lithium 20v dewalt',
'water heather': 'water heater',
'riobi blower vac 9056': 'ryobi blower vac 9056',
'DRAWEER PULLS': 'DRAWER PULLS',
'bagged cinder mulch': 'bagged cedar mulch',
'hindges': 'hinges',
'chair rail hieght': 'chair rail height',
'celling light': 'ceiling light',
'tub repair kit procelian': 'tub repair kit porcelain',
'dewalr tools': 'dewalt tools',
'zinc plated flatbraces': 'zinc plated flat braces',
'cieling': 'ceiling',
'control celing fan': 'control ceiling fan',
'roll roofing lap cemet': 'roll roofing lap cement',
'cedart board': 'cedar board',
'lg stcking kit': 'lg stacking kit',
'ajustable ladder feet': 'adjustable ladder feet',
'milwakee M12': 'milwaukee M12',
'garden sprayer non pump': 'garden sprayer no pump',
'roof rdge flashing': 'roof edge flashing',
'cable prime line emergensy open': 'cable prime line emergency open',
'roybi l18v': 'ryobi l18v',
'milwaukee 18-volt lithium-ion cordlessrotary hammerss': 'milwaukee 18-volt lithium-ion cordless rotary hammers',
'bath sinnk': 'bath sink',
'bathro sinks': 'bathroom sinks',
'bathroom pedelal sink': 'bathroom pedestal sink',
'epoxy concrete pain': 'epoxy concrete paint',
'pool suppll': 'pool supply',
'3-3 galvinized tubing': '3-3 galvanized tubing',
'portable air conditionar and heater': 'portable air conditioner and heater',
'vynal windows': 'vinyl windows',
'aluminun tread plate': 'aluminum tread plate',
'3/4 vlve': '3/4 valve',
'kitchen ceiling lightening': 'kitchen ceiling lighting',
'led fixtues for the kitchen': 'led fixtures for the kitchen',
'wall design cermic': 'wall design ceramic',
'door chim buttons': 'door chime buttons',
'plastice corrugated panels': 'plastic corrugated panels',
'doors gaurds': 'doors guards',
'24 inche sink and vanity for bath': '24 inch sink and vanity for bath',
'24 swantone vanity top': '24 swanstone vanity top',
'40 wattsolar charged lights': '40 watt solar charged lights',
'buikids toilet seat': 'buy kids toilet seat',
'toliet seats': 'toilet seats',
'land scaping timbers': 'landscaping timbers',
'everblit heavy duty canvas dropcloth': 'everbilt heavy duty canvas drop cloth',
'3/4 sharkbits': '3/4 sharkbite',
'bath rom toilets': 'bathroom toilets',
'alumanam sheets': 'aluminum sheets',
'huskvarna': 'husqvarna',
'treate 2x4': 'treated 2x4',
'12000 btuair conditioners window': '12000 btu air conditioners window',
'air conditioner vbration': 'air conditioner vibration',
'heith-zenith motion lights': 'heath-zenith motion lights',
'small paint rollerss': 'small paint rollers',
'fencde posts': 'fence posts',
'knoty pine fencing': 'knotty pine fencing',
'metal sheet underpenning': 'metal sheet underpinning',
'plastic untility shelves': 'plastic utility shelves',
'christmass lights': 'christmas lights',
'garlend lights': 'garland lights',
'ceilig fan mount': 'ceiling fan mount',
'paito table and chairs': 'patio table and chairs',
'glacier bay one pice flapper': 'glacier bay one piece flapper',
'dcanvas drop cloth': 'canvas drop cloth',
'lawn mowre covers': 'lawn mower covers',
'vaccum for dw745': 'vacuum for dw745',
'Club cadet primer bulb': 'Cub cadet primer bulb',
'interior door lcoks': 'interior door locks',
'dremel toll kit': 'dremel tool kit',
'round up nozzle replacment': 'roundup nozzle replacement',
'ceder mulch': 'cedar mulch',
'sikalatexr concrete vonding adhesive': 'sikalatex concrete bonding adhesive',
'rigid air compressor': 'ridgid air compressor',
'garge doors': 'garage doors',
'ridding mowers': 'riding mowers',
'ridiing lawnmower': 'riding lawn mower',
'sliding mirror bathroom medicn cabinets': 'sliding mirror bathroom medicine cabinets',
'pastic qtr round': 'plastic quarter round',
'robutussin dh 835 replacement wick': 'robitussin dh 835 replacement wick',
'brick wall panles': 'brick wall panels',
'kitchen floor tikles': 'kitchen floor tiles',
'buffer polishewr': 'buffer polisher',
'keorsene heater wicks': 'kerosene heater wicks',
'1x6 cedar boaed': '1x6 cedar board',
'infered heaters': 'infrared heaters',
'1-1/2in. x 1ft. blk pipe': '1-1/2in. x 1 ft. black pipe',
'show me all 60 inch vaniteis': 'show me all 60 inch vanities',
'cieling fan': 'ceiling fan',
'instant waater heater gas lp': 'instant water heater gas lp',
'woodebn fence panels': 'wooden fence panels',
'hardiboard siding': 'hardie board siding',
'craft an lawn mower': 'craftsman lawn mower',
'kohler wellworth tpoilet': 'kohler wellworth toilet',
'moen dhower faucet': 'moen shower faucet',
'dewalt hand toolsg saw cordless': 'dewalt hand tools saw cordless',
'hindged l bracket': 'hinged l bracket',
'ceiling fan canopie for flst ceiling': 'ceiling fan canopy for flat ceiling',
'furnance vent delfector': 'furnace vent deflector',
'flourescent shop light': 'fluorescent shop light',
'bateries': 'batteries',
'bath wall tile chanpayne': 'bath wall tile champagne',
'floor ceramick': 'floor ceramic',
'stone are mb11': 'stone care mb11',
'traffic master porcelin ceramic tile portland stone': 'trafficmaster porcelain ceramic tile portland stone',
'celing fans hampton bay': 'ceiling fans hampton bay',
'outdoor ceilikng fan with light': 'outdoor ceiling fan with light',
'36in vinale fence': '36in vinyl fence',
'extention ladder little gaint': 'extension ladder little giant',
'closet rod 8 n9ickel': 'closet rod 8 nickel',
'closetmaid wire eight itier organizer': 'closetmaid wire eight tier organizer',
'shorten pendent lighting': 'shorten pendant lighting',
'chainlink gate': 'chain link gate',
'4 flourescent': '4 fluorescent',
'lithium batties': 'lithium batteries',
'24x73 book shelve case white': '24x73 bookshelf case white',
'linoliuml adhesive': 'linoleum adhesive',
'vynal flooring': 'vinyl flooring',
'vynal grip strip': 'vinyl grip strip',
'hagchet': 'hatchet',
'frameless mirro mount': 'frameless mirror mount',
'microwarve cart': 'microwave cart',
'mosia grout sealer': 'mosaic grout sealer',
'backsplach': 'backsplash',
'dimable ceiling strip lights': 'dimmable ceiling strip lights',
'lithum leaf blower': 'lithium leaf blower',
'rayoby batteries': 'ryobi batteries',
'pressure washerparts': 'pressure washer parts',
'rigid 18v lituim ion nicad': 'ridgid 18v lithium ion nicad',
'artric air portable': 'arctic air portable',
'8ft wht veranda post sleeve': '8 ft white veranda post sleeve',
'vynal fence': 'vinyl fence',
'solar naturlas salt': 'solar naturals salt',
'metl flashing': 'metal flashing',
'dog fence batt': 'dog fence battery',
'onda pressure washer': 'honda pressure washer',
'pressue washer': 'pressure washer',
'fridgdare air conditioners': 'frigidaire air conditioners',
'double pain windows': 'double pane windows',
'round flat topmetal post caps': 'round flat top metal post caps',
'1/2\' plyweood': '1/2\' plywood',
'ddummy door knobs interior': 'dummy door knobs interior',
'robi battery lawn trimmer': 'ryobi battery lawn trimmer',
'weewacker edger': 'weed wacker edger',
'prunning shears': 'pruning shears',
'steel enrty doors': 'steel entry doors',
'forimca': 'formica',
'satin nickle door hinge 4 in': 'satin nickel door hinge 4 in',
'garden hose repir cuplings': 'garden hose repair couplings',
'1/3 hoursepower garbage disposal': '1/3 horsepower garbage disposal',
'chicken wire 16 gauze': 'chicken wire 16 gauge',
'wheelbarow': 'wheelbarrow',
'didger': 'dodger',
'hhigh efficiency round toilet in white': 'high efficiency round toilet in white',
'accordian door venetian': 'accordion door venetian',
'patio flurniture covers': 'patio furniture covers',
'through thewall air conditioner': 'through the wall air conditioner',
'Whirpool washer': 'Whirlpool washer',
'4x6treaded wood': '4x6 treated wood',
'preature treated lumber 2in. x12in.x12 ft.': 'pressure treated lumber 2in. x 12 in.x 12 ft.',
'closetmade wood': 'closetmaid wood',
'steam cleanerm mop': 'steam cleaner mop',
'steqamers': 'steamers',
'pendant shads': 'pendant shades',
'battery operated flashingm light': 'battery operated flashing light',
'metal flexable water hose': 'metal flexible water hose',
'air filter for lawn equitment': 'air filter for lawn equipment',
'fiber glass pip insulation': 'fiberglass pipe insulation',
'insallation': 'installation',
'insullation': 'insulation',
'contracor string light': 'contractor string light',
'gas furnace and hotwater': 'gas furnace and hot water',
'rust oleum cabinet stain kit': 'rustoleum cabinet stain kit',
'sjhelf': 'shelf',
'small brackets for selves': 'small brackets for shelves',
'hecurles': 'hercules',
'anderson window grate': 'andersen window grate',
'anderson windows': 'andersen windows',
'lasron slider windows': 'larson slider windows',
'samsung 25.6 french door refridgerator': 'samsung 25.6 french door refrigerator',
'closet doors oganizers': 'closet doors organizers',
'koehler cimarron bathroom sink': 'kohler cimarron bathroom sink',
'kohler pedestal sink cimeron': 'kohler pedestal sink cimarron',
'cover for pole structue': 'cover for pole structure',
'drils': 'drills',
'surface mount channe': 'surface mount channel',
'outside corner- dentil': 'outside corner- dental',
'14heightx24withx15depth air conditioner': '14 heightx24 with 15 depth air conditioner',
'r30 demin insulation': 'r30 denim insulation',
'6 metal tee posts': '6 metal t posts',
'metal fence postsd': 'metal fence posts',
'aluminum l cahnnel': 'aluminum l channel',
'conner trim moulding': 'corner trim moulding',
'cornor board': 'corner board',
'pvc planel glue': 'pvc panel glue',
'3 in 1 vacum, ryobi': '3 in 1 vacuum, ryobi',
'toliet bowl rebuilding kits': 'toilet bowl rebuilding kits',
'swing set accesories': 'swing set accessories',
'ventenatural gas heater': 'vented natural gas heater',
'square ube wood': 'square cube wood',
'swivrl wood anchors': 'swivel wood anchors',
'ge gridle': 'ge griddle',
'pendant shafe': 'pendant shade',
'3/8 pipe galvinized': '3/8 pipe galvanized',
'vaporbarrier, crawl space': 'vapor barrier, crawl space',
'self sealant membrane': 'self sealing membrane',
'husky work bemch': 'husky work bench',
'vanity light fictures': 'vanity light fixtures',
'bed frames headboaed': 'bed frames headboard',
'replace plasticbathroom towel holder': 'replace plastic bathroom towel holder',
'whirlpool diswasher weather stripping': 'whirlpool dishwasher weather stripping',
'36 inch front dooe with casing': '36 inch front door with casing',
'glass back doorr': 'glass back door',
'pre hu door': 'pre hung door',
'backsplash paneks': 'backsplash panels',
'jeffery court mozaic tile': 'jeffrey court mosaic tile',
'floo shets': 'floor sheets',
'gazhose for dryer machine': 'gas hose for dryer machine',
'electric fireplacewater heaters': 'electric fireplace water heaters',
'ceiling mounted lighting fixures': 'ceiling mounted lighting fixtures',
'tools bloowers': 'tools blowers',
'artifical ground cover': 'artificial ground cover',
'waxhers and electric dryers': 'washers and electric dryers',
'outdoor tilees': 'outdoor tiles',
'owens corning ashingles': 'owens corning shingles',
'peper towel holder wall mount': 'paper towel holder wall mount',
'genecrac generators': 'generac generators',
'robyi gas weeder': 'ryobi gas weeder',
'acrtlic tape': 'acrylic tape',
'foam insulaion panels': 'foam insulation panels',
'rumbl;estone': 'rumblestone',
'famed sliding door $289.00': 'framed sliding door $289.00',
'padio door': 'patio door',
'cement boards ciding': 'cement boards siding',
'upholstry': 'upholstery',
'miror interior doors': 'mirror interior doors',
'recessed medicien cabinet': 'recessed medicine cabinet',
'bulked washed sand and gravel': 'bulk washed sand and gravel',
'sheet stock floorinh': 'sheet stock flooring',
'polycarbonite': 'polycarbonate',
'dedwalt cordless drill': 'dewalt cordless drill',
'ryobi power chalking gun': 'ryobi power caulking gun',
'poulan pro lawn motor blades': 'poulan pro lawn mower blades',
'diining set outdoor': 'dining set outdoor',
'granite countertop glu': 'granite countertop glue',
'cyculer saw': 'circular saw',
'kitchenaid frenchdoor ref': 'kitchenaid french door ref',
'rigid wet dry vac': 'ridgid wet dry vac',
'whirlpool caprios 4.3': 'whirlpool cabrio 4.3',
'micro wave ovens': 'microwave ovens',
'8 valleta edger': '8 valletta edger',
'decking hardsware': 'decking hardware',
'utility traiter': 'utility trailer',
'ceilin storage': 'ceiling storage',
'white wall bathroon cabinets': 'white wall bathroom cabinets',
'tsnkless hot water heater': 'tankless hot water heater',
'weed killer consertrated': 'weed killer concentrate',
'milwaukee ha,,er drill': 'milwaukee hammer drill',
'23 ince': '23 inch',
'stone outside tile': 'stone outdoor tile',
'galvanized outdoor celing fan': 'galvanized outdoor ceiling fan',
'oil rubbered bronze dor': 'oil rubbed bronze door',
'vynik tiles peel stick': 'vinyl tiles peel stick',
'window aircondiioner 12000 but': 'window air conditioner 12000 btu',
'60 lb hi strength concrete': '60 lb high strength concrete',
'plexy glass 24 x 24': 'plexiglass 24 x 24',
'porch liht fixture': 'porch light fixture',
'moving trollie': 'moving trolley',
'shoipping cart': 'shopping cart',
'accesory bags': 'accessory bags',
'garage door 70 lb extention spring': 'garage door 70 lb extension spring',
'riobi shop vac filter': 'ryobi shop vac filter',
'wet carpet cleaninig': 'wet carpet cleaning',
'pvd electrical conduit': 'pvc electrical conduit',
'roller up window blinds': 'roll up window blinds',
'uplihght': 'uplight',
'metal shelfs': 'metal shelves',
'dewalt 20v recepicating saw': 'dewalt 20v reciprocating saw',
'outdooor carpet': 'outdoor carpet',
'step latter': 'step ladder',
'kitchen cabinte hardware blue knob': 'kitchen cabinet hardware blue knob',
'pivotangle lock hinge': 'pivot angle lock hinge',
'plasticl panels': 'plastic panels',
'varigated fiber board': 'variegated fiber board',
'battery chages': 'battery charges',
'1/2 inch blk iron coupling': '1/2 inch black iron coupling',
'defiant led armer max': 'defiant led armormax',
'defiant led ight': 'defiant led light',
'led flashlightts': 'led flashlights',
'pfister pasedena 4 center set faucet': 'pfister pasadena 4 center set faucet',
'meguire plastic cleaner': 'meguiars plastic cleaner',
'single board pannel': 'single board panel',
'foundation fent covers': 'foundation vent covers',
'bottom freezer refrdgerators': 'bottom freezer refrigerators',
'colbolt drill bits': 'cobalt drill bits',
'soundfroofing material': 'soundproofing material',
'hanging light masn gar': 'hanging light mason jar',
'drywall mudd': 'drywall mud',
'delta bathroom falcet': 'delta bathroom faucet',
'ridgid 10000 watt': 'rigid 10000 watt',
'pvc edgetape white': 'pvc edge tape white',
'fireplace mantle': 'fireplace mantel',
'drop in sink ovel': 'drop in sink oval',
'40ft aluminumm ladder': '40 ft aluminum ladder',
'rigid shop vac filter': 'ridgid shop vac filter',
'moen single handle valvue rebuild': 'moen single handle valve rebuild',
'hunter ceiling fans accesories strip': 'hunter ceiling fans accessories strip',
'wheel barrel': 'wheelbarrow',
'16 aluminuim ladder': '16 aluminum ladder',
'1/2\' olastic pipe': '1/2\' plastic pipe',
'moen 7570 single hanlel faucet': 'moen 7570 single handle faucet',
'padtio heater': 'patio heater',
'rachet scret drivers': 'ratchet screwdrivers',
'water fountain nozle': 'water fountain nozzle',
'rigid sander': 'ridgid sander',
'anderson 4000 windows': 'andersen 4000 windows',
'doublew stainless': 'double stainless',
'milwakee m12 cordless heated jacket': 'milwaukee m12 cordless heated jacket',
'french door scree doorsscreen door': 'french door screen doors screen door',
'samsung refridegrator': 'samsung refrigerator',
'flurorescent light bulbs': 'fluorescent light bulbs',
'phillips 40t12cw plus florescent tube': 'phillips 40t12cw plus fluorescent tube',
'black and decker timmer parts st4500': 'black and decker trimmer parts st4500',
'gas range slide inove': 'gas range slide in love',
'baldwin lock stets': 'baldwin lock sets',
'6 ft ceder fence': '6 ft cedar fence',
'storeage': 'storage',
'beckett fountin pump': 'beckett fountain pump',
'polyeurethane exterior': 'polyurethane exterior',
'ceiling pannel': 'ceiling panel',
'70 celing fan': '70 ceiling fan',
'vynil barackets': 'vinyl brackets',
'moen kitchen fauchet': 'moen kitchen faucet',
'ridgid model wd1680 filter': 'rigid model wd1680 filter',
'point of use electtric': 'point of use electric',
'stell finished french patio door': 'steel finished french patio door',
'lg elec laundry suite': 'lg electric laundry suite',
'outdoor screem': 'outdoor screen',
'patio chair cushions/marth stewart': 'patio chair cushions/martha stewart',
'24 hollow core closet dor': '24 hollow core closet door',
'rigid miter saw': 'ridgid miter saw',
'ruotor table': 'router table',
'airconditioner decoritive cover unit': 'air conditioner decorative cover unit',
'miwaukee 18v battery and charger': 'milwaukee 18v battery and charger',
'potable air conditioner': 'portable air conditioner',
'perhung 30x80 interior door': 'prehung 30 x 80 interior door',
'6 dewalt skill saw': '6 dewalt skil saw',
'1x8x8 toung and grove': '1x8x8 tongue and groove',
'river feather door threashold': 'river feather door threshold',
'range connnector': 'range connector',
'ligt fixture covers': 'light fixture covers',
'window flasheing': 'window flashing',
'backet metal': 'bracket metal',
'horizantel fence panel': 'horizontal fence panel',
'rug pad 8 x 10': 'rug pad 8x10',
'frigadaire appliances': 'frigidaire appliances',
'bath si k cabinets': 'bath sink cabinets',
'8x10 outside storage': '8x10 outdoor storage',
'earthgrow mulch': 'earthgro mulch',
'10 60 tooth blde': '10 60 tooth blade',
'sink faucet with soap dispencer': 'sink faucet with soap dispenser',
'ridgid job max attatchmens': 'ridgid jobmax attachments',
'ridgid wrachet head': 'ridgid ratchet head',
'celliling light': 'ceiling light',
'waterroo concrete paint': 'waterproof concrete paint',
'americian standard champion 4 toliets': 'american standard champion 4 toilets',
'4 ftawning frame': '4 ft awning frame',
'restour for concrete': 'restore for concrete',
'econo florecent bulb': 'econo fluorescent bulb',
'florecent bulb holder': 'fluorescent bulb holder',
'light fictures': 'light fixtures',
'lihonia 4 led work light': 'lithonia 4 led work light',
'interrior frnch doors': 'interior french doors',
'hamptom bay cusion': 'hampton bay cushion',
'wndows': 'windows',
'porcalain thinset': 'porcelain thinset',
'versabon 50lb': 'versabond 50 lb',
'table for outsde': 'table for outside',
'hoinda gas edger': 'honda gas edger',
'installing sockets for flor': 'installing sockets for floor',
'laguna porcelin tile': 'laguna porcelain tile',
'showe heads in oil rubbed bronze': 'shower heads in oil rubbed bronze',
'chase lounge cushions': 'chaise lounge cushions',
'electric detector in simming pool water': 'electric detector in swimming pool water',
'elongagated toilet seat': 'elongated toilet seat',
'towbehind lawn spreaders': 'tow behind lawn spreaders',
'cable poter': 'cable porter',
'fraiming nailer electric': 'framing nailer electric',
'12 x 12 porcelian floor and wall tile': '12 x 12 porcelain floor and wall tile',
'marrazi': 'marazzi',
'range hoodu': 'range hood',
'whirpool range': 'whirlpool range',
'subway title 3 x 6': 'subway tile 3 x 6',
'untique stone': 'antique stone',
'post sleeveee': 'post sleeve',
'dinning chair seats': 'dining chair seats',
'christmas lights icicle colerful': 'christmas lights icicle colorful',
'colpay garage door molding': 'clopay garage door molding',
'light for public ligthining': 'light for public lightning',
'slate timberland shingle': 'slate timberline shingle',
'cicular saw blad': 'circular saw blade',
'varbide 7 1/4 circular saw blade': 'carbide 7 1/4 circular saw blade',
'10 flourescent bulbs': '10 fluorescent bulbs',
'kids outside furnature': 'kids outside furniture',
'whirpool gas range': 'whirlpool gas range',
'starter fertillzer': 'starter fertilizer',
'toro snowerblower light kit': 'toro snowblower light kit',
'High Wheel String Trimer': 'High Wheel String Trimmer',
'insided house door': 'inside house door',
'3 1/2 non-mortison hinges satin finish': '3 1/2 non-mortise hinges satin finish',
'miracle grow garden soil': 'miracle gro garden soil',
'miracle grow spray dispensers': 'miracle gro spray dispensers',
'alure flooring black oak': 'allure flooring black oak',
'sweeping atatchment for weed wacker': 'sweeping attachment for weed wacker',
'retangle bathroom sinks': 'rectangular bathroom sinks',
'underthe cabinet microwaves': 'under the cabinet microwaves',
'24 inch lover doors': '24 inch louvered doors',
'window drip egedg': 'window drip edge',
'rechargable portable air compressor': 'rechargeable portable air compressor',
'birkmann 5 burner': 'brinkmann 5 burner',
'whirlpool gasnstove self cleaning oven': 'whirlpool gas stove self cleaning oven',
'havc brush': 'hvac brush',
'discharge hose 1.5 inces': 'discharge hose 1.5 inches',
'6 ft laminite countertop': '6 ft laminate countertop',
'pool vaccum': 'pool vacuum',
'1/2 in.x 1/2 in. thread albow male to male': '1/2 in.x 1/2 in. threaded elbow male to male',
'sofet': 'soffit',
'sliding patio doort': 'sliding patio door',
'30inch flourescent tubes': '30 inch fluorescent tubes',
'phillips light bulbs': 'philips light bulbs',
'stainless steel sinl': 'stainless steel sink',
'burgular bars for front porch': 'burglar bars for front porch',
'oach lights': 'coach lights',
'2 in lnsulated bushings': '2 in insulated bushings',
'motion lught': 'motion light',
'residental light sensor security lights': 'residential light sensor security lights',
'vertical blind accsesories': 'vertical blind accessories',
'1/2 in ree bar': '1/2 in rebar',
'cloths rod and shelf brackets': 'clothes rod and shelf brackets',
'fire rated buildng materials': 'fire rated building materials',
'hot point water filer': 'hotpoint water filter',
'bathroom cabinet without fermaldehyde': 'bathroom cabinet without formaldehyde',
'9.6 bvolt': '9.6 volt',
'rustoleum bright coach metallic': 'rustoleum bright coat metallic',
'stone effect sante cecilia top': 'stone effects santa cecilia top',
'suspanded ceiling': 'suspended ceiling',
'4x8 plywood pressure treeted': '4x8 plywood pressure treated',
'acess panel': 'access panel',
'genie excellartor garage door opener': 'genie excelerator garage door opener',
'ge dish washer with 46 dba rating': 'ge dishwasher with 46 dba rating',
'wood and concret stain': 'wood and concrete stain',
'8 foot flour sent': '8 foot fluorescent',
'infared grills': 'infrared grills',
'wirless interconnected smoke dedector': 'wireless interconnected smoke detector',
'luever': 'leuver',
'3 in roung head bolt': '3 in round head bolt',
'rachet': 'ratchet',
'rigid 12 volt': 'ridgid 12 volt',
'sharkbit': 'sharkbite',
'hamiltton collectin': 'hamilton collection',
'kitchen aide wine and beverage refrigerator': 'kitchenaid wine and beverage refrigerator',
'paint markers burgondy color': 'paint markers burgundy color',
'glass washer with sucktion cups': 'glass washer with suction cups',
'andersor doors': 'anderson doors',
'hickory cabinett': 'hickory cabinet',
'repacement can type light bulbs': 'replacement can type light bulbs',
'ceeling patio shades': 'ceiling patio shades',
'white vainty 8 faucet': 'white vanity 8 faucet',
'daylight florisant bulb 36inch': 'daylight fluorescent bulb 36 inch',
'contact paoer': 'contact paper',
'air bathtubes': 'air bathtubs',
'cushions for wecker furniture': 'cushions for wicker furniture',
'galvinized poles 20long': 'galvanized poles 20 long',
'siegel light pendent': 'siegel light pendant',
'spaonges': 'sponges',
'extorior shatters': 'exterior shutters',
'led blubs': 'led bulbs',
'4 inch back flow prenter': '4 inch backflow preventer',
'silding closet doors track': 'sliding closet doors track',
'10000 btu windowair condiioner': '10000 btu window air conditioner',
'sewer pipe hoider': 'sewer pipe holder',
'vinal blind paint': 'vinyl blind paint',
'fuacet': 'faucet',
'picinic tables': 'picnic tables',
'all in one topmount kraus sinks': 'all in one top mount kraus sinks',
'solar post lmapy': 'solar post lamp',
'transormations': 'transformations',
'daltiles sandy beach': 'daltile sandy beach',
'wallmount indoor lights with plug': 'wall mounted indoor lights with plug',
'kennal kit': 'kennel kit',
'46 high output grow florescent bulb': '46 high output grow fluorescent bulb',
'frost fee freezers': 'frost free freezers',
'stainles steel door handle': 'stainless steel door handle',
'combo drill makita 20v': 'combi drill makita 20v',
'shop vacumm': 'shop vacuum',
'primer for led paint': 'primer for lead paint',
'outdoor gas fiepits': 'outdoor gas firepits',
'hallway pendendant lighting': 'hallway pendant lighting',
'chesapeke oak flooring': 'chesapeake oak flooring',
'ryobi multi tool acccessories': 'ryobi multi tool accessories',
'ryobi raidos': 'ryobi radios',
'milwaukee skill saw': 'milwaukee skil saw',
'ligh chrismas hanging tree': 'light christmas hanging tree',
'galvinized screws': 'galvanized screws',
'led circuline bulbs': 'led circline bulbs',
'kholer elongated toilet seat': 'kohler elongated toilet seat',
'tolet seats': 'toilet seats',
'ock blade knife piece 3': 'lock blade knife piece 3',
'portable airconditioner': 'portable air conditioner',
'window aircondition': 'window air conditioner',
'36 vx 72 commercial outdoor mats': '36 x 72 commercial outdoor mats',
'runner commerical': 'runner commercial',
'montagna dappy gray': 'montagna dapple gray',
'soil temperture test kit': 'soil temperature test kit',
'basement tolet': 'basement toilet',
'32 door threshhold': '32 door threshold',
'hampton bay oak bast cabinets': 'hampton bay oak base cabinets',
'charbroil parts': 'char broil parts',
'qucikie mop': 'quickie mop',
'concret anchor bolts': 'concrete anchor bolts',
'24 whtie storage cabinet': '24 white storage cabinet',
'door handle deabolt kit': 'door handle deadbolt kit',
'ge profile 30 inch charcoal folters': 'ge profile 30 inch charcoal filters',
'49 inch napolian vanity top': '49 inch napoleon vanity top',
'4in pvc franco cuppling': '4in pvc fernco coupling',
'graveless gravaless sewer pipe': 'graveless graveless sewer pipe',
'shower fllor': 'shower floor',
'riverera screen doors': 'riviera screen doors',
'animal deterent': 'animal deterrent',
'woodpeckers repellant': 'woodpeckers repellent',
'wood buring insert 200-250': 'wood burning insert 200-250',
'spectrazide ant': 'spectracide ant',
'gas grill accesories': 'gas grill accessories',
'elecronic insect repeller': 'electronic insect repeller',
'slyvanna motion nite light': 'sylvania motion nite light',
'4 in pvs end cap': '4 in pvc end cap',
'delta portor shower and tub trim': 'delta porter shower and tub trim',
'replacment mini bulbs': 'replacement mini bulbs',
'braxilian cherry laminate': 'brazilian cherry laminate',
'15 amp tampe resistant outlets': '15 amp tamper resistant outlets',
'hydraulic jack renat': 'hydraulic jack rental',
'32 x 32 shower baser': '32 x 32 shower base',
'electronic bed bug repellant': 'electronic bed bug repellent',
'ridgid auger': 'rigid auger',
'2000 psi force nozzzle': '2000 psi force nozzle',
'25 height beveragecooler': '25 height beverage cooler',
'anderson windows 400 seriesimpact resistant': 'andersen windows 400 series impact resistant',
'drill 20 lithium battery': 'drill 20v lithium battery',
'extertal air vent cover': 'external air vent cover',
'resin shesd': 'resin sheds',
'8x8x4 conctete block': '8x8x4 concrete block',
'tun faucet spout': 'tub faucet spout',
'continuos curtain rods': 'continuous curtain rods',
'upholstry cleaner': 'upholstery cleaner',
'ureka vaccuum': 'eureka vacuum',
'30 towel rods brushed nicol': '30 towel rods brushed nickel',
'1/2 gal thermos': '1/2 gallon thermos',
'unbralla fabric top only': 'umbrella fabric top only',
'outdoor cieling fans': 'outdoor ceiling fans',
'20 amps cros hinghs breaker': '20 amps cross highs breaker',
'mixing tubn': 'mixing tub',
'gfi circuit breaker': 'gfci circuit breaker',
'wrought iuron fence panels': 'wrought iron fence panels',
'ac air vent sleave': 'ac air vent sleeve',
'air ventalation deflector': 'air ventilation deflector',
'buddahs hand tree': 'buddha\'s hand tree',
'lawm mowers': 'lawn mowers',
'asathbula 7 piece': 'ashtabula 7 piece',
'recessed lightjs': 'recessed lights',
'hing pin door dtop': 'hinge pin door stop',
'elerical outlets plates': 'electrical outlets plates',
'bed tool boc': 'bed tool box',
'16 inch fabn': '16 inch fan',
'battery poerated motion sensor': 'battery operated motion sensor',
'grqss': 'grass',
'troy build trimmer extension': 'troy bilt trimmer extension',
'mansonry impact bit': 'masonry impact bit',
'high output basebord': 'high output baseboard',
'shower door sealparts': 'shower door seal parts',
'12 inch hight wall cabinet': '12 inch height wall cabinet',
'light s for sno throwers': 'lights for snow throwers',
'ceiling medallians': 'ceiling medallions',
'medalion': 'medallion',
'everbilt sloted': 'everbilt slotted',
'transparant redwood stain': 'transparent redwood stain',
'black and decker scub buster extreme': 'black and decker scrub buster extreme',
'mobilehome siding': 'mobile home siding',
'shutter screwws': 'shutter screws',
'hampton pation set with firepit': 'hampton patio set with firepit',
'industreial wire': 'industrial wire',
'vegtable seeds': 'vegetable seeds',
'masterpeice 72': 'masterpiece 72',
'5/4 lumbe': '5/4 lumber',
'dawn to dusk lig': 'dawn to dusk light',
'dusk to dawn motion sensoroutdoor lighting fixtures': 'dusk to dawn motion sensor outdoor lighting fixtures',
'cordless sweeperr': 'cordless sweeper',
'mill valley colle': 'mill valley college',
'outdoorstorage bin': 'outdoor storage bin',
'haging wire': 'hanging wire',
'4 in white recessed haol baffle in soft white': '4 in white recessed led baffle in soft white',
'11 1/2x25 1/2 white aluminun': '11 1/2 x 25 1/2 white aluminum',
'saratoga hickorya': 'saratoga hickory',
'surface gringer': 'surface grinder',
'kidie co2': 'kidde co2',
'batterys and charger kits': 'batteries and charger kits',
'nutru ninja': 'nutri ninja',
'23.5 shower door nickle': '23.5 shower door nickel',
'glass panel retiner': 'glass panel retainer',
'12v replacement blubs': '12v replacement bulbs',
'martha steward': 'martha stewart',
'1 1/2inchbrasswalltube18 inch': '1 1/2 inch brass wall tube 18 inch',
'brown color scheem': 'brown color scheme',
'spiral latters': 'spiral letters',
'24 incyh range': '24 inch range',
'8x8 ezup canopie cover': '8x8 ez up canopy cover',
'kitcheen door blind': 'kitchen door blind',
'flourescent balast 120-2/32is': 'fluorescent ballast 120-2/32is',
'vinyl lattiace': 'vinyl lattice',
'1/4 28 threadded connector': '1/4 28 threaded connector',
'kitchaid 3 burner': 'kitchenaid 3 burner',
'10 condiut pvc': '10 conduit pvc',
'WEBER GRILL GENIS 310': 'WEBER GRILL GENESIS 310',
'wall mount tub fauet moen': 'wall mount tub faucet moen',
'sower cleaner': 'shower cleaner',
'batteryfor alarm system': 'battery for alarm system',
'bed gugs': 'bed bugs',
'show the pric of washer and dryer': 'show the price of washer and dryer',
'washer electic dryer': 'washer electric dryer',
'ho hub couplings': 'no hub couplings',
'battey string trimmers': 'battery string trimmers',
'3/4 in. wide quarteround': '3/4 in. wide quarter round',
'ac dip pans': 'ac drip pans',
'rutland wood stove termometer': 'rutland wood stove thermometer',
'outdoor daucets': 'outdoor faucets',
'badless vacuum cleaners': 'bagless vacuum cleaners',
'dewalt 20 volt xr hamer': 'dewalt 20 volt xr hammer',
'dewalt drillimpact tool 20 volt xr': 'dewalt drill impact tool 20 volt xr',
'martha steward bath mirror': 'martha stewart bath mirror',
'infared thermometer': 'infrared thermometer',
'millwaukee 1/2 ele.c drill': 'milwaukee 1/2 elec drill',
'25 watt 4 foot flourescent': '25 watt 4 foot fluorescent',
'boscj bit': 'bosch bit',
'barbque grills': 'barbecue grills',
'brinkman grill burner': 'brinkmann grill burner',
'malbu replacement led light bubles': 'malibu replacement led light bulbs',
'natural stone tiele': 'natural stone tile',
'stone vaneer': 'stone veneer',
'stone venner sequia': 'stone veneer sequoia',
'ceiling fan replacement clades': 'ceiling fan replacement blades',
'transformet for flurescent tube lights': 'transformer for fluorescent tube lights',
'refrigerator frenchdoor': 'refrigerator french door',
'flourescent paint': 'fluorescent paint',
'marking baint': 'marking paint',
'mirrir hanger': 'mirror hanger',
'chrisymas tree bags': 'christmas tree bags',
'comercial food processor': 'commercial food processor',
'picture haning kitpicture hanging kit': 'picture hanging kit picture hanging kit',
'bathroom vanity cabinetwithouttops': 'bathroom vanity cabinets without tops',
'amcrest survelliance systems': 'amcrest surveillance systems',
'30 inch refigrator': '30 inch refrigerator',
'chain saw eletric': 'chainsaw electric',
'power dprayer': 'power sprayer',
'douglas fur fake christmas trees': 'douglas fir fake christmas trees',
'brinkman grill': 'brinkmann grill',
'dual switch dimer': 'dual switch dimmer',
'Ortho Wed B Gone max': 'Ortho Weed B Gon max',
'ortho weed be gone': 'ortho weed b gon',
'4ft flourescent bulb t8': '4ft fluorescent bulb t8',
'18 volt 1/2 roter hammer': '18 volt 1/2 roto hammer',
'cabinents with drawers': 'cabinets with drawers',
'7 mil trash bgs': '7 mil trash bags',
'1/2 ntp to 1/2': '1/2 npt to 1/2',
'3/8 rachert set': '3/8 ratchet set',
'hunter shower eshaust fan with light': 'hunter shower exhaust fan with light',
'vanity in mahogany mirros': 'vanity in mahogany mirrors',
'hasmmock bed': 'hammock bed',
'composit fencing': 'composite fencing',
'post insurts': 'post inserts',
'3500 psi pressue washer': '3500 psi pressure washer',
'idylus air purifier': 'idylis air purifier',
'garden solenoide valves': 'garden solenoid valves',
'window plastic instulation': 'window plastic insulation',
'engineered wood floorcleaners': 'engineered wood floor cleaners',
'parquee flooring': 'parquet flooring',
'dermal saw max ultra': 'dremel saw max ultra',
'external structual connector screw': 'external structural connector screw',
'tv shelv': 'tv shelf',
'kithen cabinets 18 white': 'kitchen cabinets 18 white',
'1 1/2 couplingg': '1 1/2 coupling',
'porceline faucet handle': 'porcelain faucet handle',
'duplex outlet and ubs charger': 'duplex outlet and usb charger',
'1/4 quarter round cherries jublilee': '1/4 quarter round cherries jubilee',
'lg hausys viaterra': 'lg hausys viatera',
'bear semi transparent cedar stain': 'behr semi transparent cedar stain',
'27 mivrowave': '27 microwave',
'gardinias': 'gardenias',
'ull spectrum plant light': 'full spectrum plant light',
'942196brinkmann 2 burner': '942196 brinkmann 2 burner',
'gargage storage ideas': 'garage storage ideas',
'outside horizontal storage sheds': 'outdoor horizontal storage sheds',
'bouganvilla': 'bougainvillea',
'led recressed lighting': 'led recessed lighting',
'3 x3 marle tile': '3x3 marble tile',
'concrete saw dewall': 'concrete saw dewalt',
'replacement glass for pellet stive': 'replacement glass for pellet stove',
'porcelin tile black pencil tile': 'porcelain tile black pencil tile',
'smoke dectectors': 'smoke detectors',
'humidifier fulters': 'humidifier filters',
'3/4 in. pvc assesories': '3/4 in. pvc accessories',
'12 inch sower head': '12 inch shower head',
'22 mm impact ocket': '22mm impact socket',
'garvanized wood screws': 'galvanized wood screws',
'interlocking rubbber floor mats': 'interlocking rubber floor mats',
'Hose end nozzel': 'Hose end nozzle',
'led energy efficient kitchen lites': 'led energy efficient kitchen lights',
'barn syslet door': 'barn style door',
'rat or mice poision': 'rat or mice poison',
'led ressed deameable lights': 'led recessed dimmable lights',
'prelit tree mutli': 'pre lit tree multi',
'sodering iron': 'soldering iron',
'tub suround': 'tub surround',
'fireplace screen assessories': 'fireplace screen accessories',
'acrilic white paint': 'acrylic white paint',
'gibraltor locking': 'gibraltar locking',
'air conditioner sideays': 'air conditioner sideways',
'white inyrtior paint': 'white interior paint',
'100 watt candlebra': '100 watt candelabra',
'llhampton bay patio rocker': 'hampton bay patio rocker',
'lock brushed nicke;': 'lock brushed nickel;',
'structered media': 'structured media',
'summit 24 inch ss gaqs range': 'summit 24 inch ss gas range',
'ryobl battery': 'ryobi battery',
'replacement carbrator for robyi': 'replacement carburetor for ryobi',
'balist': 'ballast',
'pressuer washer': 'pressure washer',
'22 storage shelve': '22 storage shelf',
'32\' strorm door': '32\' storm door',
'hazardous locationlight fixture globe': 'hazardous location light fixture globe',
'john deer bagger': 'john deere bagger',
'ridinng lawn mowers mulching': 'riding lawn mowers mulching',
'1/2 fpt x 1/2 inch pex': '1/2 npt x 1/2 inch pex',
'2 kindorff straps': '2 kindorf straps',
'telemechanic square d': 'telemecanique square d',
'thresh hold': 'threshold',
'24x24 framless recessed mount mirrored medicine': '24x24 frameless recessed mount mirrored medicine',
'600 connector cylander': '600 connector cylinder',
'well pump submerciable': 'well pump submersible',
'security gate pannel': 'security gate panel',
'1/4-20 jamb nuts': '1/4-20 jam nuts',
'american standard flush valvu': 'american standard flush valve',
'stove adopter': 'stove adapter',
'kitchenaide dishwasher': 'kitchenaid dishwasher',
'roofing leadders': 'roofing ladders',
'heath zenity 180 security light': 'heath zenith 180 security light',
'solar powerd lights': 'solar powered lights',
'24 white walloven': '24 white wall oven',
'kitchen aide mixer': 'kitchenaid mixer',
'10 in w 30 in l inetrior vent': '10 in w 30 in l interior vent',
'co smoke detector kiddie': 'co smoke detector kidde',
'vacum aa bag 58236c': 'vacuum aa bag 58236c',
'sealant for sideing': 'sealant for siding',
'come along and chaincome along and chain': 'come along and chain come along and chain',
'wall paper bprder': 'wallpaper border',
'cararra tile': 'carrara tile',
'14 gauge strranded wire': '14 gauge stranded wire',
'30 gal electirc water heater': '30 gal electric water heater',
'guarter round tile': 'quarter round tile',
'summit gril': 'summit grill',
'gavanized pipe 20 feet': 'galvanized pipe 20 feet',
'melamine sheliving': 'melamine shelving',
'composite fiscia board': 'composite fascia board',
'spunge mop refill': 'sponge mop refill',
'wall mount outside motion dector': 'wall mount outdoor motion detector',
'bisquit tub refinish kit': 'biscuit tub refinish kit',
'patternn paint rollers': 'pattern paint rollers',
'built in wall nitch': 'built in wall niche',
'ironboard built in': 'iron board built in',
'behr melrot': 'behr merlot',
'led shoplightmakita light': 'led shop light makita light',
'armazone': 'amazon',
'soild 6 panel interior door': 'solid 6 panel interior door',
'dishs for 8': 'dishes for 8',
'1 1/4 steel ppes': '1 1/4 steel pipes',
'pull out drw': 'pull out draw',
'swffer mop': 'swiffer mop',
'milwaukee m18 tootls': 'milwaukee m18 tools',
'bronzw phone wall jack cover': 'bronze phone wall jack cover',
'flourscent lights size 18x24': 'fluorescent lights size 18x24',
'berber carpeting destiny doeskin': 'berber carpet destiny doeskin',
'spring heavy dut': 'spring heavy duty',
'2 in pvc pipe incresers': '2 in pvc pipe increasers',
'lifetime rouind table': 'lifetime round table',
'16x26 recesssed medicine cabinets': '16x26 recessed medicine cabinets',
'rolling barn dorr hardware': 'rolling barn door hardware',
'huricane panel caps': 'hurricane panel caps',
'73 inch anderson patio screen doors': '73 inch andersen patio screen doors',
'barbque grill temperature guage': 'barbecue grill temperature gauge',
'bath tub shower repair lit': 'bathtub shower repair kit',
'entery door sidelights': 'entry door sidelights',
'5 burnerner brikman gas grill': '5 burner brinkmann gas grill',
'battub floor mat': 'bathtub floor mat',
'outlet wallplate with cover': 'outlet wall plate with cover',
'fungacide': 'fungicide',
'tuband tile latex caulk': 'tub and tile latex caulk',
'natural gas barbeque': 'natural gas barbecue',
'hallogen bulb flood': 'halogen bulb flood',
'roudulf': 'rudolf',
'cellular shade 23.75x37': 'cellular shade 23.75x 37',
'wyndham vanities with no tops': 'wyndham vanities without tops',
'frigidare gas range': 'frigidaire gas range',
'frigidare refrigerator': 'frigidaire refrigerator',
'dishwasher moiunting kit': 'dishwasher mounting kit',
'black refrigeratore': 'black refrigerator',
'barcello estates light fi': 'bercello estates light fi',
'kohler ch730 maintance kits': 'kohler ch730 maintenance kits',
'phillips led slimline a19': 'philips led slimline a19',
'asburn mahogany medicine cabinate': 'ashburn mahogany medicine cabinet',
'stove top replacement patr': 'stove top replacement part',
'hampton bay pendent light parts': 'hampton bay pendant light parts',
'wall mountreading light': 'wall mount reading light',
'heat on malamine tape': 'heat on melamine tape',
'vinal plank selection': 'vinyl plank selection',
'marble qwhite': 'marble white',
'reheem performance 75 gal water heater': 'rheem performance 75 gal water heater',
'cover for a double barrow grill': 'cover for a double barrel grill',
'water taste kits': 'water test kits',
'roybi gas trimmer repair kit': 'ryobi gas trimmer repair kit',
'masonary dril bits': 'masonry drill bits',
'bath and shower facet set': 'bath and shower faucet set',
'sanding sponce': 'sanding sponge',
'silestone sammples': 'silestone samples',
'ge mwr filter': 'ge mwf filter',
'rectangele garbage can': 'rectangle garbage can',
'light podt sensor': 'light post sensor',
'honewell wireless doorbell': 'honeywell wireless doorbell',
'vertical door slide mechanis': 'vertical door slide mechanism',
'2 inch bye 6 inch thick board': '2 inch by 6 inch thick board',
'28x80 contl splt rh': '28x80 control split rh',
'doors exterior with top windo': 'doors exterior with top window',
'water filter for vanitys': 'water filter for vanities',
'hampton bay geogian wall plates aged bronze': 'hampton bay georgian wall plates aged bronze',
'18 wat let lamps': '18 watt led lamps',
'qstatic cling window film': 'static cling window film',
'eletric pole hedge clippers': 'electric pole hedge clippers',
'moen voss lightin': 'moen voss lighting',
'dreamline showeruni door': 'dreamline shower door',
'dewaqlt air nailers': 'dewalt air nailers',
'hex drill chcuck': 'hex drill chuck',
'vinal siding per box': 'vinyl siding per box',
'verticle blind': 'vertical blind',
'chome framed mirror': 'chrome framed mirror',
'b onnet': 'bonnet',
'dowel sprial': 'dowel spiral',
'deck tdiles': 'deck tiles',
'driveing bits': 'driving bits',
'water putifiers': 'water purifiers',
'clyvus': 'clivus',
'old style nailshand forgednails': 'old style nails hand forged nails',
'grohe essencekitchen faucet': 'grohe essence kitchen faucet',
'femle end hose repair': 'female end hose repair',
'garden hose reair kits': 'garden hose repair kits',
'bathroom facets': 'bathroom faucets',
'kitchenaid refrigerator bottom frrezer': 'kitchenaid refrigerator bottom freezer',
'chrome/polished brass 2-handle 4-in centerset bathroom fauc': 'chrome/polished brass 2-handle 4-in centerset bathroom faucet',
'spackilng knife': 'spackling knife',
'cadelabra light bulbs led': 'candelabra light bulbs led',
'roller bracker for frameless shower doors': 'roller bracket for frameless shower doors',
'morola tile metro penny': 'merola tile metro penny',
'48 inchled tube': '48 inch led tube',
'corner sorage': 'corner storage',
'glaciar bay crystal shower': 'glacier bay crystal shower',
'tosco ivory tile': 'tosca ivory tile',
'elecric screw driver batteries': 'electric screwdriver batteries',
'mobilehome wall paint': 'mobile home wall paint',
'chainsaw rplacement chains': 'chainsaw replacement chains',
'electric guage cable': 'electric gauge cable',
'f15 t5 florescent': 'f15 t5 fluorescent',
'sprinkler conroller': 'sprinkler controller',
'wireless light sitch': 'wireless light switch',
'16x16x60boxes for moving': '16x16x60 boxes for moving',
'engeenered wood': 'engineered wood',
'frigidare microwave': 'frigidaire microwave',
'nals for subfloor': 'nails for subfloor',
'verathane': 'varathane',
'remote controlle light dimmer': 'remote controlled light dimmer',
'koehler shower door': 'kohler shower door',
'burgluar bar tool': 'burglar bar tool',
'greem roofing shingles': 'green roofing shingles',
'milwoki circular saw': 'milwaukee circular saw',
'tub faucets bronza': 'tub faucets bronze',
'bathtubdoor towel racks': 'bathtub door towel racks',
'ac exhaust extention': 'ac exhaust extension',
'outside deck boards composit': 'outside deck boards composite',
'4inch ligh junction box': '4 inch light junction box',
'gardenn containers': 'garden containers',
'plant continers': 'plant containers',
'3 paint bbrush': '3 paint brush',
'26 in woodsaddle stool': '26 in wood saddle stool',
'adhensive with nozzle': 'adhesive with nozzle',
'swanstone kitchen sink accesories': 'swanstone kitchen sink accessories',
'pvc to corragated connector': 'pvc to corrugated connector',
'unsanded grout bisquit': 'unsanded grout biscuit',
'spray paint rust-oleum gray': 'spray paint rustoleum gray',
'brushes drils': 'brushed drills',
'indoor mounting tpe': 'indoor mounting tape',
'indoor grow light blubs': 'indoor grow light bulbs',
'thinset morter': 'thin set mortar',
'flourescent g25 60watt': 'fluorescent g25 60 watt',
'diatemaceous earth': 'diatomaceous earth',
'23\' biview surface mount med cab chestnut': '23\' bi view surface mount med cab chestnut',
'72 hour carpt': '72 hour carpet',
'2 \' galvanise street 90': '2 \' galvanized street 90',
'maytab bravos': 'maytag bravos',
'600w incandecent toggle dimmer': '600w incandescent toggle dimmer',
'galvanized wire 10 guage': 'galvanized wire 10 gauge',
'assemble hight 17 inches': 'assembled height 17 inches',
'pvc t coulpler': 'pvc t coupler',
'water heatere drain pan': 'water heater drain pan',
'faucet steam washers': 'faucet stem washers',
'heat window filtm': 'heat window film',
'dewalt circlular saw blades': 'dewalt circular saw blades',
'5plinth block': 'plinth block',
'french pation doors with sidepanels': 'french patio doors with side panels',
'30 unfinish filler': '30 unfinished filler',
'home depot in cambrige': 'home depot in cambridge',
'faucet siphon hose connecter': 'faucet siphon hose connector',
'black out doors spray paint': 'black outdoor spray paint',
'anderson storm door full view easy install': 'andersen storm door full view easy install',
'ice marker water kits': 'ice maker water kits',
'adhesive magnetized roll': 'adhesive magnetic roll',
'metal kkitchen cabines': 'metal kitchen cabinets',
'2\' x 1 1/2 reducing busing thread': '2\' x 1 1/2 reducing bushing threaded',
'abs rambit pipe saver': 'abs rambut pipe saver',
'33 in w x 18 icnh depth vanity': '33 in w x 18 inch depth vanity',
'built in landry shelving': 'built in laundry shelving',
'grey rubbermaid trash barrells': 'grey rubbermaid trash barrels',
'sawall blades': 'sawzall blades',
'9v battery ackup': '9v battery backup',
'1/2 in. fip x 7/16 in. or 1/2 in. slip joint angle stop valv': '1/2 in. fip x 7/16 in. or 1/2 in. slip joint angle stop valve',
'peir block': 'pier block',
'under ceiling garag storage': 'under ceiling garage storage',
'stone effects backsplash cool fushion': 'stone effects backsplash cool fusion',
'desoldering vacum pump': 'desoldering vacuum pump',
'elrctric welders': 'electric welders',
'unfinushed kitchen cabinets': 'unfinished kitchen cabinets',
'3 pole range reciptical': '3 pole range receptacle',
'sink cutting oard': 'sink cutting board',
'steel tubing falanges': 'steel tubing flanges',
'outdoor unskid tiles': 'outdoor non skid tiles',
'6 round headlag bolt': '6 round head lag bolt',
'cyprees fence': 'cypress fence',
'75 qrt cooler with wheels': '75 quart cooler with wheels',
'buit in themostat': 'built in thermostat',
'speacalty bit set': 'specialty bit set',
'curtain rod classic sqaure finial': 'curtain rod classic square finial',
'silk poinsetia': 'silk poinsettia',
'1 1/4 pvcsch 80': '1 1/4 pvc sch 80',
'grill ousite door': 'grill outside door',
'lumionaire': 'luminaire',
'adienne bathroom vanity light': 'adrienne bathroom vanity light',
'chashing led lights': 'chasing led lights',
'24 inch vessal tops': '24 inch vessel tops',
'co2 detector kiddie': 'co2 detector kidde',
'white glazed 4 tilw': 'white glazed 4 tile',
'wood lattace': 'wood lattice',
'premaid stair railing': 'premade stair railing',
'3 function double walll switch': '3 function double wall switch',
'koehler shower faucet with spray': 'kohler shower faucet with spray',
'askley electric fireplace': 'ashley electric fireplace',
'blind for paladian': 'blind for paladin',
'regancy railin': 'regency railing',
'weatherside purit': 'weatherside purity',
'vent a hood dampr': 'vent a hood damper',
'light tropper 2x4': 'light troffer 2x4',
'30 amp generater receptical': '30 amp generator receptacle',
'prefab wood gate panals': 'prefab wood gate panels',
'floating corner shelfing': 'floating corner shelving',
'fridgidaire dehumidifier': 'frigidaire dehumidifier',
'pegs for cabinent shelves': 'pegs for cabinet shelves',
'100 amp to 200a lit': '100 amp to 200 a lot',
'decorative metal sceen': 'decorative metal screen',
'lacross weather pro center': 'lacrosse weather pro center',
'behr flat white marque': 'behr flat white marquee',
'high output floresant': 'high output fluorescent',
'behr hawian paint': 'behr hawaiian paint',
'pressure vaccuum breaker o-ring': 'pressure vacuum breaker o-ring',
'psint gun': 'paint gun',
'wine coller': 'wine cooler',
'rug ruunners': 'rug runners',
'clock control for fridgidare gas stove': 'clock control for frigidaire gas stove',
'outlet expsnsion surge protector': 'outlet expansion surge protector',
'rigid pipe threader': 'ridgid pipe threader',
'electical box': 'electrical box',
'insect granuels': 'insect granules',
'compsit outside corner': 'composite outside corner',
'cabinet kitchen ligth': 'cabinet kitchen light',
'dewalt ratchet srewdriver': 'dewalt ratchet screwdriver',
'18.5 outside chair cushiobs': '18.5 outside chair cushions',
'fenching and gate latches': 'fencing and gate latches',
'heater for refrigertor': 'heater for refrigerator',
'motion detect indoor': 'motion detector indoor',
'refrigerater french doors ge brand': 'refrigerator french doors ge brand',
'tiki tourches': 'tiki torches',
'gren house kits': 'greenhouse kits',
'5000 btu aircondition': '5000 btu air conditioner',
'airator dishwasher': 'aerator dishwasher',
'2x6 metal brakets': '2x6 metal brackets',
'weldn 3': 'weldon 3',
'ceiling paint pray': 'ceiling paint spray',
'flourescent fixture metal parts': 'fluorescent fixture metal parts',
'natural hickery kitchen cabinets': 'natural hickory kitchen cabinets',
'kitchen aide dishwasher': 'kitchenaid dishwasher',
'led track lightning systems': 'led track lighting systems',
'duplex receptacle nickle': 'duplex receptacle nickel',
'12 foot ceadar': '12 foot cedar',
'faux wood shade 100 jnches': 'faux wood shade 100 inches',
'contracto0r hose': 'contractor hose',
'lspacers for toilet bowl': 'spacers for toilet bowl',
'aftificial prelit christmas trees': 'artificial prelit christmas trees',
'paint colores by rooms': 'paint colors by rooms',
'warm whit led bulb': 'warm white led bulb',
'clamps for unistruct': 'clamps for unistrut',
'kitchen trviso price phister': 'kitchen treviso price pfister',
'10guage copper wire 3 stand': '10 gauge copper wire 3 stand',
'deep frezer with glass cover': 'deep freezer with glass cover',
'powder clorine shock treatment': 'powder chlorine shock treatment',
'galvanaized can': 'galvanized can',
'prebent aluminum facia': 'prebent aluminum fascia',
'vinyl scrapper for jack hammer': 'vinyl scraper for jack hammer',
'dwaft outside plants': 'dwarf outside plants',
'tilebath walls small': 'tile bath walls small',
'2 ton aircondition': '2 ton air conditioner',
'martha stewart metalic paint gallon': 'martha stewart metallic paint gallon',
'schilage electronic deadbolts locks': 'schlage electronic deadbolts locks',
'60x65shower doors': '60x65 shower doors',
'tile slide cuter': 'tile slide cutter',
'eagle peak hoickory': 'eagle peak hickory',
'gas black range worldpool': 'gas black range whirlpool',
'trigger makita skillsaw': 'trigger makita skil saw',
'hardi lap hanger': 'hardie lap hanger',
'master flow insolated duct wrap': 'master flow insulated duct wrap',
'replacment stove knobs': 'replacement stove knobs',
'outdoor alrm': 'outdoor alarm',
'wireless outdoor thermom': 'wireless outdoor thermometer',
'faun paint': 'fawn paint',
'wireless security caamera': 'wireless security camera',
'fiet electric led gu10': 'feit electric led gu10',
'stair unners': 'stair runners',
'stainstess steel spray paint': 'stainless steel spray paint',
'mount blanv': 'mont blanc',
'riobi power tool combo': 'ryobi power tool combo',
'24 sydey collection': '24 sydney collection',
'air compresser': 'air compressor',
'no tresspassing signs': 'no trespassing signs',
'flexable 6 inch': 'flexible 6 inch',
'wall beveled framelessmirror': 'wall beveled frameless mirror',
'slidein range bisque': 'slide in range bisque',
'router templit kits letters': 'router template kits letters',
'american sandard 1660.225,': 'american standard 1660.225,',
'onyx sand porcelian': 'onyx sand porcelain',
'watherproof electrical boxes': 'weatherproof electrical boxes',
'carpet remmnant': 'carpet remnant',
'8\' sigle wall gal pipe': '8\' single wall galv pipe',
'byfold hinges': 'bi fold hinges',
'terra cota quarry stones': 'terracotta quarry stones',
'rustolem appliance touch up paint': 'rustoleum appliance touch up paint',
'rain nickle': 'rain nickel',
'whirlpool light bulb part 8206232': 'whirlpool light bulb part 8206232a',
'Vigaro fall fertilizer': 'Vigoro fall fertilizer',
'pneumatic cynlinder': 'pneumatic cylinder',
'20 ft electical romex': '20 ft electrical romex',
'medicine cabinets recessable black': 'medicine cabinets recessed black',
'krass 30 inch kitchen sink': 'kraus 30 inch kitchen sink',
'stainless steel grat': 'stainless steel grate',
'suncort 8\' duct fans': 'suncourt 8\' duct fans',
'nutmag mirrors': 'nutmeg mirrors',
'clawfoot tub faucit kit': 'clawfoot tub faucet kit',
'protective pper': 'protective paper',
'touchless dishwashing kintchen dispenser': 'touchless dishwashing kitchen dispenser',
'air temperture contorl valve': 'air temperature control valve',
'melinger hand truck wheals': 'melinger hand truck wheels',
'watt premiere water filters': 'watt premier water filters',
'weed killer spray contaner': 'weed killer spray container',
'18in hardware coth': '18in hardware cloth',
'ac window supprt': 'ac window support',
'vegetable plannter': 'vegetable planter',
'soap punp': 'soap pump',
'wall paper murial glue': 'wallpaper mural glue',
'vertical binds hardware': 'vertical blinds hardware',
'rubbermaid verital sheds': 'rubbermaid vertical sheds',
'1/2 in. extension joint': '1/2 in. expansion joint',
'curtin rods': 'curtain rods',
'edge glued rounda': 'edge glued rounds',
'plywood edge taope': 'plywood edge tape',
'36\' copoktop': '36\' cooktop',
'curtains non black out': 'curtains not blackout',
'honolule center drain': 'honolulu center drain',
'toliet snake': 'toilet snake',
'black and deckerbattery pack': 'black and decker battery pack',
'beer and wine combination frigerator': 'beer and wine combination refrigerator',
'mess wire fencing': 'mesh wire fencing',
'ecosmart 90 led daylight br30': 'ecosmart 90w led daylight br30',
'miniture bulbs 2 pin': 'miniature bulbs 2 pin',
'dishwasher water connection vlave': 'dishwasher water connection valve',
'ant bait raps': 'ant bait traps',
'coragated aluimin special order': 'corrugated aluminum special order',
'carpot canopy 10x20': 'carport canopy 10x20',
'10 foot white ethjernet cable': '10 foot white ethernet cable',
'polished chrome cbinet pulls': 'polished chrome cabinet pulls',
'cooper tubing': 'copper tubing',
'dwarf pereniel plants': 'dwarf perennial plants',
'lampost motion detector': 'lamp post motion detector',
'3 gutter oulets': '3 gutter outlets',
'kohler shower ddoors for tubs in nickel': 'kohler shower doors for tubs in nickel',
'zep liquid air fresher': 'zep liquid air freshener',
'rewiring built in oven': 'wiring built in oven',
'10/4 SJ CABLE': '10/4 SJO CABLE',
'tempered glass wndow': 'tempered glass window',
'mataeials needed for paver patio': 'materials needed for paver patio',
'tankles water heater gas outdoor': 'tankless water heater gas outdoor',
'ypermethrin': 'cypermethrin',
'kwikset halifax door leaver': 'kwikset halifax door lever',
'ryobi coordless 18v starter kit': 'ryobi cordless 18v starter kit',
'habor gazeebo': 'harbor gazebo',
'electric barbeque grills': 'electric barbecue grills',
'rasin raised garden bed': 'resin raised garden bed',
'barbeque grills big and easy': 'barbecue grills big and easy',
'floor warming matt': 'floor warming mat',
'machette': 'machete',
'cool tube lgts': 'cool tube lights',
'universal faucet connect': 'universal faucet connector',
'daltile hexgon': 'daltile hexagon',
'hurracaine brackets': 'hurricane brackets',
'martha stewart curtiins': 'martha stewart curtains',
'byfold doors': 'bifold doors',
'2 tier adjustable cabinet orgainzer': '2 tier adjustable cabinet organizer',
'7w compact flourescent bulb': '7w compact fluorescent bulb',
'singel wall stove pipe': 'single wall stove pipe',
'wheeld trimmer': 'wheeled trimmer',
'boader rocks': 'border rocks',
'crown moldinf jig': 'crown molding jig',
'small refridgerators': 'small refrigerators',
'blind courner': 'blind corner',
'asphault gap repair': 'asphalt gap repair',
'no. 30 ridgid cutting wheel': 'no. 30 rigid cutting wheel',
'battery cable conector': 'battery cable connector',
'coranado baseboard pine': 'coronado baseboard pine',
'cerrowire 18 gauge': 'cerro wire 18 gauge',
'universal exstention cord': 'universal extension cord',
'wirlpool counterdepth side by side refrigrator': 'whirlpool counter depth side by side refrigerator',
'cedar bahr 502 stain': 'cedar behr 502 stain',
'small tracerse rods': 'small traverse rods',
'yelloe safet tape': 'yellow safety tape',
'elctric heating lamps': 'electric heating lamps',
't8 flourescent bulbs': 't8 fluorescent bulbs',
'u bents fluorescent': 'u bend fluorescent',
'pergo lamate flooring': 'pergo laminate flooring',
'sweenys mole and gopher repelant': 'sweeney\'s mole and gopher repellent',
'rg6 connecto': 'rg6 connector',
'ge electriv burners': 'ge electric burners',
'replacement part for koehler toilet kb3': 'replacement part for kohler toilet kb3',
'furiture paint, stain and varnish': 'furniture paint, stain and varnish',
'mission prarie camber top slab': 'mission prairie camber top slab',
'mirr edge': 'mirror edge',
'orbital sanding disck': 'orbital sanding disc',
'quickrete 50 lb mix': 'quikrete 50 lb mix',
'high efficiency dust baf rigid vac': 'high efficiency dust bag ridgid vac',
'liminate flooring cleaning': 'laminate flooring cleaning',
'gtxworks trimmer spools': 'gt worx trimmer spools',
'securty bar mounts': 'security bar mounts',
'fall supression kit': 'fall suppression kit',
'weatherproof boom box': 'waterproof boombox',
'geld wen 2500 96 x 36': 'jeld wen 2500 96 x 36',
'enfineered floors drifting sand': 'engineered floors drifting sand',
'well pump back presure valve': 'well pump back pressure valve',
'heavy duty shevlving': 'heavy duty shelving',
'mmodel': 'model',
'frigidare stainless refrig': 'frigidaire stainless refrig',
'rusteoulm spray paint': 'rustoleum spray paint',
't5 high output ligh': 't5 high output light',
'sandpap': 'sandpaper',
'cerowire 12 gauge': 'cerro wire 12 gauge',
'what rings for toitets': 'what rings for toilets',
'infrared theomomter': 'infrared thermometer',
'1x6 toungh groove': '1x6 tongue groove',
'v ceader board': 'v cedar board',
'sodpstone': 'soapstone',
'10 yeaer smoke detectors/carbon monoxide combo': '10 year smoke detectors/carbon monoxide combo',
'kkohler toilet seat': 'kohler toilet seat',
'pink toliet seat elongated': 'pink toilet seat elongated',
'flexiblr bit': 'flexible bit',
'coleman instasmart grill': 'coleman instastart grill',
'exide battery 75,car battrey': 'exide battery 75,car battery',
'black cherry stainer': 'black cherry stain',
'1x4 pre primed mfd trim': '1 x 4 pre primed mdf trim',
'mnt movr combo shovel': 'mnt move combo shovel',
'100 watt candlabra bulb': '100 watt candelabra bulb',
'samsung black stainles': 'samsung black stainless',
'dewalt jig saw blad': 'dewalt jig saw blade',
'alluminum downspout connector': 'aluminum downspout connector',
'alltyp of fences': 'all type of fences',
'clow hammer 16 0z': 'claw hammer 16 0z',
'tomatoe plants': 'tomato plants',
'white lacquer wall selves': 'white lacquer wall shelves',
'pressure guage': 'pressure gauge',
'slid pad': 'slide pad',
'female hose connectore': 'female hose connector',
'solor lamp outside': 'solar lamp outside',
'daltile urban camoflogue': 'daltile urban camouflage',
'deocorative screws for hanging pictures': 'decorative screws for hanging pictures',
'kitchen composie double sinks': 'kitchen composite double sinks',
'whitesilicone': 'white silicone',
'self contained recepticles': 'self contained receptacles',
'brass handel door': 'brass handle door',
'charley brown christmas trees': 'charlie brown christmas trees',
'carbon fiber vinel': 'carbon fiber vinyl',
'phillips fluorescent 40': 'philips fluorescent 40',
'36 inxh return air grill': '36 inch return air grill',
'garden pond pump impellor': 'garden pond pump impeller',
'vinal flooring 25 year warranty': 'vinyl flooring 25 year warranty',
'mulcing blades for troy built': 'mulching blades for troy bilt',
'5 1/4 deckboard': '5 1/4 deck board',
'plaste dip': 'plasti dip',
'cemnt pads for makita bo5030': 'cement pads for makita bo5030',
'ge beverage refriderator': 'ge beverage refrigerator',
'bathroom plummbing': 'bathroom plumbing',
'gas pire column': 'gas fire column',
'confrence': 'conference',
'clock cuitain rod wood': 'clock curtain rod wood',
'decrotive outdoor lighting': 'decorative outdoor lighting',
'ballast for single t12 fluorscent bulb': 'ballast for single t12 fluorescent bulb',
'workstar cordless and recharable work light': 'workstar cordless and rechargeable work light',
'light bulb 250 cfl': 'light bulb 250w cfl',
'rubber gromet': 'rubber grommet',
'spray metallicpaint': 'spray metallic paint',
'paint quart zise': 'paint quart size',
'blinds for portch': 'blinds for porch',
'sable browj 95': 'sable brown 95',
'1/2 conduet': '1/2 conduit',
'wooden curton rod brackets': 'wooden curtain rod brackets',
'corbels and shelfs': 'corbels and shelves',
'seimens typ qt breaker': 'siemens type qt breaker',
'steel builco': 'steel bilco',
'metal joinst': 'metal joist',
'externol patio doors': 'external patio doors',
'FENSE LIGHTING': 'FENCE LIGHTING',
'oil bronzed wine glass rack': 'oiled bronze wine glass rack',
'klien lether pouch': 'klein leather pouch',
'shark rocket filtes': 'shark rocket filters',
'4x7 ruggs': '4 x 7 rugs',
'24 elicreic stove': '24 electric stove',
'grill hasmilton': 'grill hamilton',
'air vents for plumping': 'air vents for plumbing',
'gazebo with shelfs': 'gazebo with shelves',
'expanding plastic sleeves for scews': 'expanding plastic sleeves for screws',
'oli rubbed bronze drain': 'oil rubbed bronze drain',
'clothsline rope': 'clothesline rope',
'stove gas replacement knops': 'stove gas replacement knobs',
'rechargale batteries for solar lights': 'rechargeable batteries for solar lights',
'standard artificial grasa synthetic lawn turf': 'standard artificial grass synthetic lawn turf',
'new deck for rtz 50': 'new deck for rzt 50',
'wire shelv liner': 'wire shelf liner',
'wood paint with primerin blue': 'wood paint with primer in blue',
'fabreeze': 'febreze',
'ceilng fan': 'ceiling fan',
'manuel for 425 - 1649': 'manual for 425 - 1649',
'14 in dimond circular saw blade': '14 in diamond circular saw blade',
'berhr solid 213 deep': 'behr solid 213 deep',
'driveway m arkers': 'driveway markers',
'commercil threshold': 'commercial threshold',
'multinozzle spray painting': 'multi nozzle spray painting',
'shower nitch': 'shower niche',
'1/2x1/2 quater round': '1/2 x 1/2 quarter round',
'Insulted work gloves': 'Insulated work gloves',
'5000 lumnes': '5000 lumens',
'magnets for gromets': 'magnets for grommets',
'toro springkler': 'toro sprinkler',
'motion sensoring black decorative lamp': 'motion sensing black decorative lamp',
'proclean concentrated drain cleaner': 'pro clean concentrated drain cleaner',
'feather river doorsth sidelights': 'feather river doors sidelights',
'ridgid powerwasher parts': 'ridgid power washer parts',
'skill pressure sander': 'skil pressure sander',
'outdoor vertical sheda': 'outdoor vertical sheds',
'brick web thin brick flats': 'brickweb thin brick flats',
'airguard undelayment': 'airguard underlayment',
'toyotaa': 'toyota',
'round rug for kitch': 'round rug for kitchen',
'round one piece tiolet': 'round one piece toilet',
'sppeed square': 'speed square',
'adirondak chair': 'adirondack chair',
'hickory hadwre touch of spring': 'hickory hardware touch of spring',
'garge door handle': 'garage door handle',
'whiteled tree': 'white led tree',
'airosol epoxy paint': 'aerosol epoxy paint',
'ice ring machine': 'ice rink machine',
'deep expresso walnut/new ellenton': 'deep espresso walnut/new ellenton',
'interior walls bieges brown': 'interior walls beige brown',
'pet disinfectent': 'pet disinfectant',
'altra furniture parsons credenza desk with drawer and bookc': 'altra furniture parsons credenza desk with drawer and books',
'gorilla gold cpvc gluetm': 'gorilla gold cpvc glue',
'aligator clips': 'alligator clips',
'irrigation pipe connectoer': 'irrigation pipe connector',
'citronella fire pot fue': 'citronella fire pot fuel',
'garden spreklers heads': 'garden sprinklers heads',
'light swith insulation': 'light switch insulation',
'dual lock 3m veclro': 'dual lock 3m velcro',
'water proof mc connecter': 'waterproof dc connector',
'snow blowerr scraper blade': 'snowblower scraper blade',
'vesel tub': 'vessel tub',
'carrrs': 'careers',
'odl 6\' x 6\'retractable screens': 'odl 6\' x 6 retractable screens',
'outdoord storage locker': 'outdoor storage locker',
'standing seam roof panals': 'standing seam roof panels',
'phillips 65w 2 pack': 'philips 65w 2 pack',
'2 squares double 5 vinly siding': '2 squares double 5 vinyl siding',
'fabric steam cleamer': 'fabric steam cleaner',
'scikkens stain': 'sikkens stain',
'polyethylne cap': 'polyethylene cap',
'decorative interor glass doors': 'decorative interior glass doors',
'vanity top for two vessell': 'vanity top for two vessel',
'giant bird of paridise': 'giant bird of paradise',
'almeda hickory': 'alameda hickory',
'cabinet ba rpulls in bronze': 'cabinet bar pulls in bronze',
'l screwa': 'l screws',
'johan deer 0 turns': 'john deere 0 turns',
'milwankee 7 pc set': 'milwaukee 7 pc set',
'faucet pl801l 18 guage': 'faucet pl801l 18 gauge',
'12 light bronze chandilier': '12 light bronze chandelier',
'flourecent light plastic covers': 'fluorescent light plastic covers',
'roof pannel foam': 'roof panel foam',
'under cabinet lighting ro-hs': 'under cabinet lighting rohs',
'round lshower kit': 'round shower kit',
'concreet enchors': 'concrete anchors',
'woodwen pallet': 'wooden pallet',
'shigles': 'shingles',
'comercial plank doors': 'commercial plank doors',
'stainless steel kithen faucet with soap dispenser': 'stainless steel kitchen faucet with soap dispenser',
'm4 50 srcew': 'm4 50 screw',
'splitbolt connector': 'split bolt connector',
'charming 18 roll': 'charmin 18 roll',
'table glass oatu': 'table glass oahu',
'kohlor flush for toilet tank 4421': 'kohler flush for toilet tank 4421',
'outdoor pendant lioghting': 'outdoor pendant lighting',
'24 inflex gas line': '24 in flex gas line',
'lawn mower rechargeable batterys': 'lawn mower rechargeable batteries',
'merola metalic tile': 'merola metallic tile',
'above ground pool vaccume': 'above ground pool vacuum',
'bosss water softner': 'boss water softener',
'moen one handel kitchen faucet repair parts': 'moen one handle kitchen faucet repair parts',
'sanding machinehardwood floors': 'sanding machine hardwood floors',
'super patch driverway sealler': 'super patch driveway sealer',
'sschlueter shower system': 'schluter shower system',
'offset flang': 'offset flange',
'aluminium tube rectangle': 'aluminium tube rectangular',
'legrad keystone cat5e jack': 'legrand keystone cat5e jack',
'yellow jacket extenison cord': 'yellow jacket extension cord',
'Habachi': 'Hibachi',
'mini pendant braket': 'mini pendant bracket',
'hose to presure washer': 'hose to pressure washer',
'gliddon speed wall': 'glidden speed wall',
'new age produucts': 'new age products',
'archor tub and shower faucet trim': 'archer tub and shower faucet trim',
'space saving stoage': 'space saving storage',
'vinyl flooring that clicks togther': 'vinyl flooring that clicks together',
'gladden smooth stone': 'glidden smooth stone',
'knape vogt baseket': 'knape vogt basket',
'ul liquidthight 25': 'ul liquidtight 25',
'white glossy furniture pain': 'white gloss furniture paint',
'square bannister': 'square banister',
'greenh wall paint': 'green wall paint',
'tile medalions for the floor or wall': 'tile medallions for the floor or wall',
'milwalke brewers garden flag': 'milwaukee brewers garden flag',
'versatiube': 'versatube',
'kenocen can nozzle': 'kenken can nozzle',
'mosaic esterior': 'mosaic exterior',
'winow wheels': 'window wheels',
'stud popers': 'stud poppers',
'trane 2.5 toon 13 seer heat pump': 'trane 2.5 ton 13 seer heat pump',
'ultra vue quick screeen': 'ultra vue quick screen',
'watterproof cleated boots': 'waterproof cleated boots',
'hdx pneumaitic paint': 'hdx pneumatic paint',
'biscue dishwashers': 'bisque dishwashers',
'sunbrella sipcovers': 'sunbrella slipcovers',
'miracle grow water absorbing crystals': 'miracle gro water absorbing crystals',
'disposal rim and stopperkohler': 'disposal rim and stopper kohler',
'long brakets': 'long brackets',
'freplace gloves': 'fireplace gloves',
'ridgid power drve pipe threadrs': 'ridgid power drive pipe threader',
'12x24 shefl': '12x24 shelf',
'1x6 prime molding': '1x6 primed molding',
'countertop soap dispensor': 'countertop soap dispenser',
'bushbutton for door bell': 'push button for doorbell',
'cauk saver': 'caulk saver',
'rubber stipper': 'rubber stopper',
'16 inch flourescent': '16 inch fluorescent',
'pendents amber': 'pendants amber',
'newtone broan round 751': 'nutone broan round 751',
'danze shower vlve': 'danze shower valve',
'wooden track drawer replacment': 'wooden track drawer replacement',
'single granit bathroom vanity': 'single granite bathroom vanity',
'oval steele tubs': 'oval steel tubs',
'liquid weed and feeed': 'liquid weed and feed',
'outodoor oatoman': 'outdoor ottoman',
'nutone vaccum wall plate': 'nutone vacuum wall plate',
'collor clamp': 'collar clamp',
'pure air ultra filtration syste,m': 'pure air ultra filtration system',
'llantana': 'lantana',
'white melimine cabinet': 'white melamine cabinet',
'2-handlet diverter repair kit': '2-handle diverter repair kit',
'mosiac lamps': 'mosaic lamps',
'exterior pipeinsulation': 'exterior pipe insulation',
'warm espresso bamboo quarteround': 'warm espresso bamboo quarter round',
'hardwood medialons': 'hardwood medallions',
'tub/hand shoer diverter with trim': 'tub/hand shower diverter with trim',
'locite 2 plus 1': 'loctite 2 plus 1',
'kwiksest door handle delta': 'kwikset door handle delta',
'frame nail hitschi': 'frame nailer hitachi',
'30 mirrow medicine cabinet': '30 mirrored medicine cabinet',
'pecane trees': 'pecan trees',
'lifeproof carpet sample lower trasure': 'lifeproof carpet sample lower treasure',
'umbrell hole ring': 'umbrella hole ring',
'melmane wood': 'melamine wood',
'melomine accessories': 'melamine accessories',
'windows single hang': 'windows single hung',
'portabe bar': 'portable bar',
'crystable table set lamps': 'crystal table set lamps',
'schlage handleset bermingham': 'schlage handleset birmingham',
'lp gas converion kit': 'lp gas conversion kit',
'quart exterior semi glass enamel': 'quart exterior semi gloss enamel',
'woodrx ultra natural': 'wood rx ultra natural',
'brushed barringnton': 'brushed barrington',
'leather lgue': 'leather glue',
'moen bronze low arch faucet': 'moen bronze low arc faucet',
'18 inch linen closit': '18 inch linen closet',
'bear paint green myth': 'behr paint green myth',
'solar light rechargable batteries': 'solar light rechargeable batteries',
'solar powered emergancy unit': 'solar powered emergency unit',
'kohler 3 handle shower reapair kit': 'kohler 3 handle shower repair kit',
'thermadore black cast kitchen sink': 'thermador black cast kitchen sink',
'dental shelf door': 'dentil shelf door',
'seed starting mixx': 'seed starting mix',
'rubberaid dust mop': 'rubbermaid dust mop',
'phillips bugle-head finethread sharp': 'phillips bugle-head fine thread sharp',
'black laminate shelfing': 'black laminate shelving',
'ice maker cylinoid ge': 'ice maker solenoid ge',
'home decorators mantle green': 'home decorators mantel green',
'perrenial white daisy like': 'perennial white daisy like',
'chamber-top halifax glass dooor': 'chamber-top halifax glass door',
'depp well socket set': 'deep well socket set',
'hanger racc vertical': 'hanger rack vertical',
'tool package with pilers,needlenose': 'tool package with pliers,needlenose',
'fome core board': 'foam core board',
'colaroo outdoor shades corded': 'coolaroo outdoor shades corded',
'decoator chain': 'decorator chain',
'rust oleum dark hunter green spray enamel paint': 'rustoleum dark hunter green spray enamel paint',
'lights and siloutte': 'lights and silhouette',
'real live orchred plants': 'real live orchid plants',
'2ftx3ft industrail rbber mat': '2ftx3ft industrial rubber mat',
'fernace vent shut off': 'furnace vent shut off',
'cedar wood balisters': 'cedar wood balusters',
'gliden premium semi gloss quart': 'glidden premium semi gloss quart',
'mosaic tile costal mist': 'mosaic tile coastal mist',
'toilet lever kphler brass': 'toilet lever kohler brass',
'front doors - poinye zinc': 'front doors - pointe zinc',
'matte bailey mohogany': 'matte bailey mahogany',
'wesleyand': 'wesleyan',
'plasic diffuser': 'plastic diffuser',
'cover kage for pet': 'cover page for pet',
'network agapter': 'network adapter',
'whitehaus bathroom sinl': 'whitehaus bathroom sink',
'icey tech': 'icey tek',
'kaorik wine': 'kalorik wine',
'susbenders': 'suspenders',
'policarbonate case': 'polycarbonate case',
'shaw livng rugs model rac66': 'shaw living rugs model rac66',
'carpet in bassment': 'carpet in basement',
'bifold doorsfold plantation': 'bi fold doors fold plantation',
'handheld seed speader': 'handheld seed spreader',
'hot dipped galvinized coil nails': 'hot dipped galvanized coil nails',
'hand saw sharpner': 'hand saw sharpener',
'mattress foam protecter': 'mattress foam protector',
'n utdriver bit': 'nut driver bit',
'lattice wwod tone': 'lattice wood tone',
'our door receptacles': 'outdoor receptacles',
'great outdors': 'great outdoors',
'exterior string ligts': 'exterior string lights',
'dog ,cat,repellant': 'dog ,cat,repellent',
'20a wht nylon duple': '20a wht nylon duplex',
'fatmax leveler premier': 'fatmax level premier',
'ralph laren brown paints': 'ralph lauren brown paints',
'liquid bi fuels': 'liquid biofuels',
'scrubbin sponge': 'scrubbing sponge',
'ceramic tile tooth brush and soap holder': 'ceramic tile toothbrush and soap holder',
'cultured marbl;e shower walls': 'cultured marble shower walls',
'did recorder player': 'dvd recorder player',
'golith': 'goliath',
'black maytag french door refrigirator': 'black maytag french door refrigerator',
'stair nose santos maogani': 'stair nose santos mahogany',
'l tub fauctes': 'l tub faucets',
'eyebolt brass': 'eye bolt brass',
'terracotta exteriorpaint': 'terracotta exterior paint',
'manuel venting sky light': 'manual venting skylight',
'bathroom fan motion sencer': 'bathroom fan motion sensor',
'hard start capacitator': 'hard start capacitor',
'windows gazing bead': 'windows glazing bead',
'vanitiy top back splach': 'vanity top backsplash',
'large yellow screw inground anchors': 'large yellow screw in ground anchors',
'heavy duty polyurathane': 'heavy duty polyurethane',
'folfable stool': 'foldable stool',
'charlston south carolina': 'charleston south carolina',
'pine flooring, tang end grove': 'pine flooring, tongue and groove',
'starter fuil': 'starter fuel',
'granite colr group prices': 'granite color group prices',
'calanvreas': 'calaveras',
'golden krome spray': 'gold chrome spray',
'g e micewave': 'g e microwave',
'sheet meatal hole cutter': 'sheet metal hole cutter',
'zurn hot short stemcartridge': 'zurn hot short stem cartridge',
'outdoor picture ftames': 'outdoor picture frames',
'shower pad porceleain': 'shower pan porcelain',
'battery under counter lightening': 'battery under counter lighting',
'elictric door bail': 'electric door bell',
'barbeque insert': 'barbecue insert',
'barclay glass bathroom shelfs': 'barclay glass bathroom shelves',
'preserva wood caryon': 'preserva wood crayon',
'white grey floor tile mosiac': 'white grey floor tile mosaic',
'minwax wood puty': 'minwax wood putty',
'the govenore': 'the governor',
'diverter 5 in. tub spout with slip fit connection in chrom': 'diverter 5 in. tub spout with slip fit connection in chrome',
'vinyl plank blue slatr': 'vinyl plank blue slate',
'frameless shwoer panel': 'frameless shower panel',
'virtue usa huntshire': 'virtu usa huntshire',
'3.5 Hindge': '3.5 Hinge',
'round plastic tablrs': 'round plastic tables',
'paint storage contaiers': 'paint storage containers',
'centerset 2-handle weall': 'centerset 2-handle wall',
'wax ring with self taping bolts': 'wax ring with self tapping bolts',
'gama sonic winsor pier base': 'gama sonic windsor pier base',
'pilla windows': 'pella windows',
'dresser acessories': 'dresser accessories',
'duel compression 1/2 x 3/8 valve': 'dual compression 1/2 x 3/8 valve',
'american atanderd plebe 4086': 'american standard plebe 4086',
'dyson ball allergy vaccume': 'dyson ball allergy vacuum',
'low woltage relay': 'low voltage relay',
'hand steam cleanere': 'hand steam cleaner',
'eiectric concrte mixer': 'electric concrete mixer',
'pemco sill extender': 'pemko sill extender',
'silver branzing rods': 'silver brazing rods',
'sanding beltsr': 'sanding belts',
'dorr faceplates': 'door faceplates',
'stainless steel ball beating for hinges': 'stainless steel ball bearing for hinges',
'stabilty': 'stability',
'hose bibb replacement valve': 'hose bib replacement valve',
'long shower curtins': 'long shower curtains',
'crub rubber': 'crumb rubber',
'swivel saftey cover': 'swivel safety cover',
'makita oscilating saw': 'makita oscillating saw',
'whithaus faucet speckled brass': 'whitehaus faucet speckled brass',
'energy efficent skylight': 'energy efficient skylight',
'garden seed packs': 'garden seed packets',
'boshe double bevel sliding miter saw': 'bosch double bevel sliding miter saw',
'taylor test lit': 'taylor test kit',
'chargril grill': 'charbroil grill',
'over ran': 'overran',
'recipricating saw 15 amp': 'reciprocating saw 15 amp',
'mikita 18v 2.6 ah': 'makita 18v 2.6 ah',
'no burn spry': 'no burn spray',
'cuctis soil': 'cactus soil',
'brushed stainless cabin ate hardware': 'brushed stainless cabinet hardware',
'fork lift strps': 'forklift straps',
'electrian': 'electrician',
'doorbell chimes and transformr': 'doorbell chimes and transformer',
'faux diamondplate': 'faux diamond plate',
'milstead vintage maple engineered flooring': 'millstead vintage maple engineered flooring',
'ce tech coaxial cablenail in clips': 'ce tech coaxial cable nail in clips',
'bq heat distributipn plates': 'bbq heat distribution plates',
'metal lath stuko': 'metal lath stucco',
'cord less drill portcable': 'cordless drill porter cable',
'round bulb sting lights': 'round bulb string lights',
'lp coversion kit maytag dryer': 'lp conversion kit maytag dryer',
'chase lounger covers': 'chaise lounge covers',
'insl-x pure step': 'insl-x sure step',
'gerber knife tactiical': 'gerber knife tactical',
'deecals number': 'decals number',
'hampton bat 26\'. w tilt out hamper white': 'hampton bay 26\'. w tilt out hamper white',
'outdoor chritstmas light remote': 'outdoor christmas light remote',
'wood fuelpellets': 'wood fuel pellets',
'cpipe lamp': 'pipe lamp',
'wiemans stainless cleaner': 'weimans stainless cleaner',
'10 roll up outside blinds': '10 roll up outdoor blinds',
'wainscote': 'wainscot',
'heat resistant spicket': 'heat resistant spigot',
'garage shelve': 'garage shelf',
'shevles': 'shelves',
'storage shelfs': 'storage shelves',
'proipane': 'propane',
'ventless gas heters': 'ventless gas heaters',
'vinal fencing': 'vinyl fencing',
'toliet bowl': 'toilet bowl',
'toliet bowl wrench': 'toilet bowl wrench',
'fanc wire': 'fancy wire',
't post fence assesories': 't post fence accessories',
'telescooping ladder': 'telescoping ladder',
'spring haven brown all weather wicked': 'spring haven brown all weather wicker',
'36 exterior steele door': '36 exterior steel door',
'faucetskitchen': 'faucets kitchen',
'batt insulatiom': 'batt insulation',
'congolium': 'congoleum',
'vinal flooring': 'vinyl flooring',
'vynil floorring': 'vinyl flooring',
'clacier bay toliet': 'glacier bay toilet',
'GLAZER BAY TOILET': 'GLACIER BAY TOILET',
'norton hot water heater ingniter': 'norton hot water heater igniter',
'undercounter lighs': 'under counter lights',
'stainless refridgerator': 'stainless refrigerator',
'stainless steel refridgerator': 'stainless steel refrigerator',
'window ac manuel operation': 'window ac manual operation',
'rustolem': 'rustoleum',
'18v drill brushles': '18v drill brushless',
'dining sets outdo?': 'dining sets outdoor?',
'eat resistant epoxy': 'heat resistant epoxy',
'cordless drils': 'cordless drills',
'3 piece bfush set': '3 piece brush set',
'kitchen faucet installtion tools': 'kitchen faucet installation tools',
'Moen Kitchen sink fauset': 'Moen Kitchen sink faucet',
'plaqstic bucket': 'plastic bucket',
'3m winow film': '3m window film',
'water softner': 'water softener',
'flourescent light bulp': 'fluorescent light bulb',
'closermaid cabinet': 'closetmaid cabinet',
'raised panel extirior doors': 'raised panel exterior doors',
'blcktop repair kit': 'blacktop repair kit',
'peal and stick flashning': 'peel and stick flashing',
'marshaltown 6 inch': 'marshalltown 6 inch',
'vynel wall tiles': 'vinyl wall tiles',
'presusre treated post': 'pressure treated post',
'LAWN LEAF VACUM': 'LAWN LEAF VACUUM',
'space heatres': 'space heaters',
'alumium fence 6 ft 6ft': 'aluminum fence 6 ft 6 ft',
'bathroom sinks kholer': 'bathroom sinks kohler',
'pedistal': 'pedestal',
'clear eppoxy': 'clear epoxy',
'wood fir plank flooring': 'wood for plank flooring',
'quickcrete waterproof cement': 'quikrete waterproof cement',
'rood rake': 'roof rake',
'propane gas tank meater': 'propane gas tank meter',
'ac cooling fin straightenrs': 'ac cooling fin straightener',
'slidng panel lock': 'sliding panel lock',
'closet maiid cabinets': 'closet maid cabinets',
'ridge power tools combo packs': 'ridgid power tools combo packs',
'backsplash tiiles': 'backsplash tiles',
'cabinet knobsd': 'cabinet knobs',
'cabnet knobs': 'cabinet knobs',
'dealt air compressor parts': 'dewalt air compressor parts',
'spgot': 'spigot',
'paver bricks scolloped': 'paver bricks scalloped',
'CHASE LOUNGE': 'CHAISE LOUNGE',
'layndry tu': 'laundry tu',
'submeribale pedistal sump pump': 'submersible pedestal sump pump',
'celling fans': 'ceiling fans',
'wall sconse': 'wall sconce',
'93 inch widecellular shades': '93 inch wide cellular shades',
'post white ligth': 'post white light',
'palmero brushed nickel ceiling fan': 'palermo brushed nickel ceiling fan',
'aromaticeatern red cedar planking': 'aromatic eastern red cedar planking',
'black and decker hobby crafter': 'black and decker hobbycrafter',
'front load fridaire': 'front load frigidaire',
'pedestial washer': 'pedestal washer',
'whilrpool front loader washer': 'whirlpool front loader washer',
'extrior louvored wood door 30x80': 'exterior louvered wood door 30x80',
'interior doorser': 'interior doors',
'dill battery 12v model g0805': 'drill battery 12v model g0805',
'10 stair lader': '10 stair ladder',
'milwakee 1/2 impact cordless': 'milwaukee 1/2 impact cordless',
'kolher': 'kohler',
'floor slealer': 'floor sealer',
'high traffic floor polurethane paint': 'high traffic floor polyurethane paint',
'sawzall blades miluakee': 'sawzall blades milwaukee',
'vaccum hose': 'vacuum hose',
'vynal repalcement windows': 'vinyl replacement windows',
'vinil for flors': 'vinyl for floors',
'led withe': 'led white',
'squar flushmount lights': 'square flush mount lights',
'huskey 18': 'husky 18',
'remove oder from kerosine': 'remove odor from kerosene',
'25ft huskt tape': '25 ft husky tape',
'plastic corrougeted roofing': 'plastic corrugated roofing',
'kholerhighland white toilet': 'kohler highline white toilet',
'toilet seat for briggs toliet': 'toilet seat for briggs toilet',
'steel shelve': 'steel shelf',
'dig irritation drip': 'dig irrigation drip',
'kohler pedastal sink': 'kohler pedestal sink',
'high loss natural jabota': 'high loss natural jatoba',
'Huskavarna': 'Husqvarna',
'power cordclass 2 power model xy_2900600_u': 'power cord class 2 power model xy_2900600_u',
'treaated plywood': 'treated plywood',
'air condtioning wall unit': 'air conditioning wall unit',
'wall air conditioneer': 'wall air conditioner',
'window ac insaller': 'window ac installer',
'sensor porch ligts': 'sensor porch lights',
'miricile applet or and tray': 'miracle applet or and tray',
'paint refil tray': 'paint refill tray',
'door knobs exteria': 'door knobs exterior',
'exhaustless portable airconditioner': 'exhaustless portable air conditioner',
'portable aircondition': 'portable air conditioner',
'oscilliating too': 'oscillating tool',
'PYWOOD': 'PLYWOOD',
'rigid nailer': 'ridgid nailer',
'bankoft toilet biscuit': 'bancroft toilet biscuit',
'mown pull down faucet': 'moen pull down faucet',
'lo gas water heater': 'low gas water heater',
'richman water heater': 'richmond water heater',
'tall toliet': 'tall toilet',
'ridding mower covers': 'riding mower covers',
'hole angel jig': 'hole angle jig',
'10 deep kitchen sink porcelin': '10 deep kitchen sink porcelain',
'plastic tiles pcv': 'plastic tiles pvc',
'vinyl sheeti': 'vinyl sheet',
'samsungelectric ranges': 'samsung electric ranges',
'frameless shoer doors': 'frameless shower doors',
'webber charcoal grill': 'weber charcoal grill',
'kerosine heaters': 'kerosene heaters',
'kersone heaters': 'kerosene heaters',
'propain heater': 'propane heater',
'heating elements for dyer whirlpool': 'heating elements for dryer whirlpool',
'safty glasses': 'safety glasses',
'eletric stove': 'electric stove',
'Schecule 40 Pipe': 'Schedule 40 Pipe',
'bayonett saw blades': 'bayonet saw blades',
'sconses': 'sconces',
'52\' pinacle ceiling fan': '52\' pinnacle ceiling fan',
'atic fans with lubers': 'attic fans with louvers',
'cealing fans': 'ceiling fans',
'hampton bay out door celing fan': 'hampton bay outdoor ceiling fan',
'out door celing fan': 'outdoor ceiling fan',
'kitchen exaust fan': 'kitchen exhaust fan',
'Cimmaron': 'Cimarron',
'fridgedaire': 'frigidaire',
'frigidaire washer door striker/catch': 'frigidaire washer door striker/latch',
'lawn mover wrench': 'lawn mower wrench',
'bmboo lattecie': 'bamboo lattice',
'1 handle tub and shower faucet shower and tub vlaves': '1 handle tub and shower faucet shower and tub valves',
'hansgroph faucets bathroom': 'hansgrohe faucets bathroom',
'led light bulbsbulbs': 'led light bulbs bulbs',
'landscape srone': 'landscape stone',
'braid nailer combo kit': 'brad nailer combo kit',
'doors for mobilhomes': 'doors for mobile homes',
'smaller closet lights': 'small closet lights',
'traficmaster': 'trafficmaster',
'hardi board smooth': 'hardie board smooth',
'wainscoating': 'wainscoting',
'galvanisedround fire pit ring': 'galvanized round fire pit ring',
'electrichot water heaters residential': 'electric hot water heaters residential',
'garage shelf unjit': 'garage shelf unit',
'stone baxksplash': 'stone backsplash',
'pendent cealing fixture': 'pendant ceiling fixture',
'undercabinet ligghts': 'under cabinet lights',
'martha stewartcabinet pull': 'martha stewart cabinet pull',
'4 fluorescant fixture covers': '4 fluorescent fixture covers',
'exterior vanyl french door': 'exterior vinyl french door',
'adheasive': 'adhesive',
'lineulium floor': 'linoleum floor',
'plexiglass selves': 'plexiglass shelves',
'Allure mellowood flooring': 'Allure mellow wood flooring',
'allure tile sedon?': 'allure tile sedona?',
'allure vinyl tilecordoba': 'allure vinyl tile cordoba',
'wood veener facing for kitchen cabinets': 'wood veneer facing for kitchen cabinets',
'painters plastice': 'painters plastic',
'granitne sealer': 'granite sealer',
'55 inch cultured marble vanity tope': '55 inch cultured marble vanity top',
'mirros': 'mirrors',
'garge floor paint': 'garage floor paint',
'weather indoor and outpoor temp': 'weather indoor and outdoor temp',
'ryobi blower with batery': 'ryobi blower with battery',
'powerwasher hose': 'power washer hose',
'mikita 9.5 volt drill': 'makita 9.5 volt drill',
'vinal fence straps': 'vinyl fence straps',
'black chandelier wjth black shades': 'black chandelier with black shades',
'medecine cabinet': 'medicine cabinet',
'medicient cabinet': 'medicine cabinet',
'serface mount medicine cabinets': 'surface mount medicine cabinets',
'husqvarna presure washer': 'husqvarna pressure washer',
'back yard weather forecasteer': 'backyard weather forecaster',
'chain link fenceing': 'chain link fencing',
'jogsaw tool': 'jigsaw tool',
'lg ruff wall instalation': 'lg ruff wall installation',
'pcv pipe sement': 'pvc pipe cement',
'hardi trim': 'hardietrim',
'vynal siding insol': 'vinyl siding insol',
'cheapete gas 40 gallon hot water heater': 'cheapest gas 40 gallon hot water heater',
'powervent water heater': 'power vent water heater',
'exterieur door 32 inch': 'exterior door 32 inch',
'vynal floor matting': 'vinyl floor matting',
'door knobsw': 'door knobs',
'black decke weed eaters': 'black decker weed eaters',
'lectric string trimmer cst1200r': 'electric string trimmer cst1200r',
'1.4 mircowave over the stove': '1.4 microwave over the stove',
'stove excaust fan': 'stove exhaust fan',
'mobile home extior doors': 'mobile home exterior doors',
'wood lathesw': 'wood lathes',
'anderson replacement double hung window 34.5x36.5': 'andersen replacement double hung window 34.5x 36.5',
'contrcator baseboard': 'contractor baseboard',
'moehn kitchen facet 87211srssd': 'moen kitchen faucet 87211srs',
'repare kit for 2-handle side sprayer kitchen faucet': 'repair kit for 2-handle side sprayer kitchen faucet',
'ecco friendly garden hose': 'eco friendly garden hose',
'flex gardn hose': 'flex garden hose',
'garden host 50': 'garden hose 50',
'bathroon lighting': 'bathroom lighting',
'lanscape timber': 'landscape timber',
'bathroom valnity lights': 'bathroom vanity lights',
'gas pressure regular': 'gas pressure regulator',
'ashely 48 in electric chi': 'ashley 48 in electric chi',
'2x6 treted 8ft long': '2x6 treated 8ft long',
'wheel borrow': 'wheelbarrow',
'whellbarrow': 'wheelbarrow',
'scement bags': 'cement bags',
'accordian door': 'accordion door',
'Electic Lawn Mowers': 'Electric Lawn Mowers',
'hampton bay cabinetscornor cabinetupper': 'hampton bay cabinets corner cabinet upper',
'electric pump for sprying': 'electric pump for spraying',
'front foor 2 siding': 'front door 2 siding',
'whirlpool lgas dryer': 'whirlpool gas dryer',
'pressure treated lumber spaint': 'pressure treated lumber paint',
'rhee. 40 gallon water heaters': 'rheem. 40 gallon water heaters',
'8x96 white decrotive shelf': '8x96 white decorative shelf',
'bathroom pendastal': 'bathroom pedestal',
'r25/r30 faced insullation': 'r25/r30 faced insulation',
'heavy dutty letter support': 'heavy duty letter support',
'ceder decking': 'cedar decking',
'negitave air machine': 'negative air machine',
'outdoor maouse traps': 'outdoor mouse traps',
'storeage shed': 'storage shed',
'car canoply': 'car canopy',
'commerical tile': 'commercial tile',
'1 1/2 colated rock screws': '1 1/2 collated rock screws',
'sheeet rock mud': 'sheetrock mud',
'counterdepth fridge': 'counter depth fridge',
'maytag refregirator': 'maytag refrigerator',
'whirlpool french door frig 30 wide': 'whirlpool french door fridge 30 wide',
'wirlpool 30 wide french door': 'whirlpool 30 wide french door',
'dleta shower faucet handles': 'delta shower faucet handles',
'38 grainte composit sink': '38 granite composite sink',
'blown in insulaation': 'blown in insulation',
'foam insulatino': 'foam insulation',
'doors interiorwith door jams': 'doors interior with door jams',
'residentialsteel door and frame': 'residential steel door and frame',
'wood swimg set kits': 'wood swing set kits',
'quickcrete resurfacer': 'quikrete resurfacer',
'2 inch srew cap': '2 inch screw cap',
'30 gar builtin ranges': '30 gas built in ranges',
'samsong stive': 'samsung stove',
'chissel': 'chisel',
'rigid compound miter saw': 'ridgid compound miter saw',
'rigid compound miter saw dust pouch': 'ridgid compound miter saw dust pouch',
'shampoo and lotion automatice dispenser': 'shampoo and lotion automatic dispenser',
'wall scone': 'wall sconce',
'rubber for refridgerators': 'rubber for refrigerators',
'water proofing shower membrame': 'waterproofing shower membrane',
'fridigdaire back gas range': 'frigidaire black gas range',
'cabrio dryder': 'cabrio dryer',
'whilrpool cabrio dryer': 'whirlpool cabrio dryer',
'light switcht sensor': 'light switch sensor',
'calutta marble laminate countertop': 'calcutta marble laminate countertop',
'vinylcorner boards 4 inch': 'vinyl corner boards 4 inch',
'plastix box': 'plastic box',
'scurity screen doors': 'security screen doors',
'nonadhesive vinyl flooring': 'non adhesive vinyl flooring',
'trafficmaster interloclk': 'trafficmaster interlock',
'anntenias': 'antennas',
'clothes dryer srand': 'clothes dryer stand',
'eletric water heater': 'electric water heater',
'sharkbike push to connect 3/4': 'sharkbite push to connect 3/4',
'fuel nozzle furnance': 'fuel nozzle furnace',
'ryobi one batery': 'ryobi one battery',
'5/8 floring plywood weatherproof': '5/8 flooring plywood weatherproof',
'mitter saw manual': 'miter saw manual',
'selenoid for dryer': 'solenoid for dryer',
'presure coated wood': 'pressure coated wood',
'composote lumber': 'composite lumber',
'14 awgsoilid wire': '14 awg solid wire',
'welded wire fenching 12 gauge': 'welded wire fencing 12 gauge',
'patio chair cusions': 'patio chair cushions',
'viynl patches': 'vinyl patches',
'7 in. stove pie': '7 in. stove pipe',
'whirlpoolgas stove': 'whirlpool gas stove',
'whirpool microwave 1.4 cu ft': 'whirlpool microwave 1.4 cu ft',
'whirpool refrigerator': 'whirlpool refrigerator',
'3\' nailes': '3\' nails',
'nailer tooal': 'nailer tool',
'weed barier': 'weed barrier',
'oped garage door indicator': 'open garage door indicator',
'styrafoam': 'styrofoam',
'10 foot step laddert': '10 foot step ladder',
'3 1/2 hardwar': '3 1/2 hardware',
'double control shower vavle': 'double control shower valve',
'replacement shower encosure rod': 'replacement shower enclosure rod',
'baby gurad gate': 'baby guard gate',
'joint compund light weight': 'joint compound lightweight',
'sheetrock high preformance joint compound': 'sheetrock high performance joint compound',
'1x2 appearnce boards': '1x2 appearance boards',
'lumber 2x8 composit': 'lumber 2x8 composite',
'floot ball': 'float ball',
'dewalt empact driver': 'dewalt impact driver',
'bosh cordless combo set': 'bosch cordless combo set',
'ryobi 18v battwery': 'ryobi 18v battery',
'kihchen cabinet slidr shelves': 'kitchen cabinet slide shelves',
'chesnut border edging': 'chestnut border edging',
'outdoor seat cushions 24.5 whte': 'outdoor seat cushions 24.5 white',
'12x12 tile msaic': '12x12 tile mosaic',
'skill screwdriver battery': 'skil screwdriver battery',
'manual for airens lawnmower': 'manual for ariens lawn mower',
'gas stabilisor': 'gas stabilizer',
'4 x 4 white pocelain tile': '4 x 4 white porcelain tile',
'rigid pipe cutter': 'ridgid pipe cutter',
'24 regrigerators': '24 refrigerators',
'refrigerato 33 inch wide': 'refrigerator 33 inch wide',
'smudge proof stainless steele': 'smudge proof stainless steel',
'whirpool amana': 'whirlpool amana',
'moen banbury 24 in. doubletowel bar': 'moen banbury 24 in. double towel bar',
'4\' r;ubber top set base': '4\' rubber top set base',
'extension springes': 'extension springs',
'grass string trimmer electric homelight': 'grass string trimmer electric homelite',
'craftman style lights': 'craftsman style lights',
'glacier bay delmare expresso wall mirror': 'glacier bay del mar espresso wall mirror',
'dollie 600 lbs': 'dolly 600 lbs',
'patio tille': 'patio tile',
'eucalptus white board': 'eucalyptus white board',
'vynal tile': 'vinyl tile',
'heat reducing window flim': 'heat reducing window film',
'Porach Light': 'Porch Light',
'brissell zing vacuum bags': 'bissell zing vacuum bags',
'toillet': 'toilet',
'kitchen aid refrigirator light bulb:': 'kitchenaid refrigerator light bulb:',
'chadelier': 'chandelier',
'cararra marble': 'carrara marble',
'coedless makita chainsaw with batteries': 'cordless makita chainsaw with batteries',
'mikita cordless drill': 'makita cordless drill',
'antique brass hindges for doors': 'antique brass hinges for doors',
'riobi battery': 'ryobi battery',
'feerzer': 'freezer',
'schlade wirell door lock': 'schlage wireless door lock',
'water proff board': 'waterproof board',
'celing light holder': 'ceiling light holder',
'wood toold': 'wood tools',
'4 inch insolation': '4 inch insulation',
'Urehtane Foam Sheet': 'Urethane Foam Sheet',
'4 center lavatory facuet': '4 center lavatory faucet',
'Shower facuet': 'Shower faucet',
'electric dyrer heater elemnet': 'electric dryer heater element',
'milluakee drill bits': 'milwaukee drill bits',
'scrren wire': 'screen wire',
'safegaurd 30 synthetic felt': 'safeguard 30 synthetic felt',
'hampden bay chandelier': 'hampton bay chandelier',
'1/2 inch pnumatic stapler': '1/2 inch pneumatic stapler',
'12\' firetreat 2x4': '12\' fire treated 2x4',
'american-standarfairfield elongated one-piece 1.6 gpf toilet': 'american-standard fairfield elongated one-piece 1.6 gpf toilet',
'toilet aquaia': 'toilet aquia',
'Comercial electric': 'Commercial electric',
'light puff defuser': 'light puff diffuser',
'ryobi drill prass': 'ryobi drill press',
'110v ectric dryers': '110v electric dryers',
'FIRE RESTISTANT BOARD': 'FIRE RESISTANT BOARD',
'vinyle plankj': 'vinyl plank',
'cordless backpack vaccume': 'cordless backpack vacuum',
'hampton baysolar bird lights': 'hampton bay solar bird lights',
'kohler chair height elongated toliet': 'kohler chair height elongated toilet',
'electic fireplace': 'electric fireplace',
'hampton bay jmestown': 'hampton bay jamestown',
'surfacemount kitchen sink': 'surface mount kitchen sink',
'rigid wet nozzelsqueegee': 'ridgid wet nozzle squeegee',
'vacumns': 'vacuums',
'gble vent': 'gable vent',
'ventalation': 'ventilation',
'biinds and shades': 'blinds and shades',
'copact drills cordless': 'compact drills cordless',
'ridge 18v hammer': 'ridgid 18v hammer',
'heavy dutty garden hose': 'heavy duty garden hose',
'1/2\' extirior plywood': '1/2\' exterior plywood',
'gutter water reflector': 'gutter water deflector',
'under cabinet led light accesory pack': 'under cabinet led light accessory pack',
'armstroung floor adhesive': 'armstrong floor adhesive',
'whirlpoolstainless steel refrig': 'whirlpool stainless steel refrig',
'black and decker elctric': 'black and decker electric',
'cordless edgere': 'cordless edger',
'white electrtical outlets': 'white electrical outlets',
'tan unmbrella': 'tan umbrella',
'gothic fence picketts': 'gothic fence pickets',
'vinyl 1 bilnd': 'vinyl 1 blinds',
'console tab;le': 'console table',
'T-5 florescent light fixtures': 'T-5 fluorescent light fixtures',
'royobi pedestal grinder wheel': 'ryobi pedestal grinder wheel',
'wall panaling': 'wall paneling',
'PORCH STAIR RAILLING': 'PORCH STAIR RAILING',
'micro fibe': 'microfiber',
'champion toliet part': 'champion toilet parts',
'rr vaccum filter': 'rr vacuum filter',
'exhust fan': 'exhaust fan',
'corragated metal': 'corrugated metal',
'gasolene generaters and inverters': 'gasoline generators and inverters',
'stailess steel top stoves': 'stainless steel top stoves',
'top freezer refrigeratot': 'top freezer refrigerator',
'3/4 inche rock': '3/4 inch rock',
'12 roofing pannel': '12 roofing panel',
'blakck in decker edger': 'black and decker edger',
'tile scrapper': 'tile scraper',
'brick morter': 'brick mortar',
'cement blodks': 'cement blocks',
'unmortified mortor': 'unmodified mortar',
'bifold door hardw': 'bifold door hardware',
'metal scerews': 'metal screws',
'sliding doos for backyard': 'sliding doors for backyard',
'screen fame corner': 'screen frame corner',
'electric lawn mowerectrical': 'electric lawn mower electrical',
'clacer bay all n one sink': 'glacier bay all in one sink',
'sola water fountain': 'solar water fountain',
'closet clothes rackclosetmaid': 'closet clothes rack closetmaid',
'passload': 'paslode',
'kitchen tile backspl': 'kitchen tile backsplash',
'viyle fencing': 'vinyl fencing',
'flexible tourche extension': 'flexible torch extension',
'6 pnl molded': '6 panel molded',
'soild core flush pre hung door': 'solid core flush prehung door',
'convction heater': 'convection heater',
'closet orginizer shoe rack wire': 'closet organizer shoe rack wire',
'freesstanding': 'free standing',
'mmirror closet doors': 'mirror closet doors',
'maratha stewart monogram wreath': 'martha stewart monogram wreath',
'edsel heavy duty 5': 'edsal heavy duty 5',
'11 ft extension cord groud': '11 ft extension cord ground',
'indoor/otdoor extensions cords e176194': 'indoor/outdoor extension cords e176194',
'outdoor extention cords e': 'outdoor extension cords e',
'unface insulation 23 inches wide': 'unfaced insulation 23 inches wide',
'porble toilets': 'portable toilets',
'toilet saftey seat': 'toilet safety seat',
'silca sand': 'silica sand',
'tall 18 in storage cabnet': 'tall 18 in storage cabinet',
'20x8 storge shed': '20 x 8 storage shed',
'rubbermade shed': 'rubbermaid shed',
'rubbermaid resin storage cabnetsn': 'rubbermaid resin storage cabinets',
'cedar wod chips': 'cedar wood chips',
'hidraulic tools': 'hydraulic tools',
'celing fans with lighting and remote': 'ceiling fans with lighting and remote',
'fridigidaire drop in oven': 'frigidaire drop in oven',
'tub surround pices': 'tub surround prices',
'allure flooring oak expresso': 'allure flooring oak espresso',
'pass and seymore light cover switch': 'pass and seymour light cover switch',
'28x54 replacment window': '28x54 replacement windows',
'anderson windows new constraction': 'anderson windows new construction',
'swamp oolers': 'swamp coolers',
'wahing machines': 'washing machines',
'interior primed mdf crown mouldin': 'interior primed mdf crown moulding',
'built in convectionoven': 'built in convection oven',
'flpwers for your garden': 'flowers for your garden',
'closetr rod': 'closet rod',
'unfinished wide bplanked hickory flooring': 'unfinished wide plank hickory flooring',
'48v to 110 invertor': '48v to 110v inverter',
'landscape wateting': 'landscape watering',
'sockets for fluorescence fixtres': 'sockets for fluorescent fixtures',
'woodceramic floor tile': 'wood ceramic floor tile',
'brigsg and stations 500 seris': 'briggs and stations 500 series',
'green carpert': 'green carpet',
'pressure treated step tread 6ft': 'pressure treated stair tread 6ft',
'hand pump gfor water': 'hand pump for water',
'rutic lighting': 'rustic lighting',
'cender blocks': 'cinder blocks',
'talsrar': 'talstar',
'rybi power tools': 'ryobi power tools',
'portercable 6 gal': 'porter cable 6 gal',
'table covers waterproff': 'table covers waterproof',
'solid alium square tubing': 'solid aluminum square tubing',
'deck post jhardware': 'deck post hardware',
'hunter new bronzel fans': 'hunter new bronze fans',
'16d framin': '16d framing',
'moen brushed nickel batharoom': 'moen brushed nickel bathroom',
'barriar plastic': 'barrier plastic',
'window ac/hehat units': 'window ac/heat units',
'icycle lights': 'icicle lights',
'4 gallon expanion': '4 gallon expansion',
'floor mount lawndry seek': 'floor mount laundry sink',
'high addhesion primer': 'high adhesion primer',
'24 gauge wire connectorsa': '24 gauge wire connectors',
'sterio wire for indoor speakers': 'stereo wire for indoor speakers',
'garage bicyclestorage': 'garage bicycle storage',
'how mustall tankless water heater': 'how install tankless water heater',
'chelsea white acrylic oval in rectangl': 'chelsea white acrylic oval in rectangle',
'cleaning jeta for whirlpool': 'cleaning jets for whirlpool',
'bathroom faucet replacment valve': 'bathroom faucet replacement valve',
'3x5 cemet board': '3x5 cement board',
'vaccumm': 'vacuum',
'ghroe shower headstrong shower heads': 'grohe shower headstrong shower heads',
'mial boxes': 'mail boxes',
'claw tups': 'claw tips',
'facia corner brace': 'fascia corner brace',
'pegisas sink top': 'pegasus sink top',
'mirroes for doors': 'mirrors for doors',
'counter depth refridgidere': 'counter depth refrigerator',
'corrigaed fiberglass roofing': 'corrugated fiberglass roofing',
'window airconditionerwith heaters': 'window air conditioners with heaters',
'extention rail for opener': 'extension rail for opener',
'whitecomposite fascia board': 'white composite fascia board',
'vanity topp 31 white': 'vanity top 31 white',
'underhood range fan': 'under hood range fan',
'price pfister trevisa': 'price pfister treviso',
'milwaukee cordlees tools': 'milwaukee cordless tools',
'pendent light': 'pendant light',
'pre-emergent weed contro': 'pre-emergent weed control',
'is this item in stoes?': 'is this item in store?',
'door home secutity': 'door home security',
'3oo watt haalogen bulbs': '3oo watt halogen bulbs',
'96 in flourescent bulbs': '96 in fluorescent bulbs',
'shop ceiling fane': 'shop ceiling fan',
'aaa batteries everready gold': 'aaa batteries eveready gold',
'buth tub faucet': 'bathtub faucet',
'delta montecello tub faucet': 'delta monticello tub faucet',
'ge spring water heater': 'geospring water heater',
'ge water heater egnighter': 'ge water heater igniter',
'31x19 one piecs bathroom sink': '31x19 one piece bathroom sink',
'replacment clips for wire rack': 'replacement clips for wire rack',
'ac air diverer': 'ac air diverter',
'3 sewer pipce': '3 sewer pipe',
'3\' electical pipe': '3\' electrical pipe',
'large outside horizontal storage shed': 'large outdoor horizontal storage shed',
'swing hangar hardware': 'swing hanger hardware',
'dim able balafon flood light': 'dimmable balafon flood light',
'phillips exterior led': 'philips exterior led',
'banity 11 watt light bulb': 'vanity 11 watt light bulb',
'kithchen install': 'kitchen install',
'magnet stainless steel for diswasher': 'magnet stainless steel for dishwasher',
'phone spliter': 'phone splitter',
'receptical': 'receptacle',
'water resistent electrical outlets': 'water resistant electrical outlets',
'kitchenaid superb oven': 'kitchenaid superba oven',
'403esprit 2x4 ceing tile': '403 esprit 2x4 ceiling tile',
'wall excess panel': 'wall access panel',
'drop celing tiles': 'drop ceiling tiles',
'pvc drop in celing tiles': 'pvc drop in ceiling tiles',
'pl gas hose': 'lp gas hose',
'12 v landscaping ligtening fixture': '12v landscape lighting fixture',
'behr white external semigloss paint': 'behr white exterior semi gloss paint',
'GRAGE DOOR OPENER': 'GARAGE DOOR OPENER',
'grage doors': 'garage doors',
'24 inch med oak base': '24 inch medium oak base',
'okeefes working hands': 'o\'keeffe\'s working hands',
'phenofin': 'penofin',
'8 foot galvinezed': '8 foot galvanized',
'12 mobil home air duct': '12 mobile home air duct',
'door hinges for americana refrigator': 'door hinges for americana refrigerator',
'tub drain kit bronz': 'tub drain kit bronze',
'halligon light bulb': 'halogen light bulb',
'husky rachet': 'husky ratchet',
'andersen vnyl windows': 'andersen vinyl windows',
'balwind double cilynder lock': 'baldwin double cylinder lock',
'drop down ceiling ppanel': 'drop down ceiling panel',
'arearugs and mats': 'area rugs and mats',
'dark expresso paint for wood': 'dark espresso paint for wood',
'melamine shelvees': 'melamine shelves',
'mosaic whitel and black tile': 'mosaic white and black tile',
'8 wre wheel': '8 wire wheel',
'9\' plna replament blade': '9\' plane replacement blade',
'saw zall blades': 'sawzall blades',
'pain pot': 'paint pot',
'drain cleaneraner machines': 'drain cleaner machines',
'anderson storm doors pet': 'andersen storm doors pet',
'basement window replacement insructions': 'basement window replacement instructions',
'grill cover brinkman double grill': 'grill cover brinkmann double grill',
'gerber daisies': 'gerbera daisies',
'gerber daisy': 'gerbera daisy',
'exterior wood stainolid color': 'exterior wood stain color',
'2700 br30 led': '2700k br30 led',
'3m wheather stripping': '3m weather stripping',
'barn doorhinges': 'barn door hinges',
'plywood progect': 'plywood project',
'28 guage screen': '28 gauge screen',
'lampsade pendent light': 'lamp shade pendant light',
'kitchen cabiner corner': 'kitchen cabinet corner',
'paatio swings': 'patio swings',
'12 bar chian for echo': '12 bar chain for echo',
'bix max 7x7': 'big max 7x7',
'bathtub faucethandle replacement parts': 'bathtub faucet handle replacement parts',
'prelit spiral trees': 'pre lit spiral trees',
'12 sthel chainsaws': '12 stihl chainsaws',
'10 ft drain house': '10 ft drain hose',
'american standard tiolet flappers': 'american standard toilet flappers',
'solar out doors post lights': 'solar outdoor post lights',
'kitchen cabinet with counertop': 'kitchen cabinet with countertop',
'Painting Cabniets': 'Painting Cabinets',
'18x18 teracota porcelain floor tiles': '18x18 terracotta porcelain floor tiles',
'drywal': 'drywall',
'pencle trim tile': 'pencil trim tile',
'vinyl latice': 'vinyl lattice',
'angle findeer': 'angle finder',
'laminate tile comercial': 'laminate tile commercial',
'couner deep refrigerators': 'counter deep refrigerators',
'chritmas tree': 'christmas tree',
'plug in carbon monoxcide': 'plug in carbon monoxide',
'cabinet handels': 'cabinet handles',
'frigidair drop in': 'frigidaire drop in',
'7\' hex hed bolt': '7\' hex head bolt',
'vent fllters': 'vent filters',
'horizontall': 'horizontal',
'3 x 6 blace tile': '3 x 6 black tile',
'rostoluem spray paint': 'rustoleum spray paint',
'power drill battery an charger': 'power drill battery and charger',
'rayobi blue charger': 'ryobi blue charger',
'robyi': 'ryobi',
'5/4 pressure treaded decking': '5/4 pressure treated decking',
'white carrara herring bome': 'white carrara herringbone',
'sailr blue': 'sailor blue',
'charbroil classic': 'char broil classic',
'14 electric concrete saw with vc-u dch300': '14 electric concrete saw with vac-u dch 300',
'potable air conditioners': 'portable air conditioners',
'fin heating tubeing': 'fin heating tubing',
'fine/line baseboarrd': 'fine/line baseboard',
'hot water heating eliment': 'hot water heating element',
'toiet': 'toilet',
'hole house fan': 'whole house fan',
'montaga bay tile': 'montego bay tile',
'40 gal liquid propan': '40 gal liquid propane',
'4 x 4 pos cap': '4x4 post cap',
'white quartz cointertop': 'white quartz countertop',
'elongated bone toilest': 'elongated bone toilet',
'white acryl paint': 'white acrylic paint',
'foundstion vents': 'foundation vents',
'sqeaky carpet stair kit': 'squeaky carpet stair kit',
'defusiers for floors': 'diffusers for floors',
'8\' galvanized roll top edginh': '8\' galvanized roll top edging',
'marithon water heater element': 'marathon water heater element',
'wirerless light switch': 'wireless light switch',
'moen posi-temp tim kit': 'moen posi-temp trim kit',
'shower dooroil rubbed bronze': 'shower door oil rubbed bronze',
'wireing': 'wiring',
'kitchen aid architecs series 11': 'kitchenaid architect series 11',
'wall oven combon': 'wall oven combo',
'survival babkpack': 'survival backpack',
'wire dstaples': 'wire staples',
'4in drain gratewhite': '4in drain grate white',
'shitch cover': 'switch cover',
'vitarera quartz': 'viatera quartz',
'5/8-in masonary drill bit': '5/8-in masonry drill bit',
'brinkman grill grates': 'brinkmann grill grates',
'pest repellant': 'pest repellent',
'bathun drain plunger': 'bathtub drain plunger',
'incounter gas cook range': 'encounter gas cook range',
'peat moss bails': 'peat moss bales',
'3-piece bath accessory kit in chrom': '3-piece bath accessory kit in chrome',
'alameda hickey laminate': 'alameda hickory laminate',
'flooring moisture barier': 'flooring moisture barrier',
'vinylcove base': 'vinyl cove base',
'ge diswasher': 'ge dishwasher',
'b10 led bub': 'b10 led bulb',
'cub cadetcordless hedge trimmer': 'cub cadet cordless hedge trimmer',
'hampton bay jewelery armoire wht': 'hampton bay jewelry armoire white',
'perenials': 'perennials',
'heat ventss': 'heat vents',
'mobil home glass door': 'mobile home glass door',
'lamanet floor cutter': 'laminate floor cutter',
'on off valvefor tub faucet': 'on off valve for tub faucet',
'assie grill fire and ash': 'aussie grill fire and ash',
'hanging worklight fixtures ceiling': 'hanging work light fixtures ceiling',
'20 amp tamper resitance duplex receptacle': '20 amp tamper resistant duplex receptacle',
'liqwuid nail': 'liquid nail',
'1/2 tee pvcp': '1/2 tee pvc',
'toilet repair kit cadet 3 flowise 2-piece 1.28 gpf round fro': 'toilet repair kit cadet 3 flowise 2-piece 1.28 gpf round front',
'50 amp turn look plug': '50 amp turn lock plug',
'6x6 colunm caps': '6x6 column caps',
'12 valleta': '12 valletta',
'pellitized lime': 'pelletized lime',
'concrete sonic tub': 'concrete sonic tube',
'110 air conditior an heat': '110 air conditioner and heat',
'what is best for settingfence posts in soil?': 'what is best for setting fence posts in soil?',
'washer dryer folding worksurface': 'washer dryer folding work surface',
'outdoor spigot spliter': 'outdoor spigot splitter',
'alumiunm gate': 'aluminum gate',
'lawm mower': 'lawn mower',
'door floor plate slideing doors': 'door floor plate sliding doors',
'akkegro': 'allegro',
'wead burner': 'weed burner',
'galvinized nails 3': 'galvanized nails 3',
'artifical turf border': 'artificial turf border',
'oppeuss light trim ring': 'oppeus light trim ring',
'12 ft john boat': '12ft jon boat',
'outdoor coucg': 'outdoor couch',
'drywall panel hoisst': 'drywall panel hoist',
'ego hainsaw': 'ego chainsaw',
'hibascus plant': 'hibiscus plant',
'pullbehind fertilizer spreader': 'pull behind fertilizer spreader',
'door latch uard': 'door latch guard',
'water suppy box': 'water supply box',
'octagon eve vents': 'octagon eave vents',
'el ctrical s ez': 'electrical sez',
'varnishe': 'varnish',
'klien rg6': 'klein rg6',
'floor matt': 'floor mat',
'60 shower ddor': '60 shower door',
'blue tapeexhaust fan/light': 'blue tape exhaust fan/light',
'rocks hydrophonics': 'rocks hydroponics',
'mesquito spray': 'mosquito spray',
'alumiun grove in': 'aluminum grove in',
'lithonia outdoor wall paks': 'lithonia outdoor wall packs',
'60 in. shower door brushed nicker': '60 in. shower door brushed nickel',
'makit 12v': 'makita 12v',
'black and yellow non skip tape': 'black and yellow non skid tape',
'skylifghts': 'skylights',
'led hale gin g9': 'led halogen g9',
'electrical pipe flexable': 'electrical pipe flexible',
'emt stroas': 'emt straps',
'ridged 1 emt conduit': 'rigid 1 emt conduit',
'baliey window roller shades': 'bailey window roller shades',
'hampton bay reswood valley 5 pc patio seating set with fire': 'hampton bay redwood valley 5 pc patio seating set with fire',
'lawn grass catchbag': 'lawn grass catcher bag',
'1/4 lauwan under layment': '1/4 lauan underlayment',
'window tintinig': 'window tinting',
'4 inch round bellbox cover': '4 inch round bell box cover',
'vinal latice fence': 'vinyl lattice fence',
'solar pest repelers': 'solar pest repellers',
'barn doorspring latches': 'barn door spring latches',
'3 gauge copper phhn': '3 gauge copper thhn',
'three wire hottube': 'three wire hot tub',
'shope cloths': 'shop clothes',
'bbostitch tool set': 'bostitch tool set',
'outdoor hightop dining': 'outdoor high top dining',
'delata raincan': 'delta raincan',
'soap wash maching tilde': 'soap wash machine tilde',
'16 ftdecking boards': '16 ft decking boards',
'1 amp receptical': '1 amp receptacle',
'outdoor gfi': 'outdoor gfci',
'bbq burner replacment': 'bbq burner replacement',
'levin 25 wat usb': 'levin 25 watt usb',
'delta diverte rhandle in rb': 'delta diverter handle in rb',
'3 pane craftsman door': '3 panel craftsman door',
'charolettetown': 'charlottetown',
'raised toelit sseat': 'raised toilet seat',
'webber spirit gas grill': 'weber spirit gas grill',
'adapter for extention cord': 'adapter for extension cord',
'bathrub and shower wall kits': 'bathtub and shower wall kits',
'sofit vents 4x16': 'soffit vents 4 x 16',
'1/2 inch isp water supply line': '1/2 inch ips water supply line',
'eurothem thermostatic valve': 'eurotherm thermostatic valve',
'plactic totes 36 inches wide': 'plastic totes 36 inches wide',
'pest control diat': 'pest control diet',
'black cobwoys star': 'black cowboys star',
'whirpool oven 5.1': 'whirlpool oven 5.1',
'min fridges for campers': 'mini fridges for campers',
'howards restore a finish': 'howards restor a finish',
'ge just cut fraiser fur': 'ge just cut fraser fir',
'25 watt warmlight bulb': '25 watt warm light bulb',
'kichen island': 'kitchen island',
'duel mount stainless steel sinks': 'dual mount stainless steel sinks',
'home sevalance cameras': 'home surveillance cameras',
'marbel vinyl tile': 'marble vinyl tile',
'30 entry door 9 litr': '30 entry door 9 lite',
'roxul sale n sound': 'roxul safe n sound',
'4 guage use': '4 gauge use',
'jigsaw tblades': 'jigsaw t blades',
'jigsaww blades': 'jigsaw blades',
'clawfoot tub cutain': 'clawfoot tub curtain',
'raised garden ed': 'raised garden bed',
'58.75x80 sliding glass door': '58.75x 80 sliding glass door',
'1/4 nich tee': '1/4 inch tee',
'alluminun wire splice': 'aluminum wire splice',
'2 sheet metal screrw': '2 sheet metal screw',
'non electically conductive epoxy': 'non electrically conductive epoxy',
'led fluoreecent light replacement': 'led fluorescent light replacement',
't8 8 ft 4-light flourescent fixture': 't8 8 ft 4-light fluorescent fixture',
'othor ant killer': 'ortho ant killer',
'spectacide for lawnscarpenter ants': 'spectracide for lawns carpenter ants',
'ccurved shower door': 'curved shower door',
'4in pvc electrcial boxes': '4in pvc electrical boxes',
'hampton bay fan replacemtn': 'hampton bay fan replacement',
'6\' remodel can valted celing cans': '6\' remodel can vaulted ceiling cans',
'roman tub faucers': 'roman tub faucets',
'flourescent paint by rustoleum': 'fluorescent paint by rustoleum',
'hidden fastners': 'hidden fasteners',
'otdoor sola': 'outdoor solar',
'solar post l8ghts': 'solar post lights',
'plus 3 tintet': 'plus 3 tinted',
'barbeque tools': 'barbecue tools',
'circular flourecent lights': 'circular fluorescent lights',
'rain barrells': 'rain barrels',
'gagarage storage cabinets': 'garage storage cabinets',
'brown blasplash tile': 'brown backsplash tile',
'evap cooler theromsat': 'evap cooler thermostat',
'undergroud telephone wire': 'underground telephone wire',
'cop mail adapter': 'cop male adapter',
'set crews for glass': 'set screws for glass',
'roybi lazer circular saw': 'ryobi laser circular saw',
'walnuit stain': 'walnut stain',
'ruber door extension': 'rubber door extension',
'home decorators cinamon': 'home decorators cinnamon',
'apoxy patch': 'epoxy patch',
'batroom fan heater light': 'bathroom fan heater light',
'commercial radient ceiling heaters': 'commercial radiant ceiling heaters',
'surveilance camera': 'surveillance camera',
'tub facet set': 'tub faucet set',
'solistone pebbble': 'solistone pebble',
'1 1/4 galvenized steel pipe fittings': '1 1/4 galvanized steel pipe fittings',
'22.4 cubit feet refrigerator': '22.4 cubic feet refrigerator',
'behr premium plus ultrta': 'behr premium plus ultra',
'autoficial grass': 'artificial grass',
'huskey scocket set': 'husky socket set',
'husky black toll boxes': 'husky black tool boxes',
'isunderlayment requiered for metal roof': 'is underlayment required for metal roof',
'safety glass with perscription': 'safety glass with prescription',
'polished brass 8 spread lavitory faucet': 'polished brass 8 spread lavatory faucet',
'heat only therostats': 'heat only thermostats',
'65 watt dim able': '65 watt dimmable',
'1-1/4 pocket hole screwsw': '1-1/4 pocket hole screws',
'wwod floor runner': 'wood floor runner',
'bostic wood floor glue': 'bostik wood floor glue',
'hand shovles': 'hand shovels',
'garage orgnize': 'garage organizer',
'diamond plate storge unit': 'diamond plate storage unit',
'silcone': 'silicone',
'packing suplies': 'packing supplies',
'ridgid planner': 'ridgid planer',
'shower fiberglas': 'shower fiberglass',
'curtain rod wrp': 'curtain rod wrap',
'fire place accessories gas loggs': 'fireplace accessories gas logs',
'recesseingd light housing': 'recessed light housing',
'100 amps circuit braker': '100 amps circuit breaker',
'delta satin nickle shower systems': 'delta satin nickel shower systems',
'auqatic shower & bath': 'aquatic shower',
'termini mosquito garlic spray': 'terminix mosquito garlic spray',
'arbourist safety climbing belt': 'arborist safety climbing belt',
'vynal wood fence': 'vinyl wood fence',
'acrylic primere': 'acrylic primer',
'20\' facia board': '20\' fascia board',
'17 1/2 high tolite': '17 1/2 high toilet',
'howard restore a finish': 'howard restor a finish',
'tub enclouseure with tub': 'tub enclosure with tub',
'leaf guards for stomr windows': 'leaf guards for storm windows',
'sliding tub soors': 'sliding tub doors',
'amdry wallpanel': 'amdry wall panel',
'22.1 refrierator': '22.1 refrigerator',
'fram boxes': 'frame boxes',
'patio tbricks': 'patio bricks',
'6 foot treshold': '6 foot threshold',
'florencet light cover': 'fluorescent light cover',
'taracota drain pan': 'terracotta drain pan',
'smaller single deadbolt lock': 'small single deadbolt lock',
'lmainate boards': 'laminate boards',
'acuria lattace panels': 'acurio lattice panels',
'adirondeck cusion': 'adirondack cushion',
'oscilating fan': 'oscillating fan',
'washing machine plug adapator': 'washing machine plug adapter',
'concrette pier': 'concrete pier',
'southren gray tile': 'southern gray tile',
'dealt portable table saw table': 'dewalt portable table saw table',
'matte heat resistant pain': 'matte heat resistant paint',
'White Temper Resistant Duplex Outlet': 'White Tamper Resistant Duplex Outlet',
'screws for deckin': 'screws for decking',
'20 gl. hose end sprayer': '20 gal. hose end sprayer',
'sliding door storage cabi nets': 'sliding door storage cabinets',
'tinted masonary sealer': 'tinted masonry sealer',
'kids toilet seateat': 'kids toilet seat eat',
'anderson storm door screen roller': 'andersen storm door screen roller',
'vaccuum cleaners for hardwood and carpet': 'vacuum cleaners for hardwood and carpet',
'copper baluseter': 'copper baluster',
'aluninion circular blade': 'aluminium circular blade',
'ceiling light nickle 2-light': 'ceiling light nickel 2-light',
'adirondac, patio chair': 'adirondack, patio chair',
'flourescent tube': 'fluorescent tube',
'polyurethane adhesiv': 'polyurethane adhesive',
'extirior clear spray paint': 'exterior clear spray paint',
'outdoor faucwts': 'outdoor faucets',
'asphaul based coating': 'asphalt based coating',
'3/8 couipling': '3/8 coupling',
'2x4x10 pressure treater': '2x4x10 pressure treated',
'koehler faucet': 'kohler faucet',
'led rop light clips': 'led rope light clips',
'square d double brakers': 'square d double breakers',
'30 inchesbathroom vanity': '30 inches bathroom vanity',
'1/2 \' copper fiting': '1/2 \' copper fitting',
'capital cap for colum': 'capital cap for column',
'grass turf pavewrs': 'grass turf pavers',
'lowvoltage indoor accent lights': 'low voltage indoor accent lights',
'dremel minimate cordless moto tool': 'dremel minimite cordless moto tool',
'96 right hand miter tyhoon ice': '96 right hand miter typhoon ice',
'magnet base tool loight': 'magnetic base tool light',
'robi 18v saw': 'ryobi 18v saw',
'5 light hanging chandielier': '5 light hanging chandelier',
'Moem faucet repair': 'Moen faucet repair',
'3x6 daltile white 101 kohler': '3x6 daltile white k101 kohler',
'lock cmbo': 'lock combo',
'trimmer/edger\'s, gas powered': 'trimmer/edgers, gas powered',
'generaor for fridge': 'generator for fridge',
'led light bulbs dimable spot': 'led light bulbs dimmable spot',
'outdoor seatting cushions': 'outdoor seating cushions',
'full size frigde': 'full size fridge',
'ASHPHALT SEALER': 'ASPHALT SEALER',
'behr ultra pint': 'behr ultra paint',
'emparador mosaic bamboo brick': 'emperador mosaic bamboo brick',
'bath mirror cabintes': 'bath mirror cabinets',
'floor squeege': 'floor squeegee',
'squeege': 'squeegee',
'allure golden oaksku579331': 'allure golden oak sku 579331',
'artificial turf for petrs': 'artificial turf for pets',
'8 foot florescent light bulb': '8 foot fluorescent light bulb',
'3x3 diamond thread plate': '3x3 diamond tread plate',
'handical rail': 'handicap rail',
'moen grab bar securemount': 'moen grab bar secure mount',
'ceiling mount electical box': 'ceiling mount electrical box',
'stainless steal hose clamps': 'stainless steel hose clamps',
'sod grass san agustino': 'sod grass san agustin',
'bateries 9v': 'batteries 9v',
'kohler brushed nickle framless shower doors': 'kohler brushed nickel frameless shower doors',
'mirro shower doors': 'mirror shower doors',
'daylillies': 'daylilies',
'fridgedaire fridge': 'frigidaire fridge',
'storage buiding 12\' x 20\'': 'storage building 12\' x 20\'',
'pvc valvez': 'pvc valves',
'socket magnectic extension': 'socket magnetic extension',
'shop vac aacessories': 'shop vac accessories',
'roll jp door': 'roll up door',
'rollup door': 'roll up door',
'steibler eltron': 'stiebel eltron',
'liquid itght non metalic': 'liquid tight non metallic',
'metalic lquid tight': 'metallic liquid tight',
'22 bin plastic drawer parts storage organiz': '22 bin plastic drawer parts storage organizer',
'marroon roof screws': 'maroon roof screws',
'battery opererated lighting': 'battery operated lighting',
'roybi pop up': 'ryobi pop up',
'connectorv 30': 'connector 30',
'ge gfi braker 30amp': 'ge gfci breaker 30 amp',
'pipe swer': 'pipe sewer',
'treaded pvc pipe fitting': 'threaded pvc pipe fitting',
'cornewr bathtub': 'corner bathtub',
'whirlpool apron bathtup': 'whirlpool apron bathtub',
'veranda facia': 'veranda fascia',
'rrecessed light trim ring': 'recessed light trim ring',
'1 light steele sconce': '1 light steel sconce',
'7\' 90 elboq': '7\' 90 elbow',
'drawer guides and slides': 'drawer glides and slides',
'christmsa dog': 'christmas dog',
'light weight coccrete': 'lightweight concrete',
'hardwoo flooring 2 1/4 in': 'hardwood flooring 2 1/4 in',
'garden hose filter attactchent': 'garden hose filter attachment',
'milwaukie saw blades': 'milwaukee saw blades',
'dewalt extention cord': 'dewalt extension cord',
'hampton bay high gloss jabot laminate': 'hampton bay high gloss jatoba laminate',
'20v blacker and decker charger': '20v black and decker charger',
'15 water depth bathub': '15 water depth bathtub',
'magnetized wall covering': 'magnetic wall covering',
'fire brick and morter': 'fire brick and mortar',
'anderson french wood patio door 400 series': 'andersen frenchwood patio door 400 series',
'outdoor baners': 'outdoor banners',
'osciallating blade to cut tile': 'oscillating blade to cut tile',
'one way valae': 'one way valve',
'black decker matris': 'black decker matrix',
'makita skill saw': 'makita skil saw',
'tuscon patio pavers': 'tucson patio pavers',
'plastic florring': 'plastic flooring',
'fungicidal seed innoculant': 'fungicidal seed inoculant',
'pcv coated hardware cloth': 'pvc coated hardware cloth',
'2x2 ceiling tilepantq22s': '2x2 ceiling tile paint 22s',
'rectangulat wihite ceramic sink bathroom': 'rectangular white ceramic sink bathroom',
'battery operataed wall light': 'battery operated wall light',
'72 inchtrack light': '72 inch track light',
'suny citrus fertilizer': 'sunny citrus fertilizer',
'48 inch aluminum shower curtin rod': '48 inch aluminum shower curtain rod',
'dehumidifyer': 'dehumidifier',
'earthquaike': 'earthquake',
'phillips led sparkle light bulbs': 'philips led sparkle light bulbs',
'metalic silver spray': 'metallic silver spray',
'all retaing wall': 'all retaining wall',
'high temperate sealant': 'high temperature sealant',
'greecian white porcelein marble': 'greecian white porcelain marble',
'shelves stailess stel': 'shelves stainless steel',
'wallmounted garage shelves': 'wall mounted garage shelves',
'remote meat thermom': 'remote meat thermometer',
'pvc threaded elbo': 'pvc threaded elbow',
'summit 20 in elctric range': 'summit 20 in electric range',
'groung fault electric outlet': 'ground fault electrical outlet',
'prenneols flower seeds': 'perennials flower seeds',
'hyrdaulic oil for kohler': 'hydraulic oil for kohler',
'hot/cold porcelin handles': 'hot/cold porcelain handles',
'white vanites with tops': 'white vanities with tops',
'exterier door keypad': 'exterior door keypad',
'purpor power': 'purple power',
'automatic drower closer': 'automatic drawer closer',
'potable firepace': 'portable fireplace',
'azelas': 'azaleas',
'mta distributions log splitter': 'mta distributors log splitter',
'standing town rack': 'standing towel rack',
'zinser stain cover': 'zinsser stain cover',
'weed trimer push type': 'weed trimmer push type',
'centipe grass seed': 'centipede grass seed',
'36 curved showered curtain rod': '36 curved shower curtain rod',
'4 quck grip 101': '4 quick grip 101',
'metal gringing weel 5/8': 'metal grinding wheel 5/8',
'weelbarrow': 'wheelbarrow',
'baraar emy': 'bazaar emy',
'wetbar sink and faucet': 'wet bar sink and faucet',
'perenial flowers': 'perennial flowers',
'infred turkey fryer': 'infrared turkey fryer',
'oil rubbed bronse bathroom lighting': 'oil rubbed bronze bathroom lighting',
'solor power lighting for exterior': 'solar power lighting for exterior',
'infloor heating antifreeze': 'in floor heating antifreeze',
'galvinized conduit pipe': 'galvanized conduit pipe',
'double curtain rod connecter': 'double curtain rod connector',
'drop cieling tiles 2ft by 4 ft': 'drop ceiling tiles 2ft by 4ft',
'plug in led night lite photocell': 'plug in led night light photocell',
'rough limber': 'rough lumber',
'48x48 windoww': '48x48 window',
'high intensity t5 flourescent lights': 'high intensity t5 fluorescent lights',
'brinly hardy 40 inc tow behind': 'brinly hardy 40 inch tow behind',
'ornge 5x7 rugs': 'orange 5x7 rugs',
'kitchenmaid built-in double drawer': 'kitchenaid built-in double drawer',
'safety latter': 'safety ladder',
'blind replacemetn': 'blind replacement',
'stainless steeel collated nails': 'stainless steel collated nails',
'hang rials barnyard doors': 'hang rails barnyard doors',
'tall black toliet': 'tall black toilet',
'fint tube': 'find tube',
'24 inches rerefrigerator': '24 inches refrigerator',
'ge microwave wall oven comb': 'ge microwave wall oven combo',
'presure treated': 'pressure treated',
'husky 46 9 drawer mobil': 'husky 46 9 drawer mobile',
'apartment size ge refrigertor stainless steel': 'apartment size ge refrigerator stainless steel',
'penedtrating stain': 'penetrating stain',
'briggsstraton 11 horse air filter': 'briggs stratton 11 horse air filter',
'hoovwe cordless vacuum cleaners': 'hoover cordless vacuum cleaners',
'tumbler dryer hose and claps': 'tumble dryer hose and clamps',
'antique truch': 'antique truck',
'hohler black and tan': 'kohler black and tan',
'spray and forget house nad deck': 'spray and forget house and deck',
'apriaire humidifier water panel': 'aprilaire humidifier water panel',
'unsanded groutr': 'unsanded grout',
'60 wat soft watt 2700k a19 dimibal led': '60 watt soft watt 2700k a19 dimmable led',
'7.5 mconnection for 9000 btu': '7.5 connection for 9000 btu',
'dimer switch and fan control': 'dimmer switch and fan control',
'granitecounter top cararra': 'granite countertop carrara',
'20 amp decor outlet ivory': '20 amp decora outlet ivory',
'rock wall papper': 'rock wallpaper',
'thin set fray': 'thin set gray',
'glass mirrior doors 72x80': 'glass mirror doors 72x80',
'heirloom whie': 'heirloom white',
'wood shelfing': 'wood shelving',
'kohler top mont bathroom sink': 'kohler top mount bathroom sink',
'outdoor dust to dawn light': 'outdoor dusk to dawn light',
'windowbalance': 'window balance',
'gunstock oak liamate': 'gunstock oak laminate',
'gardden benches': 'garden benches',
'strended electrical wire': 'stranded electrical wire',
'counter refinsher': 'counter refinishing',
'unfinished wood p-lant stand': 'unfinished wood plant stand',
'celing fan 60': 'ceiling fan 60',
'porta nailor': 'porta nailer',
't fittin': 't fitting',
'bousch lazer level gll2-80p': 'bosch laser level gll2-80p',
'2 1/2 inch nail boxe': '2 1/2 inch nail box',
'bonda body filler': 'bondo body filler',
'window manganetic lock': 'window magnetic lock',
'cat 5 cable uv restance': 'cat 5 cable uv resistance',
'3 4 toilet phlange': '3 4 toilet flange',
'aa batteried': 'aa batteries',
'6 pvc flixible coupling pipe': '6 pvc flexible coupling pipe',
'7 footaluminum awning': '7 foot aluminum awning',
'carburator': 'carburetor',
'water mainfold': 'water manifold',
'kholer bathroom wall lights': 'kohler bathroom wall lights',
'toro belt pully': 'toro belt pulley',
'paper lawn tefuse bags': 'paper lawn refuse bags',
'wadrobe moving boxes': 'wardrobe moving boxes',
'ultra clarifer, pool': 'ultra clarifier, pool',
'trash caninet slide': 'trash cabinet slide',
'craftig pvc cabinets': 'crafting pvc cabinets',
'plastic organozers': 'plastic organizers',
'rj45 crinp tool': 'rj45 crimp tool',
'darby 18 inch dishwasher': 'danby 18 inch dishwasher',
'10 x 10 gaxebo garden house': '10x10 gazebo garden house',
'colonial caseing': 'colonial casing',
'tarp for outsid furniture': 'tarp for outside furniture',
'phlne batteries': 'phone batteries',
'eatrhwise mower blades': 'earthwise mower blades',
'outdoor artifical lawn': 'outdoor artificial lawn',
'dual mount porcelin kitchen sinks': 'dual mount porcelain kitchen sinks',
'sflexible shower': 'flexible shower',
'savfavieh rug pad': 'safavieh rug pad',
'tigerwood perigo laminate flooring': 'tigerwood pergo laminate flooring',
'2\' flourescent lighting': '2\' fluorescent lighting',
'concerte stair railings': 'concrete stair railings',
'indoor infered heaters': 'indoor infrared heaters',
'tensil ties': 'tinsel ties',
'20 ampweather proof recepticles': '20 amp weatherproof receptacles',
'hdmi cabl': 'hdmi cable',
'matage double oven ranges': 'maytag double oven ranges',
'navarra sierra passage doorknob set': 'navarra sierra passage door knob set',
'outdoor furniture cover martha steward': 'outdoor furniture cover martha stewart',
'divonshire': 'devonshire',
'marine grade painr': 'marine grade paint',
'counter and appliance gaperaser': 'counter and appliance gap eraser',
'whirpool range hood 36': 'whirlpool range hood 36',
'flourecent': 'fluorescent',
'drain spoutts': 'drain spouts',
'1/4 shut off velves': '1/4 shut off valves',
'porta cool': 'portacool',
'yard walll': 'yard wall',
'kohler elongaterd toilet seat': 'kohler elongated toilet seat',
'kohler lighted tolet seats': 'kohler lighted toilet seats',
'cree led bub 6-pack': 'cree led bulb 6-pack',
'concrere chisel': 'concrete chisel',
'pedistal sink, 27\'': 'pedestal sink, 27\'',
'florsent replacement diffuser': 'fluorescent replacement diffuser',
'chlorox': 'clorox',
'core aeretor': 'core aerator',
'water proofing connector': 'waterproof connector',
'washer/dryr': 'washer/dryer',
'cambria java refridgerator': 'cambria java refrigerator',
'decrotive metal deck rail incecerts': 'decorative metal deck rail inserts',
'whirl pool water heater pilot': 'whirlpool water heater pilot',
'siemens double pole gfi': 'siemens double pole gfci',
'hampton bay alenxander oak': 'hampton bay alexander oak',
'32 inchvinyl screen doors': '32 inch vinyl screen doors',
'hamptonbay shaker cabinets wall': 'hampton bay shaker cabinets wall',
'3/8 entension': '3/8 extension',
'10x12 outdoor gazabos': '10x12 outdoor gazebos',
'seet metal tools': 'sheet metal tools',
'boch gll': 'bosch gll',
'dealt 8v screwdriver': 'dewalt 8v screwdriver',
'hand heald showers and ada grab bars': 'hand held showers and ada grab bars',
'200 amp outdoor circut breaker panel': '200 amp outdoor circuit breaker panel',
'fingerprint lockset': 'fingerprint locks',
'weekender powerwasher extension arms': 'weekender power washer extension arms',
'makita drill batterie charger': 'makita drill battery charger',
'ridgid fan': 'rigid fan',
'swifer wet cloth': 'swiffer wet cloth',
'hot water recirculator': 'hot water recirculation',
'riding mower blabes': 'riding mower blades',
'chain sherpeners': 'chain sharpeners',
'relief valve for rudd hot water heater': 'relief valve for ruud hot water heater',
'ceiling light brackt': 'ceiling light bracket',
'perferated pipe': 'perforated pipe',
'bath room sink accecories': 'bathroom sink accessories',
'ding room set': 'dining room set',
'2 ton expoxy': '2 ton epoxy',
'cutkler hammer breaker': 'cutler hammer breaker',
'red color cauking': 'red color caulking',
'strap and t hindge': 'strap and t hinge',
'screw driver 10 iches': 'screwdriver 10 inches',
'shower glass slelves': 'shower glass shelves',
'playststion 4 destiny bundle': 'playstation 4 destiny bundle',
'air conditiooning filter 14\'': 'air conditioning filter 14\'',
'sliding reversable patio door': 'sliding reversible patio door',
'rust oleam pinters touch black': 'rust oleum painters touch black',
'apron sink firecaly two bowl': 'apron sink fireclay two bowl',
'condesate pump': 'condensate pump',
'bronze outdoor ceiling dan': 'bronze outdoor ceiling fan',
'8 guage wire': '8 gauge wire',
'capacitor for quaterhorse motor 110 volts': 'capacitor for quarter horse motor 110 volts',
'anderson storm doors antique bronze': 'andersen storm doors antique bronze',
'gas enthonal free': 'gas ethanol free',
'is item at homedop': 'is item at home depot',
'drain stopper exstension': 'drain stopper extension',
'no tresspassing': 'no trespassing',
'100 gallon storage ben': '100 gallon storage bin',
'paint hardner': 'paint hardener',
'mystick permanent adhesive value pack': 'mystik permanent adhesive value pack',
'clear vlyvynal an rolls': 'clear polyvinyl and rolls',
'kliz primers': 'kilz primers',
'one way scrue removal tool': 'one way screw removal tool',
'stainless dishwaser smugde proof': 'stainless dishwasher smudge proof',
'hex shank drill bitt sets': 'hex shank drill bit sets',
'3.9 high effeciency front load washer': '3.9 high efficiency front load washer',
'concret patio floor': 'concrete patio floor',
'in the ground rodiron plant hanger': 'in the ground rod iron plant hanger',
'anderson storm door series 2500 sandtone polished brass': 'andersen storm door series 2500 sandstone polished brass',
'stainless steele screws': 'stainless steel screws',
'spray sealent for showers': 'spray sealant for showers',
'split line air conditioing': 'split line air conditioning',
'water softner pellet': 'water softener pellet',
'shelac': 'shellac',
'helti tools': 'hilti tools',
'PHILLIPS POST LIGHT BULB': 'PHILIPS POST LIGHT BULB',
'post light bulbl': 'post light bulb',
'tiolet': 'toilet',
'indoor home decor raindeer': 'indoor home decor reindeer',
'dinning tables': 'dining tables',
'patio dinning tables': 'patio dining tables',
'dremel router acessary': 'dremel router accessory',
'accordion door harware': 'accordion door hardware',
'edget tape': 'edge tape',
'verneer edging tool': 'veneer edging tool',
'drywall fastner': 'drywall fastener',
'heat pump acessories': 'heat pump accessories',
'scroll saw spirsl blade': 'scroll saw spiral blade',
'kitchen mat boack': 'kitchen mat black',
'chamberlain chain and pulliepaarts': 'chamberlain chain and pulley parts',
'swivle fitting for gas': 'swivel fitting for gas',
'SOLDERING IRORN': 'SOLDERING IRON',
'oaint marker': 'paint marker',
'upsidedowncan marker paint': 'upside down can marker paint',
'rope chritsmas lights': 'rope christmas lights',
'shower curtin rod': 'shower curtain rod',
'scoaring pads': 'scouring pads',
'spring set for price fister': 'spring set for price pfister',
'laquer thinner': 'lacquer thinner',
'mout faucet water filter': 'mount faucet water filter',
'NEUMATIC DOOR ARM': 'PNEUMATIC DOOR ARM',
'ceiling tile square fotage': 'ceiling tile square footage',
'ne angle base': 'neo angle base',
'1/4 in.-20 x 1 in. stainless steel flat-head socket cap scre': '1/4 in.-20 x 1 in. stainless steel flat-head socket cap screw',
'flexable pipe for propane': 'flexible pipe for propane',
'daltile accent peices': 'daltile accent pieces',
'specticide weed and grass rtu refill': 'spectracide weed and grass rtu refill',
'wood ddeck kits': 'wood deck kits',
'closetmaid hang9ing shelf': 'closetmaid hanging shelf',
'asb shower with curtian': 'asb shower with curtain',
'ptouch labeling tape': 'p touch labeling tape',
'misquito': 'mosquito',
'yard fooger': 'yard fogger',
'plastic splash guarf': 'plastic splash guard',
'3 light celling mount': '3 light ceiling mount',
'textered wallpaper': 'textured wallpaper',
'thermostat w remote senser': 'thermostat w remote sensor',
'spray oil prier': 'spray oil primer',
'maxx shower door': 'maax shower door',
'corion shower base': 'corian shower base',
'stapler hammers': 'staple hammers',
'2in non metalic standing coupling': '2in non metallic standing coupling',
'backyard xs capes': 'backyard xscapes',
'kraylon non skid': 'krylon non skid',
'pendent lights wit conversion kits': 'pendant lights with conversion kits',
'american wood charllotesville natural hickory': 'american wood charlottesville natural hickory',
'1/0 aqg': '1/0 awg',
'artci shag rug': 'arctic shag rug',
'omen single hole bathroom faucet': 'moen single hole bathroom faucet',
'john deere d100 sereissnow blade': 'john deere d100 series snow blade',
'brownbrick wallpaper': 'brown brick wallpaper',
'clear corrougated sheets': 'clear corrugated sheets',
'pressuer control valve': 'pressure control valve',
'white acryllic sheet': 'white acrylic sheet',
'wg307work jaw saw': 'wg307 worx jawsaw',
'plaskolight ceiling panel': 'plaskolite ceiling panel',
'charger y maintainer': 'charger and maintainer',
'waterless urinal conversion kist': 'waterless urinal conversion kit',
'hot water heating recirculitating pumps': 'hot water heater recirculating pumps',
'two gang carlton switch red dpt': 'two gang carlton switch red dot',
'kohler shower cartidges': 'kohler shower cartridges',
'rigid portable tool boxes': 'ridgid portable tool boxes',
'magniflier lamp': 'magnifier lamp',
'irragation controler': 'irrigation controller',
'minala rope': 'manila rope',
'wood sculture tool': 'wood sculpture tool',
'combination fan and lightwall switches': 'combination fan and light wall switches',
'acid stian': 'acid stain',
'bathtub deck mouted faucet with sprayer': 'bathtub deck mounted faucet with sprayer',
'attachments for zero turn touro': 'attachments for zero turn toro',
'wood pellats for grills': 'wood pellets for grills',
'whirpool 7000 washer': 'whirlpool 7000 washer',
'kitchenover sink lighting': 'kitchen over sink lighting',
'pegasus antique black side spalsh': 'pegasus antique black side splash',
'lock tight pl': 'loctite pl',
'landscasping ms international polish black stone': 'landscaping ms international polish black stone',
'1.4 cubit ft micro wave': '1.4 cubic ft microwave',
'square soffet vents': 'square soffit vents',
'exterior for pastic shutters': 'exterior for plastic shutters',
'exterior hous shutters': 'exterior house shutters',
'nutone ventiliation fan parts': 'nutone ventilation fan parts',
'belt anf tie rack': 'belt and tie rack',
'no elecetrity lights': 'no electricity lights',
'merola porcelain mosiac': 'merola porcelain mosaic',
'knotches': 'notches',
'savavieh soho': 'safavieh soho',
'double doors with security licks': 'double doors with security locks',
'glass tile backsp gpxtpnrf': 'glass tile backsp gpx pnrf',
'cabibet shelf pins': 'cabinet shelf pins',
'kolher repair': 'kohler repair',
'mantle brakets': 'mantle brackets',
'masonry painnt': 'masonry paint',
'muliti locks': 'multi locks',
'serger sewimg machine': 'serger sewing machine',
'mirror installation hardwawrd': 'mirror installation hardware',
'walnut porcelian': 'walnut porcelain',
'40 airens mulching kit': '40 ariens mulching kit',
'porcelaine cleaner': 'porcelain cleaner',
'monococcon 8x8 ceramic azuvi tile': 'monococcion 8x8 ceramic azuvi tile',
'black patioo set': 'black patio set',
'3/8 viyl j channel': '3/8 vinyl j channel',
'5/8 j chann': '5/8 j channel',
'home alerty': 'home alert',
'linen storage cabnit': 'linen storage cabinet',
'natur gas heat': 'natural gas heat',
'repacement toilet handle': 'replacement toilet handle',
'poyurethane clear satin': 'polyurethane clear satin',
'garbage desposal': 'garbage disposal',
'fire restaint paint': 'fire resistant paint',
'bathroom floting ball': 'bathroom floating ball',
'kitchen aid processer': 'kitchenaid processor',
'fire extinguishhers': 'fire extinguishers',
'trex fenc': 'trex fence',
'circular sawshop vac': 'circular saw shop vac',
'arylic wood paint': 'acrylic wood paint',
'appache mills plush tiles': 'apache mills plush tiles',
'phillips tuvpl-l 36': 'philips tuv pl-l 36',
'framed inerior door': 'framed interior door',
'end squicky floor': 'end squeaky floor',
'hoover prower scub deluxe': 'hoover power scrub deluxe',
'pernennial grass seed': 'perennial grass seed',
'phone linesplice connectors': 'phone line splice connectors',
'grow boz and pots': 'grow box and pots',
'organic leafgrow soil': 'organic leaf grow soil',
'6 foot pation table': '6 foot patio table',
'replacement patio unbrella pole': 'replacement patio umbrella pole',
'exteriro door 30 * 80': 'exterior door 30 * 80',
'oilrubbed bronze 3/8in riser': 'oil rubbed bronze 3/8in riser',
'latge storage containers': 'large storage containers',
'fridgidaire water filter': 'frigidaire water filter',
'sheeking for log cabin': 'seeking for log cabin',
'modern shower facuet': 'modern shower faucet',
'mirror, brushed nichel': 'mirror, brushed nickel',
'antic brass chandelier': 'antique brass chandelier',
'bufflo box wrench': 'buffalo box wrench',
'armstrong hardwood flooring422250z5p': 'armstrong hardwood flooring 422250z5p',
'mixet math faucet': 'mixet bath faucet',
'24 port patch pane': '24 port patch panel',
'black postlantern': 'black post lantern',
'needel valve': 'needle valve',
'wood ballusters': 'wood balusters',
'sharkbite sprinler': 'sharkbite sprinkler',
'1/2 hp genie screw drive garage door openner': '1/2 hp genie screw drive garage door opener',
'black dimmable gimble lights': 'black dimmable gimbal lights',
'power gable mount attic fac': 'power gable mount attic fan',
'door threshholds': 'door thresholds',
'rubber office chair sweel': 'rubber office chair wheel',
'16x7 garage door sandtone': '16x7 garage door sandstone',
'dal tile 12x24 porcelaine black tile': 'daltile 12x24 porcelain black tile',
'non ferroue saw blade': 'non ferrous saw blade',
'aluminum three way swich': 'aluminum three way switch',
'racheting wrench': 'ratcheting wrench',
'shower wal hook': 'shower wall hook',
'inflatable pool pumper': 'inflatable pool pump',
'cub cadet 46 balde': 'cub cadet 46 blade',
'spade terminalsnylon insulated': 'spade terminals nylon insulated',
'jimmyproof lock': 'jimmy proof lock',
'braSS pie fittings': 'braSS pipe fittings',
'brushed nichol hanging lights': 'brushed nickel hanging lights',
'lockbox keydoor lock': 'lockbox key door lock',
'white cabnet 30 inch base': 'white cabinet 30 inch base',
'ryobi replacemet batteries': 'ryobi replacement batteries',
'bath bord': 'bath board',
'aerp garden': 'aerogarden',
'white sign lettters': 'white sign letters',
'sqaure vessel sink': 'square vessel sink',
'i beam brackest': 'i beam brackets',
'paint for aluminun siding': 'paint for aluminum siding',
'digital temp monotor': 'digital temp monitor',
'floatinf shelving': 'floating shelving',
'light buld for stinger zapper': 'light bulb for stinger zapper',
'custom counterto': 'custom countertop',
'replacement delta faucet cartrigdge': 'replacement delta faucet cartridge',
'laundry bnasket': 'laundry basket',
'air conditon cooper soft': 'air conditioner copper soft',
'wood qwik bolts': 'wood kwik bolts',
'bolt conrete anchors': 'bolt concrete anchors',
'outdoor dining se?': 'outdoor dining set?',
'glass sheet mosiacs': 'glass sheet mosaics',
'whites parkle': 'white sparkle',
'fiskers titanium 1 1/2 loppers': 'fiskars titanium 1 1/2 loppers',
'cement mason bit': 'cement masonry bit',
'bananna leaves plant': 'banana leaves plant',
'fi nish screws': 'finish screws',
'tolet handle left hand': 'toilet handle left hand',
'sika repair shp': 'sika repair shop',
'murry circuit breakers 20 amps': 'murray circuit breakers 20 amps',
'hand pipe theader': 'hand pipe threader',
'powermate walkbehind trimmer': 'powermate walk behind trimmer',
'metal clothes handing carts': 'metal clothes hanging carts',
'electric radiatior heat': 'electric radiator heat',
'shopvac filter hepa': 'shop vac filter hepa',
'hampton bay fenving': 'hampton bay fencing',
'knife sharppener': 'knife sharpener',
'atttic heat barrier': 'attic heat barrier',
'wondow curtains': 'window curtains',
'american standard town square widespread facet': 'american standard town square widespread faucet',
'5.0 chest freezerz': '5.0 chest freezers',
'20 amp surger protector': '20 amp surge protector',
'f 30 flourescent light fixture': 'f30 fluorescent light fixture',
'1/2 inch rubber lep tips': '1/2 inch rubber leg tips',
'threader rod end coupler': 'threaded rod end coupler',
'lamated counter tops': 'laminate countertops',
'railing kit system round ballusters': 'railing kit system round balusters',
'sintetic grass': 'synthetic grass',
'landry sink': 'laundry sink',
'solar led light dust to dawn': 'solar led light dusk to dawn',
'pegro xp coffee step': 'pergo xp coffee step',
'maytag two door refridgerator': 'maytag two door refrigerator',
'reprobramable combination lock': 'programmable combination lock',
'pnematic flooring nails 16 gauge': 'pneumatic flooring nailer 16 gauge',
'outide dog kennel': 'outside dog kennel',
'6 incn door knocker': '6 inch door knocker',
'non programmable vertical thermost': 'non programmable vertical thermostat',
'windser light coco': 'windsor light coco',
'cooling towes': 'cooling towers',
'glacier bay shower catridge': 'glacier bay shower cartridge',
'ge discontinnued top freezers': 'ge discontinued top freezers',
'security camaras': 'security cameras',
'toiles partes': 'toilet parts',
'pegasus ntique brass': 'pegasus antique brass',
'water pic shower head chrome': 'waterpik shower head chrome',
'85 gall tall 4500': '85 gal tall 4500',
'contempery ceiling fans': 'contemporary ceiling fans',
'toile seat lid': 'toilet seat lid',
'milwaukee noncontact tester': 'milwaukee non contact tester',
'emser ocuntry': 'emser country',
'front screen for a gazeebo': 'front screen for a gazebo',
'fatpack 18v': 'fat pack 18v',
'bathroom kraft made': 'bathroom kraftmaid',
'1/4 qk connect x 1/8 mip': '1/4 quick connect x 1/8 mip',
'plate for faucet stoper': 'plate for faucet stopper',
'femaie gas fitting quick disonnect': 'female gas fitting quick disconnect',
'recesse light bulbs': 'recessed light bulbs',
'3m 60926 vapor catridges': '3m 60926 vapor cartridges',
'weather strip for commerial door': 'weather strip for commercial door',
'arcadia mettal locks': 'arcadia metal locks',
'gekko gauges': 'gecko gauges',
'frigidaire water firlters': 'frigidaire water filters',
'30 par haolgen bulbs': '30 par halogen bulbs',
'red devil scraperreplacement bldes': 'red devil scraper replacement blades',
'gcfi outlet': 'gfci outlet',
'mohawk oak wood fllors': 'mohawk oak wood floors',
'all porpose stools': 'all purpose stools',
'primered floor molding': 'primed floor molding',
'glass cleaner concintrete': 'glass cleaner concentrate',
'30 amp surface mount recepticle': '30 amp surface mount receptacle',
'60 x 100 aluminun mesh': '60 x 100 aluminum mesh',
'tile border black and whit': 'tile border black and white',
'peir mount black': 'pier mount black',
'xtra wide baby gates': 'extra wide baby gates',
'roffing caulk': 'roofing caulk',
'1/2 inc pvc treaded connector': '1/2 inch pvc threaded connector',
'electric hock for lift': 'electric shock for lift',
'greak': 'greek',
'airfilter 20x24': 'air filter 20x24',
'extenion cord storage': 'extension cord storage',
'shluter': 'schluter',
'circular saw rrip fence': 'circular saw rip fence',
'HEATED TOLIET SEAT': 'HEATED TOILET SEAT',
'rount magnet': 'round magnet',
'handi cap sink faucett': 'handicap sink faucet',
'arc fault circute breaker 1pole 15 amp': 'arc fault circuit breaker 1 pole 15 amp',
'oreck full reease carpet cleaner': 'oreck full release carpet cleaner',
'min split mounting brackets': 'mini split mounting brackets',
'kholer sink 20x17': 'kohler sink 20x17',
'heavy duty extensoion cordyellow only': 'heavy duty extension cord yellow only',
'3 newll post': '3 newel post',
'veraluz 4 light bathroom vanity': 'varaluz 4 light bathroom vanity',
'anual combo': 'annual combo',
'ciling pan': 'ceiling pan',
'syllicone lube': 'silicone lube',
'hdx 20\' hight velocity floor fan': 'hdx 20\' high velocity floor fan',
'30 inch kitchenaide cooktops': '30 inch kitchenaid cooktops',
'kusshuln concrete mixer': 'kushlan concrete mixer',
'roles of concreate mesh': 'roles of concrete mesh',
'hardward for pull out waste bin': 'hardware for pull out waste bin',
'glass towel bar braket': 'glass towel bar bracket',
'living room cabnets': 'living room cabinets',
'1-1/4 extention pvc': '1-1/4 extension pvc',
'metal double gain boxes': 'metal double gang boxes',
'fabric umbella': 'fabric umbrella',
'club cadet 46 belt': 'cub cadet 46 belt',
'window air conditionerriding lawn mowers': 'window air conditioner riding lawn mowers',
'digital cammera': 'digital camera',
'prppane pan': 'propane pan',
'oride plant': 'pride plant',
'home decorator outoddor patio cordless shades': 'home decorator outdoor patio cordless shades',
'1x1 square tubeing': '1x1 square tubing',
'water filter for frigidaire refrigirator': 'water filter for frigidaire refrigerator',
'linier track pendant': 'linear track pendant',
'medal stud finder': 'metal stud finder',
'mke m12 heated hoddie kit': 'mke m12 heated hoodie kit',
'bilt in pool': 'built in pool',
'buit in shower base': 'built in shower base',
'grohsafe roughin valve 35015': 'grohsafe rough in valve 35015',
'tank insualation': 'tank insulation',
'khols double toilet bowl': 'kohl\'s double toilet bowl',
'atlantiic can racks': 'atlantic can racks',
'skylites': 'skylights',
'kwikset passive door knob': 'kwikset passage door knob',
'loadspeaker': 'loudspeaker',
'koehler enamel cast iron sink': 'kohler enameled cast iron sink',
'tood handle lock': 'todd handle lock',
'sable brow grout': 'sable brown grout',
'rewd bird feeder': 'red bird feeder',
'lilac aera rug': 'lilac area rug',
'lightsavannah 3-light burnished ing fixtures': 'light savannah 3-light burnished ing fixtures',
'clear vynil for patio': 'clear vinyl for patio',
'intersate battery': 'interstate battery',
'jeldewen prairie mission door': 'jeld wen prairie mission door',
'honey oak tmolding': 'honey oak t molding',
'COMPLET SHOWER KIT': 'COMPLETE SHOWER KIT',
'36\' florescent light bulb': '36\' fluorescent light bulb',
'melon sunbrellap': 'melon sunbrella',
'28 kg washign machine': '28 kg washing machine',
'metal trash cas': 'metal trash cans',
'front door with side transome': 'front door with side transom',
'tribecia': 'tribeca',
'exterior shutters byrgundy': 'exterior shutters burgundy',
'light switchvers for little girls': 'light switches for little girls',
'miraposa whirlpool tub': 'mariposa whirlpool tub',
'schoolhouse pendqnt light': 'schoolhouse pendant light',
'cablrail': 'cable rail',
'vinly seat cleaner': 'vinyl seat cleaner',
'metal 3 tiertrolley': 'metal 3 tier trolley',
'white pendant uplight': 'white pendant light',
'lbathroom vanity lights chrome 3': 'bathroom vanity lights chrome 3',
'brushed nickel knobw': 'brushed nickel knobs',
'Renassaince': 'Renaissance',
'simpon strong tie wedge': 'simpson strong tie wedge',
'silocone repairs': 'silicone repairs',
'chocolate brown blackspash': 'chocolate brown backsplash',
'portabel tabel, plastic': 'portable table, plastic',
'safavieh courtyard dark biege area rug': 'safavieh courtyard dark beige area rug',
'theromometer smart': 'thermometer smart',
'hummngbird feeders': 'hummingbird feeders',
'diverter handels': 'diverter handles',
'dynamic desighn planters': 'dynamic design planters',
'pri meld flush bi fold doors': 'primed flush bifold doors',
'fisher and penkel': 'fisher and paykel',
'price of 1 gal beher marquee paint': 'price of 1 gal behr marquee paint',
'makersbot': 'makerbot',
'shelter logic sun sahde': 'shelterlogic sun shade',
'moen 4 port pex vavle': 'moen 4 port pex valve',
'ceiling fan extension wre': 'ceiling fan extension wire',
'single knobreplacement for shower kohler': 'single knob replacement for shower kohler',
'high gloss waterborne acrylic enamal': 'high gloss waterborne acrylic enamel',
'cattale': 'cattle',
'double deountable': 'double demountable',
'fantsastic': 'fantastic',
'milwaulkee battery charger': 'milwaukee battery charger',
'tandom 30 20': 'tandem 30 20',
'schluter kurdie': 'schluter kerdi',
'square buckes': 'square buckets',
'pro series vinal post': 'pro series vinyl post',
'krud cutter rust': 'krud kutter rust',
'warm espresso distresed': 'warm espresso distressed',
'levinton phone tv combo': 'leviton phone tv combo',
'makita planner knives': 'makita planer knives',
'barictric walk in tubs': 'bariatric walk in tubs',
'woper blades': 'wiper blades',
'kidcraft 18 doll furniture': 'kidkraft 18 doll furniture',
'stickon shower wall tower': 'stick on shower wall tower',
'riding lawn mower accesores': 'riding lawn mower accessories',
'towel bar nickel gracier 18\'': 'towel bar nickel glacier 18\'',
'compreshion repair kit': 'compression repair kit',
'huskie air compressors accessories': 'husky air compressors accessories',
'36 inch neo angle glass doooors': '36 inch neo angle glass doors',
'gerber cohort fine edg knife': 'gerber cohort fine edge knife',
'work force prpane heatr': 'workforce propane heater',
'progress lighting nottingdon': 'progress lighting nottington',
'dog leash atachments': 'dog leash attachments',
'elaphent ear': 'elephant ear',
'veeneer wood tape': 'veneer wood tape',
'siccsers': 'scissors',
'klien folding 6ft ruler': 'klein folding 6ft ruler',
'wall socket covedrs': 'wall socket covers',
'klein 8 inch plies': 'klein 8 inch pliers',
'screen doors: screen tight doors 32 in. unfinished wood t-ba': 'screen doors: screen tight doors 32 in. unfinished wood t-bar',
'g e dishwaaher': 'g e dishwasher',
'white semigloass': 'white semi gloss',
'shop swiming pools': 'shop swimming pools',
'rectangular baulaster': 'rectangular baluster',
'cedar 0roofing shingles': 'cedar roofing shingles',
'prehung door fanlite': 'prehung door fan lite',
'martha suart carpet tobacco leaf': 'martha stewart carpet tobacco leaf',
'furnance gas upflow': 'furnace gas upflow',
'spalted m aple': 'spalted maple',
'crimpling pleirs': 'crimping pliers',
'cold stem for glacer bay faucets': 'cold stem for glacier bay faucets',
'holegen flood light 35w': 'halogen flood light 35w',
'ridgid ipact wrench': 'rigid impact wrench',
'twin wsher dryer gas': 'twin washer dryer gas',
'Diamond HArd Acrylic Enamal': 'Diamond HArd Acrylic Enamel',
'stainless steel wall pannels': 'stainless steel wall panels',
'perenial bulb': 'perennial bulb',
'caroilne avenue 36 in single vanity in white marble top in l': 'caroline avenue 36 in single vanity in white marble top in l',
'broadway collectionchrome vanity fixture': 'broadway collection chrome vanity fixture',
'vogoro flower': 'vigoro flower',
'guarge parnel': 'gauge panel',
'sweeep pan': 'sweep pan',
'dewalt magnetic drive quide': 'dewalt magnetic drive guide',
'milwuakee magnetic drive guide': 'milwaukee magnetic drive guide',
'stainlss steel wire wheels': 'stainless steel wire wheels',
'deltile 3x6 ceramic blue': 'daltile 3x6 ceramic blue',
'discontinuedbrown and tan area rug': 'discontinued brown and tan area rug',
'frost protectionm': 'frost protection',
'5 tier chandalier': '5 tier chandelier',
'perry hickory laminte': 'perry hickory laminate',
'carpet chessnut': 'carpet chestnut',
'midnight blue irridecent': 'midnight blue iridescent',
'under cabinet black flourescent': 'under cabinet black fluorescent',
'concord charcole runner': 'concord charcoal runner',
'gibrallar post series cedar post': 'gibraltar post series cedar post',
'jefrrey court 3x12': 'jeffrey court 3x12',
'baking panb': 'baking pan',
'dustless ginder': 'dustless grinder',
'paw print doorbe;;': 'paw print doorbell;;',
'rustolium paint american accesnts': 'rustoleum paint american accents',
'costum key': 'custom key',
'halh circle glass shelf': 'half circle glass shelf',
'pedestial snk': 'pedestal sink',
'cordless celullar': 'cordless cellular',
'scounces wall light outside': 'sconces wall light outside',
'gas powere wood chipper': 'gas powered wood chipper',
'hampton bay brillant maple laminate': 'hampton bay brilliant maple laminate',
't8 flourescent bulbs 4 ft 2 pack': 't8 fluorescent bulbs 4 ft 2 pack',
'leminate floor alexandrea': 'laminate floor alexandria',
'reflector 50w flurecent': 'reflector 50w fluorescent',
'he xl 44 range': 'ge xl44 range',
'branch protctor paint': 'branch protector paint',
'rehargeable aa batteries for landscape lighting': 'rechargeable aa batteries for landscape lighting',
'msa safet work hat': 'msa safety work hat',
'conemporary hanging outdoor light fixture': 'contemporary hanging outdoor light fixture',
'piano door hing': 'piano door hinge',
'kohler whole houser generator': 'kohler whole house generator',
'dynasty collecion': 'dynasty collection',
'chesapeke nightstand in cherry': 'chesapeake nightstand in cherry',
'kohler glas shower door 4ft': 'kohler glass shower door 4ft',
'apartment size refreidgerator': 'apartment size refrigerator',
'centerpise': 'centerprise',
'motar for large tilw': 'mortar for large tile',
'bathroom lightning 48 inch': 'bathroom lighting 48 inch',
'panle clamp': 'panel clamp',
'roll up door fo shed': 'roll up door for shed',
'oil rubbed bronze airgap for dishwasher': 'oil rubbed bronze air gap for dishwasher',
'multi plub adapter': 'multi plug adapter',
'decorative clarance': 'decorative clarence',
'tamper resistant combo outet black': 'tamper resistant combo outlet black',
'polyurethane collors': 'polyurethane colors',
'scrool lever': 'scroll lever',
'gentec smoke detector': 'gentex smoke detector',
'kohler claxton biscuit sink': 'kohler caxton biscuit sink',
'strapping for cielings': 'strapping for ceilings',
'wall mounteddrop leaf table': 'wall mounted drop leaf table',
'chamberlain intercomm': 'chamberlain intercom',
'sumpter oask': 'sumpter oak',
'torino chandler 5 light bn': 'torino chandelier 5 light bn',
'allure red mahoghany': 'allure red mahogany',
'ge personal eletrical home security': 'ge personal electric home security',
'for rent sighn': 'for rent sign',
'coper clad aluminum': 'copper clad aluminum',
'homeywell cool moisture humidifier filters': 'honeywell cool moisture humidifier filters',
'hdc fairlawm jasper cane': 'hdc fairlawn jasper cane',
'wire fen c e': 'wire fence',
'cap screww everbilt 1/4in x2in': 'cap screw everbilt 1/4in x2in',
'metal urathane': 'metal urethane',
'blitz colth': 'blitz cloth',
'commercial accunts': 'commercial accounts',
'electic chainsaw worx': 'electric chainsaw worx',
'power toll accesories': 'power tool accessories',
'leviton - decora 3 gang midway nylon wall plate - light almo': 'leviton - decora 3 gang midway nylon wall plate - light almond',
'pond filter mediumpond filter pads': 'pond filter media pond filter pads',
'tall wine cabnet': 'tall wine cabinet',
'bulk calking': 'bulk caulking',
'insolated cooler with a strap': 'insulated cooler with a strap',
'concete placer': 'concrete placer',
'transmissin leak stopper': 'transmission leak stopper',
'toilet in buisk': 'toilet in buick',
'black wire hidder': 'black wire hider',
'braid trim ceramic title molding': 'braid trim ceramic tile molding',
'laundry tub fosets valves': 'laundry tub faucets valves',
'schlage plymoth orbit oil rubbed bronze': 'schlage plymouth orbit oil rubbed bronze',
'romanic poetry flat interior paint': 'romantic poetry flat interior paint',
'worklight 500 watt bullbs': 'worklight 500 watt bulbs',
'elvies ornament': 'elvis ornament',
'dpcam camera': 'dropcam camera',
'clorine tabs for septic': 'chlorine tabs for septic',
'interor door framed': 'interior door frame',
'hot dipped galvanized screwes': 'hot dipped galvanized screws',
'14 ft. w x29 ft. l x 14 ft.h': '14 ft. w x 29 ft. x 14 ft.h',
'water resistent top': 'water resistant top',
'galvinize 2 in box of screws': 'galvanized 2 in box of screws',
'taupe teasure carpet': 'taupe treasure carpet',
'nickle vanity lighting mosaics': 'nickel vanity lighting mosaics',
'heat circualtor': 'heat circulator',
'flexible pvc joing': 'flexible pvc joint',
'14 metal abresive blade': '14 metal abrasive blade',
'foldin g patio doors': 'folding patio doors',
'primeline mirror sliding doors': 'prime line mirror sliding doors',
'sanora maple flooring': 'sonora maple flooring',
'plastic paint containwes with lid': 'plastic paint containers with lid',
'deck fasting systems': 'deck fastening systems',
'long handled squeege window cleaning': 'long handled squeegee window cleaning',
'lsnd scape trim edger': 'landscape trim edger',
'rust oleum aged iron': 'rustoleum aged iron',
'redi ledge cooner': 'redi ledge corner',
'milwakee work radio': 'milwaukee work radio',
'progress piedmot': 'progress piedmont',
'home security camera cablee': 'home security camera cable',
'white rock daltale': 'white rock daltile',
'japenes lilacs': 'japanese lilacs',
'thickrubber mat': 'thick rubber mat',
'topdown bottom up shades': 'top down bottom up shades',
'locktite 9oz 2in1 premium sealant': 'loctite 9oz 2in1 premium sealant',
'evaporative thermstate': 'evaporative thermostat',
'red devil paint cleanaer': 'red devil paint cleaner',
'beer wine refrigeratr': 'beer wine refrigerator',
'forced air vents covrs': 'forced air vents covers',
'ew drops marquee paint': 'dew drops marquee paint',
'kitchen sink and fawcet black dual mount': 'kitchen sink and faucet black dual mount',
'dimmable fluoreecent': 'dimmable fluorescent',
'textured 6 pannel hollow core primed composite prehung inter': 'textured 6 panel hollow core primed composite prehung inter',
'dakato 4 light': 'dakota 4 light',
'playset handels': 'playset handles',
'vauhhan hammers': 'vaughan hammers',
'sterling frosted glass shower ath doors': 'sterling frosted glass shower bath doors',
'autom tic drawer lite': 'automatic drawer light',
'all trellisses': 'all trellises',
'american standard 5324.019 enlongate toilet seat': 'american standard 5324.019 elongated toilet seat',
'15 in built in maytag trash compactorr': '15 in built in maytag trash compactor',
'3 butto pico pj-3b': '3 button pico pj-3b',
'ligth': 'light',
'sissors': 'scissors'
}
|
dnc1994/Kaggle-Playground
|
typo_dict.py
|
Python
|
mit
| 184,114
|
[
"Amber",
"CRYSTAL",
"ESPResSo"
] |
302b312bff642cd702773060045e188592345fc9914d13a891c33fbd095dc095
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import math
from random import gauss
from scipy import special
from dates import get_previous_day, get_next_day
def get_missing_data_point(required_dates, daily_data, date):
    """
    Calculate a suitable data point with relevant probability from known info.

    required_dates: full list of dates the series must cover; its length sets
        the spread of the random fallback.
    daily_data: mapping of date -> known data value.
    date: the date whose value is missing.
    """
    prev_day = get_previous_day(date)
    next_day = get_next_day(date)
    if prev_day in daily_data and next_day in daily_data:
        # Neighbouring data points exist: interpolate between their VALUES
        # with gaussian smoothing. (Bug fix: previously the day keys
        # themselves were passed, but get_smoothed_value expects the
        # neighbouring data values, not dates.)
        return get_smoothed_value(daily_data[prev_day], daily_data[next_day])
    # No neighbouring data: take a probabilistic guess.
    return get_gaussian_random(len(required_dates))
def get_gaussian_random(time_range):
    """
    Draw a random data point from a gaussian centred on 50 and clamped to
    [0, 100], with a spread chosen so extremes stay rare over `time_range`.
    """
    # The real data hits 100 and 0 once each across the whole time range, so
    # over a longer range any single entry is less likely to be extreme. Pick
    # the standard deviation so that P(outside [0, 100]) ~= 1 / time_range.
    lower_bound = 0
    upper_bound = 100
    centre = 50
    extremity_chance = 1 / time_range
    sigma_count = special.erfinv(1 - extremity_chance) * math.sqrt(2)
    sigma = (upper_bound - centre) / sigma_count
    sample = gauss(centre, sigma)
    while sample < lower_bound or sample > upper_bound:
        # resample on the (vanishingly rare) out-of-range draw
        sample = gauss(centre, sigma)
    # scaled by 24 before returning -- presumably converting an hourly figure
    # to a daily total; TODO confirm against the callers
    return 24 * round(sample)
def get_smoothed_value(prev, next):
    """
    Interpolate a data point from its neighbours: average the previous and
    next days' values, then add gaussian noise centred on that midpoint.
    """
    midpoint = (prev + next) / 2
    # Noise scale is a fifth of the gap between the midpoint and a neighbour,
    # so the result falls between prev and next almost always.
    spread = abs(midpoint - next) / 5
    return round(gauss(midpoint, spread))
|
googleinterns/sgonks
|
project/services/data_updater/scripts/data_generator.py
|
Python
|
apache-2.0
| 2,700
|
[
"Gaussian"
] |
644b3548b391a9704ff3c69e111e97fd24dd87e10a0b11e62ccf23f474192302
|
# -*- coding: utf-8 -*-
# Copyright 2015 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import os
import sys
import time
import unittest
import pytest
import django
from splinter import Browser
from .base import BaseBrowserTests, get_browser
from .fake_webapp import EXAMPLE_APP
from .is_element_present_nojs import IsElementPresentNoJSTest
# Make the fake Django project importable and configure Django before the
# test classes below are defined (the splinter "django" driver needs a
# configured Django environment).
sys.path.append("tests/fake_django")
os.environ["DJANGO_SETTINGS_MODULE"] = "settings"
django.setup()
class DjangoClientDriverTest(
    BaseBrowserTests, IsElementPresentNoJSTest, unittest.TestCase
):
    """Exercise the splinter "django" test-client driver.

    Generic browser behaviour is inherited from BaseBrowserTests and
    IsElementPresentNoJSTest; the tests below cover driver specifics --
    mostly features the JavaScript-less django driver must refuse with
    NotImplementedError.
    """
    # One shared browser per test class; quit() runs as a pytest finalizer.
    @pytest.fixture(autouse=True, scope='class')
    def setup_browser(self, request):
        request.cls.browser = get_browser('django')
        request.addfinalizer(request.cls.browser.quit)
    # Every test starts from the example app's front page.
    @pytest.fixture(autouse=True)
    def visit_example_app(self):
        self.browser.visit(EXAMPLE_APP)
    def test_should_support_with_statement(self):
        with Browser("django") as internet:
            self.assertIsNotNone(internet)
    def test_attach_file(self):
        "should provide a way to change file field value"
        file_path = os.path.join(
            os.path.abspath(os.path.dirname(__file__)), "mockfile.txt"
        )
        self.browser.attach_file("file", file_path)
        self.browser.find_by_name("upload").click()
        html = self.browser.html
        # The upload endpoint echoes the content type and file body back.
        self.assertIn("text/plain", html)
        self.assertIn(open(file_path, "rb").read().decode("utf-8"), html)
    def test_forward_to_none_page(self):
        "should not fail when trying to forward to none"
        browser = Browser("django")
        browser.visit(EXAMPLE_APP)
        browser.forward()
        self.assertEqual(EXAMPLE_APP, browser.url)
        browser.quit()
    def test_can_clear_password_field_content(self):
        "django should not be able to clear"
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_name("password").first.clear()
    def test_can_clear_tel_field_content(self):
        "django should not be able to clear"
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_name("telephone").first.clear()
    def test_can_clear_text_field_content(self):
        "django should not be able to clear"
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_name("query").first.clear()
    def test_cant_switch_to_frame(self):
        "django driver should not be able to switch to frames"
        with self.assertRaises(NotImplementedError) as cm:
            self.browser.get_iframe("frame_123")
            self.fail()
        e = cm.exception
        self.assertEqual("django doesn't support frames.", e.args[0])
    def test_simple_type(self):
        """
        django won't support type method
        because it doesn't interact with JavaScript
        """
        with self.assertRaises(NotImplementedError):
            self.browser.type("query", "with type method")
    def test_simple_type_on_element(self):
        """
        django won't support type method
        because it doesn't interact with JavaScript
        """
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_name("query").type("with type method")
    def test_slowly_typing(self):
        """
        django won't support type method
        because it doesn't interact with JavaScript
        """
        with self.assertRaises(NotImplementedError):
            self.browser.type("query", "with type method", slowly=True)
    def test_slowly_typing_on_element(self):
        """
        django won't support type method
        on element because it doesn't interact with JavaScript
        """
        with self.assertRaises(NotImplementedError):
            query = self.browser.find_by_name("query")
            query.type("with type method", slowly=True)
    def test_cant_mouseover(self):
        "django should not be able to put the mouse over the element"
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_css("#visible").mouse_over()
    def test_cant_mouseout(self):
        "django should not be able to mouse out of an element"
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_css("#visible").mouse_out()
    def test_links_with_nested_tags_xpath(self):
        links = self.browser.find_by_xpath('//a/span[text()="first bar"]/..')
        self.assertEqual(
            len(links),
            1,
            'Found not exactly one link with a span with text "BAR ONE". %s'
            % (map(lambda item: item.outer_html, links)),
        )
    def test_finding_all_links_by_non_ascii_text(self):
        "should find links by non ascii text"
        non_ascii_encodings = {
            "pangram_pl": u"Jeżu klątw, spłódź Finom część gry hańb!",
            "pangram_ja": u"天 地 星 空",
            "pangram_ru": u"В чащах юга жил бы цитрус? Да, но фальшивый экземпляр!",
            "pangram_eo": u"Laŭ Ludoviko Zamenhof bongustas freŝa ĉeĥa manĝaĵo kun spicoj.",
        }
        for key, text in non_ascii_encodings.items():
            link = self.browser.links.find_by_text(text)
            self.assertEqual(key, link["id"])
    def test_cookies_extra_parameters(self):
        """Cookie can be created with extra parameters."""
        timestamp = int(time.time() + 120)
        self.browser.cookies.add({'sha': 'zam'}, expires=timestamp)
        # Reach into the underlying django test client to verify the expiry.
        cookie = self.browser._browser.cookies['sha']
        assert timestamp == cookie['expires']
class DjangoClientDriverTestWithCustomHeaders(unittest.TestCase):
    """Verify that custom headers passed to Browser("django") reach the app."""
    @classmethod
    def setUpClass(cls):
        # A single browser shared by the class; the headers ride along on
        # every request it makes.
        cls.browser = Browser(
            "django",
            custom_headers={
                "X-Splinter-Customheaders-1": "Hello",
                "X-Splinter-Customheaders-2": "Bye",
            },
        )
    @classmethod
    def tearDownClass(cls):
        cls.browser.quit()
    def test_create_a_phantomjs_with_custom_headers(self):
        # The /headers endpoint echoes request headers into the page body.
        self.browser.visit(EXAMPLE_APP + "headers")
        for echoed in (
            "X-Splinter-Customheaders-1: Hello",
            "X-Splinter-Customheaders-2: Bye",
        ):
            self.assertTrue(self.browser.is_text_present(echoed))
|
cobrateam/splinter
|
tests/test_djangoclient.py
|
Python
|
bsd-3-clause
| 6,394
|
[
"VisIt"
] |
2a69f52e3dfd18a311556d5446b3211ef20bbef91b808cd4ddf2cf8aa13a59ad
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Base classes for the selection writers
======================================
Specialized SelectionWriters are derived from
:class:`SelectionWriterBase`. Override the :meth:`~SelectionWriterBase._write_head`,
:meth:`~SelectionWriterBase._translate`, and :meth:`~SelectionWriterBase._write_tail`
methods.
.. autoclass:: SelectionWriterBase
:members: __init__, write, _translate, _write_head, _write_tail, comment
.. autofunction:: join
"""
from __future__ import absolute_import
import six
from six.moves import range
import os.path
from ..lib import util
from . import _SELECTION_WRITERS
def join(seq, string="", func=None):
    """Create a list from sequence.
    *string* is appended to each element but the last.
    *func* is applied to every element before appending *string*.
    An empty *seq* yields an empty list (previously raised
    :exc:`IndexError` on ``seq[-1]``).
    """
    if func is None:
        func = lambda x: x
    if not seq:
        # an empty selection simply produces no terms
        return []
    return [func(x) + string for x in seq[:-1]] + [func(seq[-1])]
class _Selectionmeta(type):
    # Auto register upon class creation: any class with a non-None ``format``
    # attribute (a string or an iterable of strings) is entered into the
    # module-level _SELECTION_WRITERS registry under the upper-cased format.
    def __init__(cls, name, bases, classdict):
        # Bug fix: was ``type.__init__(type, ...)``, which initialized the
        # ``type`` object itself instead of the newly created class ``cls``.
        type.__init__(cls, name, bases, classdict)
        try:
            fmt = util.asiterable(classdict['format'])
        except KeyError:
            # class does not declare a format -> nothing to register
            pass
        else:
            for f in fmt:
                if f is None:
                    continue
                f = f.upper()
                _SELECTION_WRITERS[f] = cls
class SelectionWriterBase(six.with_metaclass(_Selectionmeta)):
    """Export a selection in MDAnalysis to a format usable in an external package.
    The :class:`SelectionWriterBase` writes a selection string to a file
    that can be used in another package such as `VMD`_, `PyMOL`_,
    `Gromacs`_ or `CHARMM`_. In this way, analysis and visualization
    can be done with the best or most convenient tools at hand.
    :class:`SelectionWriterBase` is a base class and child classes are
    derived with the appropriate customizations for the package file
    format.
    .. _VMD: http://www.ks.uiuc.edu/Research/vmd/
    .. _PyMol: http://www.pymol.org/
    .. _CHARMM: http://www.charmm.org/
    .. _Gromacs: http://www.gromacs.org/
    .. versionchanged:: 0.11.0
       Can now also write to a :class:`~MDAnalysis.lib.util.NamedStream` instead
       of a normal file (using :class:`~MDAnalysis.lib.util.openany`).
    .. versionchanged:: 0.16.0
       Remove the `wa` mode. The file is now open when the instance is created
       and closed with the :meth:`close` method or when exiting the `with`
       statement.
    """
    #: Name of the format.
    format = None
    #: Extension of output files.
    ext = None
    #: Special character to continue a line across a newline.
    continuation = ''
    #: Comment format string; should contain '%s' or ``None`` for no comments.
    commentfmt = None
    # Default number of index terms per output line (see ``numterms``).
    default_numterms = 8
    def __init__(self, filename, mode="w", numterms=None, preamble=None, **kwargs):
        """Set up for writing to *filename*.
        Parameters
        ----------
        filename:
            output file
        mode:
            create a new file ("w"), or append ("a") to existing file ["w"]
        numterms:
            number of individual index numbers per line for output
            formats that write multiple entries in one line. If set
            to 0 or ``False`` then no special formatting is done [8]
        preamble:
            string that is written as a comment at the top of the file []
        kwargs:
            use as defaults for :meth:`write`
        """
        self.filename = util.filename(filename, ext=self.ext)
        if not mode in ('a', 'w'):
            raise ValueError("mode must be one of 'w', 'a', not {0!r}".format(mode))
        self.mode = mode
        # NOTE(review): mode[0] equals mode for 'w'/'a'; presumably a leftover
        # of the removed 'wa' mode (see class docstring) -- confirm before
        # simplifying.
        self._current_mode = mode[0]
        if numterms is None or numterms < 0:
            self.numterms = self.default_numterms
        elif numterms is False:
            # numterms=False explicitly disables line wrapping
            self.numterms = 0
        else:
            self.numterms = numterms
        self.preamble = preamble
        self.otherargs = kwargs # hack
        # counter used to auto-name selections (see write())
        self.number = 0
        # file stays open until close() / context-manager exit
        self._outfile = util.anyopen(self.filename, mode=self._current_mode)
        self.write_preamble()
    def comment(self, s):
        """Return string *s* interpolated into the comment format string.
        If no :attr:`SelectionWriterBase.commentfmt` is defined (None) then the
        empty string is returned because presumably there is no way to enter
        comments into the file.
        A newline is appended to non-empty strings.
        """
        if self.commentfmt is None:
            return ''
        return self.commentfmt % s + '\n'
    def write_preamble(self):
        """Write a header, depending on the file format."""
        if self.preamble is None:
            return
        self._outfile.write(self.comment(self.preamble))
    def write(self, selection, number=None, name=None, frame=None, mode=None):
        """Write selection to the output file.
        Parameters
        ----------
        selection:
            a :class:`MDAnalysis.core.groups.AtomGroup`
        number:
            selection will be named "mdanalysis<number>"
            (``None`` auto increments between writes; useful
            when appending) [``None``]
        name:
            selection will be named *name* (instead of numbered) [``None``]
        frame:
            write selection of this frame (or the current one if
            ``None`` [``None``]
        """
        u = selection.universe
        if frame is not None:
            u.trajectory[frame] # advance to frame
        else:
            try:
                frame = u.trajectory.ts.frame
            except AttributeError:
                frame = 1 # should catch cases when we are analyzing a single PDB (?)
        name = name or self.otherargs.get('name', None)
        if name is None:
            if number is None:
                # auto-increment when neither a name nor a number was given
                self.number += 1
                number = self.number
            # vars() pulls the local ``number`` into the format call
            name = "mdanalysis{number:03d}".format(**vars())
        # build whole selection in one go (cleaner way to deal with
        # to deal with line breaks after self.numterms entries)
        # selection_list must contain entries to be joined with spaces or linebreaks
        selection_terms = self._translate(selection.atoms)
        # numterms == 0 means "no wrapping": emit everything as one line
        step = self.numterms or len(selection.atoms)
        out = self._outfile
        self._write_head(out, name=name)
        for iatom in range(0, len(selection.atoms), step):
            line = selection_terms[iatom:iatom + step]
            out.write(" ".join(line))
            # continuation marker only between full lines, never after the last
            if len(line) == step and not iatom + step == len(selection.atoms):
                out.write(' ' + self.continuation + '\n')
        out.write(' ') # safe so that we don't have to put a space at the start of tail
        self._write_tail(out)
        out.write('\n') # always terminate with newline
    def close(self):
        """Close the file
        .. versionadded:: 0.16.0
        """
        self._outfile.close()
    def _translate(self, atoms, **kwargs):
        """Translate atoms into a list of native selection terms.
        - build list of ALL selection terms as if this was a single line, e.g.
          ``['index 12 |', 'index 22 |', 'index 33']``
        - only one term per atom!!
        - terms *must* be strings
        - something like::
           " ".join(terms)
          must work
        """
        raise NotImplementedError
    def _write_head(self, out, **kwargs):
        """Initial output to open file object *out*."""
        pass
    def _write_tail(self, out, **kwargs):
        """Last output to open file object *out*."""
        pass
    # Context manager support to match Coordinate writers
    # all file handles use a with block in their write method, so these do nothing special
    def __enter__(self):
        return self
    def __exit__(self, *exc):
        self.close()
|
kain88-de/mdanalysis
|
package/MDAnalysis/selections/base.py
|
Python
|
gpl-2.0
| 8,918
|
[
"CHARMM",
"Gromacs",
"MDAnalysis",
"PyMOL",
"VMD"
] |
5f01a0a402199a8258dffca0dbd6ed3bb08741b3bd20a7c7d271c8042b35f4d9
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Brian Pursley
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, argparse
class ComparisonNetwork(list):
    """A comparison network: a list of (a, b) wire-index pairs applied in order."""

    def __str__(self):
        # Render one parallel stage per line; a stage ends as soon as a
        # comparator touches a wire already used within the current stage.
        stages = [[]]
        touched = []
        for comparator in self:
            if comparator[0] in touched or comparator[1] in touched:
                stages.append([])
                touched = []
            stages[-1].append(comparator)
            touched.append(comparator[0])
            touched.append(comparator[1])
        return "\n".join(str(stage).replace(" ", "") for stage in stages)

    def sortBinarySequence(self, sequence):
        # Operate on the bits of an integer: whenever bit a is 0 and bit b is
        # 1, move the set bit from position b down to position a, so ones end
        # up in the low-order positions.
        bits = sequence
        for a, b in self:
            if (bits >> a) & 1 < (bits >> b) & 1:
                bits = (bits - 2 ** b) | 2 ** a
        return bits

    def sortSequence(self, sequence):
        # Apply every comparator to a copy of the sequence, exchanging the
        # two entries whenever they are out of order.
        items = list(sequence)
        for a, b in self:
            if items[a] > items[b]:
                items[a], items[b] = items[b], items[a]
        return items

    def getMaxInput(self):
        # Highest wire index referenced by any comparator (0 if empty).
        highest = 0
        for a, b in self:
            highest = max(highest, a, b)
        return highest

    def svg(self):
        # Draw each wire as a horizontal line and each comparator as a
        # vertical connector with end dots, laid out column by column.
        scale = 1
        col_step = scale * 35
        row_step = scale * 20
        connectors = ''
        x = col_step
        busy = []
        for a, b in self:
            if a in busy or b in busy:
                # wire reuse: advance to a fresh column
                x += col_step
                busy = []
            for wire in busy:
                if (wire > a and wire < b) or (wire > b and wire < a):
                    # an earlier comparator in this column would overlap: nudge right
                    x += col_step / 3
                    break
            ya = row_step + a * row_step
            yb = row_step + b * row_step
            connectors += "<circle cx='%s' cy='%s' r='%s' style='stroke:black;stroke-width:1;fill=yellow' />" % (x, ya, 3)
            connectors += "<line x1='%s' y1='%s' x2='%s' y2='%s' style='stroke:black;stroke-width:%s' />" % (x, ya, x, yb, 1)
            connectors += "<circle cx='%s' cy='%s' r='%s' style='stroke:black;stroke-width:1;fill=yellow' />" % (x, yb, 3)
            busy.append(a)
            busy.append(b)
        width = x + col_step
        wire_count = self.getMaxInput() + 1
        height = (wire_count + 1) * row_step
        document = "<?xml version='1.0' encoding='utf-8'?>"
        document += "<!DOCTYPE svg>"
        document += "<svg width='%spx' height='%spx' xmlns='http://www.w3.org/2000/svg'>" % (width, height)
        for wire in range(0, wire_count):
            y = row_step + wire * row_step
            document += "<line x1='%s' y1='%s' x2='%s' y2='%s' style='stroke:black;stroke-width:%s' />" % (0, y, width, y, 1)
        document += connectors
        document += "</svg>"
        return document
class SortingNetworkChecker:
    """Check networks via the 0-1 principle: a comparison network on N wires
    sorts every input iff it sorts all 2**N binary sequences."""

    def __init__(self, numberOfInputs):
        self.numberOfInputs = numberOfInputs
        self.maxSequenceToCheck = 2 ** numberOfInputs
        # Every correctly sorted 0/1 pattern: k zeros followed by ones,
        # encoded as an integer (the ones occupy the low-order bits).
        self.sortedBinarySequences = [
            int("0" * k + "1" * (numberOfInputs - k), 2)
            for k in range(0, numberOfInputs + 1)
        ]

    def isSortingNetwork(self, cn):
        # Start at 1: the all-zero sequence is trivially sorted by any network.
        return all(
            cn.sortBinarySequence(candidate) in self.sortedBinarySequences
            for candidate in range(1, self.maxSequenceToCheck)
        )
def readComparisonNetwork(filename):
    """Build a ComparisonNetwork from *filename*, or from stdin when
    *filename* is falsy. Each line must be a Python list of (i, j) tuples.
    """
    # NOTE(review): eval() executes arbitrary code from the file/stdin;
    # ast.literal_eval would be safer for untrusted input -- confirm callers.
    cn = ComparisonNetwork()
    if filename:
        with open(filename, 'r') as f:
            for line in f:
                cn += eval(line)
    else:
        for line in sys.stdin:
            cn += eval(line)
    return cn
def main():
    """Command-line entry point: check / render / save / apply a network."""
    # NOTE(review): relies on a module-level ``import argparse`` not visible
    # in this chunk -- confirm it is imported at the top of the file.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", metavar="inputfile", help="specify a file containing comparison network definition")
    parser.add_argument("-o", "--output", metavar="outputfile", nargs='?', const='', help="specify a file for saving the comparison network definition")
    parser.add_argument("-c", "--check", action="store_true", help="check whether it is a sorting network")
    parser.add_argument("-s", "--sort", metavar="list", nargs='?', const='', help="sorts the list using the input comparison network")
    parser.add_argument("--svg", metavar="outputfile", nargs='?', const='', help="generate SVG")
    args = parser.parse_args()
    if args.check:
        cn = readComparisonNetwork(args.input)
        # The checker needs the number of inputs, i.e. max index + 1.
        checker = SortingNetworkChecker(cn.getMaxInput() + 1)
        print checker.isSortingNetwork(cn)
    # ``nargs='?', const=''`` lets each flag appear without a value, in
    # which case the attribute is '' -- hence the ``or ... == ""`` tests.
    if args.svg or args.svg == "":
        cn = readComparisonNetwork(args.input)
        if args.svg == "":
            print cn.svg()
        else:
            with open(args.svg, "w") as f:
                f.write(cn.svg())
    if args.output or args.output == "":
        cn = readComparisonNetwork(args.input)
        if args.output == "":
            print str(cn)
        else:
            with open(args.output, "w") as f:
                f.write(str(cn))
    if args.sort or args.sort == "":
        cn = readComparisonNetwork(args.input)
        # SECURITY NOTE(review): ``eval`` on user-supplied text.
        if args.sort == "":
            inputSequence = eval(sys.stdin.readline())
        else:
            inputSequence = eval(args.sort)
        print cn.sortSequence(inputSequence)
if __name__ == "__main__":
    main()
|
weissan/BlockQuicksort
|
LomutoBlockQuickSortGenerator/sortingnetwork.py
|
Python
|
gpl-3.0
| 5,530
|
[
"Brian"
] |
c92605fed7a6ab99e657770824ba32a30a520bf8e8bdf49c5acd0b2ccb28c4b1
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Test neurom._neuronfunc functionality'''
import tempfile
from nose import tools as nt
import os
import numpy as np
from neurom import fst, load_neuron, NeuriteType
from neurom.fst import _neuronfunc as _nf
from neurom.core import make_soma, Neurite, Section
from neurom.core import _soma
from neurom.core.dataformat import POINT_TYPE
from neurom.core.population import Population
from neurom.io.datawrapper import BlockNeuronBuilder
from utils import _close, _equal
_PWD = os.path.dirname(os.path.abspath(__file__))
H5_PATH = os.path.join(_PWD, '../../../test_data/h5/v1/')
NRN = load_neuron(os.path.join(H5_PATH, 'Neuron.h5'))
SWC_PATH = os.path.join(_PWD, '../../../test_data/swc')
SIMPLE = load_neuron(os.path.join(SWC_PATH, 'simple.swc'))
def test_soma_surface_area():
    """Soma of the simple morphology: sphere of radius 1, area 4*pi."""
    nt.eq_(_nf.soma_surface_area(SIMPLE), 12.566370614359172)
def test_soma_surface_areas():
    """Per-neuron soma surface areas: one entry for the single neuron."""
    nt.eq_(_nf.soma_surface_areas(SIMPLE), [12.566370614359172, ])
def test_soma_radii():
    """Soma radii of the population: the simple neuron has radius 1."""
    nt.eq_(_nf.soma_radii(SIMPLE), [1., ])
def test_trunk_section_lengths():
    """Lengths of the two trunk sections of the simple neuron."""
    nt.eq_(_nf.trunk_section_lengths(SIMPLE), [5.0, 4.0])
def test_trunk_origin_radii():
    """Radii at the origin of each trunk section."""
    nt.eq_(_nf.trunk_origin_radii(SIMPLE), [1.0, 1.0])
def test_trunk_origin_azimuths():
    """Azimuth angles at the origin of each trunk section."""
    nt.eq_(_nf.trunk_origin_azimuths(SIMPLE), [0.0, 0.0])
def test_trunk_origin_elevations():
    """Elevation angles of neurite trunks relative to the soma centre."""
    class Mock(object):
        pass
    n0 = Mock()
    n1 = Mock()
    # Soma at the origin; trunk start points below give elevations
    # 0, +pi/2 and -pi/2 respectively.
    s = make_soma([[0, 0, 0, 4]])
    t0 = Section(((1, 0, 0, 2), (2, 1, 1, 2)))
    t0.type = NeuriteType.basal_dendrite
    t1 = Section(((0, 1, 0, 2), (1, 2, 1, 2)))
    t1.type = NeuriteType.basal_dendrite
    n0.neurites = [Neurite(t0), Neurite(t1)]
    n0.soma = s
    t2 = Section(((0, -1, 0, 2), (-1, -2, -1, 2)))
    t2.type = NeuriteType.basal_dendrite
    n1.neurites = [Neurite(t2)]
    n1.soma = s
    pop = Population([n0, n1])
    nt.eq_(list(_nf.trunk_origin_elevations(pop)),
           [0.0, np.pi/2., -np.pi/2.])
    # Filtering by the only type present yields the same result.
    nt.eq_(
        list(_nf.trunk_origin_elevations(pop, neurite_type=NeuriteType.basal_dendrite)),
        [0.0, np.pi/2., -np.pi/2.])
    # No axons or apical dendrites in the population -> empty results.
    nt.eq_(len(_nf.trunk_origin_elevations(pop, neurite_type=NeuriteType.axon)),
           0)
    nt.eq_(len(_nf.trunk_origin_elevations(pop, neurite_type=NeuriteType.apical_dendrite)),
           0)
@nt.raises(Exception)
def test_trunk_elevation_zero_norm_vector_raises():
    """A zero-length trunk direction vector must raise.

    NOTE(review): assumes the NRN fixture contains such a degenerate
    trunk -- the decorator converts the raised exception into a pass.
    """
    _nf.trunk_origin_elevations(NRN)
def test_sholl_crossings_simple():
    """Sholl crossing counts of the simple neuron at increasing radii."""
    center = SIMPLE.soma.center
    cases = (
        ([], []),
        ([1.0], [2]),
        ([1.0, 5.1], [2, 4]),
        ([1., 4., 5.], [2, 4, 5]),
    )
    for radii, expected in cases:
        nt.eq_(expected,
               list(_nf.sholl_crossings(SIMPLE, center, radii=radii)))
def load_swc(string):
    """Write *string* to a temporary .swc file and load it as a neuron."""
    with tempfile.NamedTemporaryFile(prefix='test_neuron_func', mode='w',
                                     suffix='.swc') as handle:
        handle.write(string)
        handle.flush()
        neuron = load_neuron(handle.name)
    return neuron
def test_sholl_analysis_custom():
    """Sholl crossings for three hand-built morphologies.

    Recreates the morphologies from Fig 2 of
    http://dx.doi.org/10.1016/j.jneumeth.2014.01.016
    """
    #recreate morphs from Fig 2 of
    #http://dx.doi.org/10.1016/j.jneumeth.2014.01.016
    radii = np.arange(10, 81, 10)
    center = 0, 0, 0
    # morph_A: two straight, unbranched neurites -> 2 crossings everywhere.
    morph_A = load_swc('''\
 1 1  0  0 0 1. -1
 2 3  0  0 0 1.  1
 3 3 80  0 0 1.  2
 4 4  0  0 0 1.  1
 5 4 -80 0 0 1.  4''')
    nt.eq_(list(_nf.sholl_crossings(morph_A, center, radii=radii)),
           [2, 2, 2, 2, 2, 2, 2, 2])
    # morph_B: each neurite fans out into 5 branches at |x| = 35,
    # ending near |x| = 51 -> 10 crossings at radii 40 and 50 only.
    morph_B = load_swc('''\
 1 1   0   0 0 1. -1
 2 3   0   0 0 1.  1
 3 3  35   0 0 1.  2
 4 3  51  10 0 1.  3
 5 3  51   5 0 1.  3
 6 3  51   0 0 1.  3
 7 3  51  -5 0 1.  3
 8 3  51 -10 0 1.  3
 9 4 -35   0 0 1.  2
10 4 -51  10 0 1.  9
11 4 -51   5 0 1.  9
12 4 -51   0 0 1.  9
13 4 -51  -5 0 1.  9
14 4 -51 -10 0 1.  9
''')
    nt.eq_(list(_nf.sholl_crossings(morph_B, center, radii=radii)),
           [2, 2, 2, 10, 10, 0, 0, 0])
    # morph_C: both neurites run along +x and branch at x = 65,
    # ending near x = 85 -> 10 crossings at radii 70 and 80.
    morph_C = load_swc('''\
 1 1   0   0 0 1. -1
 2 3   0   0 0 1.  1
 3 3  65   0 0 1.  2
 4 3  85  10 0 1.  3
 5 3  85   5 0 1.  3
 6 3  85   0 0 1.  3
 7 3  85  -5 0 1.  3
 8 3  85 -10 0 1.  3
 9 4  65   0 0 1.  2
10 4  85  10 0 1.  9
11 4  85   5 0 1.  9
12 4  85   0 0 1.  9
13 4  85  -5 0 1.  9
14 4  85 -10 0 1.  9
''')
    nt.eq_(list(_nf.sholl_crossings(morph_C, center, radii=radii)),
           [2, 2, 2, 2, 2, 2, 10, 10])
    #view.neuron(morph_C)[0].savefig('foo.png')
|
juanchopanza/NeuroM
|
neurom/fst/tests/test_neuronfunc.py
|
Python
|
bsd-3-clause
| 6,265
|
[
"NEURON"
] |
3b5edf5ce0fe7fcc0f95229f4f939b63847ea0d76d16fbad8d8ad0c2e0928c10
|
# BOM_SOLAR_ASCII_TO_NETCDF.PY
#
# The input proprietary format supported by this example is a Bureau of
# Meteorology (BoM) ArcView ASCII grid file. The format contains a header
# (defines the shape and size of the grid), space-separated ASCII data, and
# tail metadata that contains internal BoM processing information.
#
# The ASCII data is converted to a NumPy array. The precision of values within
# the grid are reduced to 1 decimal place (for consistency between variables),
# and the missing value is redefined to -999.0.
#
# File-specific metadata (from the header, the tail, and from the processing in
# this script) is written to a JSON file. In a second-step, the JSON file is
# added to the NetCDF file by netcdf_json_wrapper.py. This two-step
# process allows manual editing or checking of the metadata if required.
#
# By default the following files are created:
# - [verbose filename].json
# - [verbose filename].nc
# The selection of outputs can be customised by parameters to
# bom_ascii_to_flt_nc().
import os
import numpy as np
from scipy import interpolate
import re
import hashlib
from datetime import timedelta
from datetime import datetime
from collections import OrderedDict
import netcdf_builder as nb
import json_handler as jh
import numpy_routines as nr
from solar_obstime_lookup import get_lookup_list
def split_bom_file(fname):
    """
    Split a BoM ASCII grid file into its header, data and tail components
    End-of-line characters and whitespace at either end of a line are
    striped off. The head and tail components are returned as a list of
    lines. The data component is returned as a list of lists, with each
    inner list being a list of (string) values in a line.
    The number of rows and columns in the grid are determined from the
    head component as it is read ('ncols' and 'nrows', respectively).
    A ValueError is raised if either the number of data elements in a line
    is not equal to the expected number of columns, or if the number of
    data lines is not equal to the expected number of rows.
    """
    headmeta = []
    datarows = []
    tailmeta = []
    ncols = -1
    nrows = -1
    isheader = 1
    # ``with`` guarantees the handle is closed even when the ValueError
    # below is raised (the original leaked the handle in that case).
    with open(fname, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            a = line.split()
            # Assume header lines contain only 2 whitespace-split elements
            if (len(a) == 2) and isheader:
                headmeta.append(line)
                if a[0].lower() == 'ncols': ncols = float(a[1])
                elif a[0].lower() == 'nrows': nrows = float(a[1])
            # Else if the number of elements==ncols the line is probably data
            elif len(a) == ncols:
                isheader = 0
                datarows.append(a)
            # Else if we don't have the expected number of data rows there is
            # an error
            elif len(datarows) != nrows:
                # BUG FIX: the original concatenated str + float here, which
                # raised TypeError instead of the intended ValueError.
                raise ValueError(
                    "Line contains data but not of length " + str(ncols))
            # Anything else is tail metadata
            else:
                tailmeta.append(line)
    return (headmeta, datarows, tailmeta)
def resample_data(datarows,metalist):
    """
    Convert a list of lists of (string) values to a 2D NumPy array. Create
    dimension vectors based on extent and cell-size information in 'metalist'.
    Create target dimension vectors based on pre-defined extents.
    Check the correctness of the given missing value (in metalist) against
    the data. Replace all missing values with a standard missing value.
    Copy data from the input array to an output array defined by the
    target dimension vectors. The precision of values in the output array are
    reduced to 1 decimal place (for consistency). Return the output array,
    target dimension vectors and a dict that gives the extents of the target
    dimension vectors and the new missing value.
    """
    # Parse metalist into a dict.  Assumes each line is "key value" with a
    # numeric value (built by split_bom_file from the header).
    meta = {y[0].lower():float(y[1]) for y in [x.split() for x in metalist]}
    miss = -999.0   # standard missing value used across all variables
    # ASCII dimensions: cell-centre coordinates from lower-left corner.
    input_lon = nr.create_vector(meta['xllcorner']+meta['cellsize']/2.0, \
                                 meta['ncols'], \
                                 meta['cellsize'])
    input_lat = nr.create_vector(meta['yllcorner']+meta['cellsize']/2.0, \
                                 meta['nrows'], \
                                 meta['cellsize'])
    input_lat = input_lat[::-1] # reverse elements (row 0 = northernmost)
    # Output dimensions - not remapping, so commented out
    #(xs, xn, xc) = (112, 841, 0.05) # start, number, cellsize
    #(ys, yn, yc) = (-44, 681, 0.05) # start, number, cellsize
    #output_lon = nr.create_vector(xs, xn, xc)
    #output_lat = nr.create_vector(ys, yn, yc)
    #output_lat = output_lat[::-1] # reverse elements
    # Copy datarows into a 2D array
    input_data = np.array(datarows,dtype=np.float64)
    # Sanity-check the declared missing value against the first data row.
    meta['nodata_value'] = \
        check_bom_missing(input_data[0,:], 99999.9, meta['nodata_value'])
    if meta['nodata_value']!=miss:
        input_data = nr.replace_values(input_data,meta['nodata_value'],miss)
        print "Replaced missing data %s with %s" % (meta['nodata_value'],miss)
    # Create output array
    #output_data = np.zeros((output_lat.size,output_lon.size))+miss
    # Copy data onto output grid
    #output_data = nr.copy_grids(input_data,input_lon,input_lat,
    #                            output_data,output_lon,output_lat)
    # Reduce precision of values to 1 decimal place and convert to f32
    #output_data = output_data.round(decimals=1)
    #output_data = np.float32(output_data)
    input_data = input_data.round(decimals=1)
    input_data = np.float32(input_data)
    # Extent/step metadata describing the returned grid.
    input_dict = {'xmin':min(input_lon).round(decimals=3),
                  'xmax':max(input_lon).round(decimals=3),
                  'xstep':meta['cellsize'],
                  'xnum':meta['ncols'],
                  'xunits':'degrees_east',
                  'ymin':min(input_lat).round(decimals=3),
                  'ymax':max(input_lat).round(decimals=3),
                  'ystep':meta['cellsize'],
                  'ynum':meta['nrows'],
                  'yunits':'degrees_north',
                  'missing':miss}
    return (input_data, input_lat, input_lon, input_dict)
def check_bom_missing(arr, test, default):
    """Sanity-check a declared missing value against actual data.

    If more than 70% of the elements of *arr* equal *test*, the data is
    assumed to use *test* as its missing value and *test* is returned;
    otherwise *default* (the declared missing value) is returned.
    """
    occurrences = np.count_nonzero(arr == test)
    return test if occurrences > arr.size * 0.7 else default
def create_obs_time(latvec,lonvec,d1):
    """
    Create a data layer of the observation time for each pixel from a lookup
    table. The lookup table defines the minutes after the start hour (d1) at
    five degree increments of latitude from -10 to -44 N.
    Linearly interpolate from the five degree increment values to the latitude
    vector.
    A 2D data layer the same size as latvec,lonvec is returned.
    Raises ValueError if no lookup table covers the given date-hour.
    """
    # Choose lookup table based on date and hour
    xlook,ylook = None,None
    for obj in get_lookup_list():
        if obj.is_applicable(d1):
            xlook,ylook = obj.get_lookup()
            break
    if xlook is None:
        # BUG FIX: strftime format was "%Y-Ym-%dT%HZ" (typo for %m); the
        # unreachable "return None" after this raise has been removed.
        raise ValueError("Date-Hour not found in solar_observation_time_lookup: "+d1.strftime("%Y-%m-%dT%HZ"))
    # The lookup table values are nearly linear but not quite (R^2>=0.9944)
    # So going to choose cubic spline interpolation
    # http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html
    # Want monotonicaly increasing xlook (latitudes).
    xlook = xlook[::-1]
    ylook = ylook[::-1]
    tck = interpolate.splrep(xlook,ylook,s=0)
    obsvec = interpolate.splev(latvec,tck,der=0)
    # Replicate obsvec along width of lonvec
    X,obsarr = np.meshgrid(lonvec,obsvec)
    # Reduce precision to that given by the lookup table and convert to float
    obsarr = obsarr.round(decimals=1)
    obsarr = np.float32(obsarr)
    return obsarr
def create_meta(d1,d2,datadict):
    """
    Create an OrderedDict of global and variable metadata specific to this
    file from various sources of information including the start and end dates,
    the data extents dict and the history information.
    """
    meta = OrderedDict()
    # Make creation and modification dates
    dmodify = datetime.utcnow().strftime("%Y%m%dT%H%M%S")
    # Create multiline history text
    history = """
{:s}: Reformatted to NetCDF
Solar data are written with 1 decimal precision although all the original data
values are integers.
Observation times were interpolated from the date-satellite-latitude lookup
table to the latitude vector with a cubic spline method. The interpolated
vector was then replicated across the grid for each longitude. Observation
times were written with 1 decimal precision for consistency with the lookup
table.
Solar and Observation time data have a consistent no-data value of {:.0f}.
""".format(dmodify,datadict['missing'])
    # The date*, geospatial* and time* attributes come from the Attribute
    # Convention for Dataset Discovery (ACDD). See,
    # http://www.unidata.ucar.edu/software/netcdf/conventions.html
    meta['history'] = history
    meta['date_created'] = "unknown"
    meta['date_modified'] = dmodify
    # Spatial extents copied from the grid built by resample_data().
    meta['geospatial_lat_min'] = "{:.2f}".format(datadict['ymin'])
    meta['geospatial_lat_max'] = "{:.2f}".format(datadict['ymax'])
    meta['geospatial_lat_step'] = "{:.2f}".format(datadict['ystep'])
    meta['geospatial_lat_units'] = datadict['yunits']
    meta['geospatial_lon_min'] = "{:.2f}".format(datadict['xmin'])
    meta['geospatial_lon_max'] = "{:.2f}".format(datadict['xmax'])
    meta['geospatial_lon_step'] = "{:.2f}".format(datadict['xstep'])
    meta['geospatial_lon_units'] = datadict['xunits']
    # Temporal coverage: one day of hourly layers.
    meta['time_coverage_start'] = d1.strftime("%Y-%m-%dT%HZ")
    meta['time_coverage_end'] = d2.strftime("%Y-%m-%dT%HZ")
    meta['time_coverage_duration'] = "P1D"
    meta['time_coverage_resolution'] = "PT1H"
    # Per-variable CF attributes.
    meta['solar_dni:long_name'] = "Gridded hourly solar direct normal irradiance"
    meta['solar_dni:standard_name'] = "surface_downwelling_shortwave_flux_in_air_due_to_direct_fraction"
    meta['solar_dni:units'] = "W m-2"
    meta['solar_dni:grid_mapping'] = 'crs'
    meta['solar_ghi:long_name'] = "Gridded hourly solar global horizontal irradiance"
    meta['solar_ghi:standard_name'] = "surface_downwelling_shortwave_flux_in_air"
    meta['solar_ghi:units'] = "W m-2"
    meta['solar_ghi:grid_mapping'] = 'crs'
    meta['obs_time:long_name'] = 'Interpolated instantaneous pixel observation time relative to time dimension value'
    meta['obs_time:standard_name'] = 'time'
    meta['obs_time:units'] = 'minutes'
    meta['obs_time:grid_mapping'] = 'crs'
    # Coordinate reference system (WGS84) attributes for the crs container.
    meta['crs:grid_mapping_name'] = 'latitude_longitude'
    meta['crs:long_name'] = 'WGS 1984 datum'
    meta['crs:longitude_of_prime_meridian'] = str(0.0)
    meta['crs:semi_major_axis'] = str(6378137.0)
    meta['crs:inverse_flattening'] = str(298.257223563)
    return meta
def bom_ascii_to_nc(year,dates,froot):
    """Convert a day of hourly BoM ASCII solar grids to one NetCDF file.

    Writes froot+'.nc' (dni, ghi and obs_time variables) and froot+'.json'
    (the file-specific metadata).  Hours with no input file get only an
    observation-time layer; hours before the first valid file are queued
    in missed_dates until lat/lon vectors are known.  Returns froot.
    """
    debug = False
    latvec = None         # set from the first valid input grid
    lonvec = None
    ncobj = None          # NetCDF handle, created lazily
    latslice = slice(None,None,None)
    lonslice = slice(None,None,None)
    missed_dates = []     # hours seen before lat/lon were known
    adict = {}
    for dti,dt in enumerate(dates):
        dni,ghi = get_solar_files(str(year),dt)
        index = (slice(dti,None,None),latslice,lonslice)
        if dni is not None:
            # Split the input file into metadata and data components
            dni_head,dni_rows,dni_history = split_bom_file(dni)
            # Resample the data
            dni_data,dni_lat,dni_lon,dni_dict = resample_data(dni_rows,dni_head)
        else:
            if debug: print "No dni data:",dt
        if ghi is not None:
            # Split the input file into metadata and data components
            ghi_head,ghi_rows,ghi_history = split_bom_file(ghi)
            # Resample the data
            ghi_data,ghi_lat,ghi_lon,ghi_dict = resample_data(ghi_rows,ghi_head)
        else:
            if debug: print "No ghi data:",dt
        # Skip initial dates until we get a valid file because we need lat,lon
        if latvec is None and dni is not None:
            if debug: print "Using dni_lat:",dt
            latvec = dni_lat
            lonvec = dni_lon
            adict = dni_dict
            miss = adict['missing']
        if latvec is None and ghi is not None:
            if debug: print "Using ghi_lat:",dt
            latvec = ghi_lat
            lonvec = ghi_lon
            adict = ghi_dict
            miss = adict['missing']
        if latvec is None:
            if debug: print "Save miss:",dt
            missed_dates.append(dt)
            continue
        # Initialise the netcdf object
        if ncobj is None:
            if debug: print "Define ncobj:",dt
            dni_var = 'solar_dni'
            ghi_var = 'solar_ghi'
            obs_var = 'obs_time'
            ncobj = nb.nc_open(froot+'.nc','w',format='NETCDF4_CLASSIC')
            nb.nc_set_timelatlon(ncobj,None,len(latvec),len(lonvec))
            nb.nc_set_var(ncobj,dni_var,fill=miss,zlib=True)
            nb.nc_set_var(ncobj,ghi_var,fill=miss,zlib=True)
            nb.nc_set_var(ncobj,obs_var,fill=miss,zlib=True)
            nb.nc_set_var(ncobj,'crs',dims=(),dtype="i4") # Grid mapping container
            nb.nc_add_data(ncobj,'latitude',latvec)
            nb.nc_add_data(ncobj,'longitude',lonvec)
            dni_nc = ncobj.variables[dni_var]
            ghi_nc = ncobj.variables[ghi_var]
            obs_nc = ncobj.variables[obs_var]
            # Add observation time layers for any missed dates
            for mi,md in enumerate(missed_dates):
                if debug: print "Add missed:",md
                obs_data = create_obs_time(latvec,lonvec,md)
                #nb.nc_add_data(ncobj,obs_var,obs_data,
                #               index=(slice(mi,None,None),latslice,lonslice))
                obs_nc[mi,:,:] = obs_data
            missed_dates = []
        # Calculate the observation time layer
        obs_data = create_obs_time(latvec,lonvec,dt)
        obs_nc[dti,:,:] = obs_data
        # Add data
        if dni is not None:
            #nb.nc_add_data(ncobj,dni_var,dni_data,index=index)
            if debug: print "Add dni:",dni
            dni_nc[dti,:,:] = dni_data
        if ghi is not None:
            if debug: print "Add ghi:",ghi
            #nb.nc_add_data(ncobj,ghi_var,ghi_data,index=index)
            ghi_nc[dti,:,:] = ghi_data
    # Add time values
    nb.nc_add_time(ncobj,dates)
    # Create an ordered metadata dictionary
    meta = create_meta(dates[0],dates[-1],adict)
    # Output the file-specific metadata to a JSON file regardless of the
    # requested output formats
    jh.json_dump(meta, froot+'.json')
    # Setting attributes here is optional in this example because it is
    # known that netcdf_json_wrapper.py will be called next with
    # 'froot+".json"' as one of the input files.
    nb.nc_set_attributes(ncobj,meta)
    if debug: print "Added attributes"
    nb.nc_close(ncobj)
    print "Wrote: "+froot+".nc"
    return froot
def get_dates(year, month, day):
    """Return the hourly datetimes for one day of solar files.

    There are no ASCII files for 12-17UT, so the returned hours run from
    18UT on the previous day through 11UT on the given day.
    """
    current = datetime(year, month, day, 0) - timedelta(hours=6)  # 18UT prior day
    end = datetime(year, month, day, 12)                          # 12UT this day
    hours = []
    while current < end:
        # Exclude the 12UT-17UT gap for which no files exist.
        if not (11 < current.hour < 18):
            hours.append(current)
        current += timedelta(hours=1)
    return hours
def get_solar_files(year, dt):
    """Return (dni, ghi) file paths for hour *dt*; None for missing files."""
    base = '/data/remotesensing/MTSAT-BoM/ascii-20121220'
    stamp = dt.strftime('%Y%m%d_%HUT')
    found = []
    for kind in ('dni', 'ghi'):
        candidate = os.path.join(base, 'time_series_hourly_' + kind, year,
                                 'solar_' + kind + '_' + stamp + '.txt')
        # Missing hours are represented as None rather than a dead path.
        found.append(candidate if os.path.exists(candidate) else None)
    return tuple(found)
if __name__ == '__main__':
import sys
if len(sys.argv) < 4:
print "Usage:"
print " ", sys.argv[0], "year month day [out_path]"
exit()
else:
outpath = '.'
year,month,day = int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])
if len(sys.argv) >= 5:
outpath = sys.argv[4]
if not os.path.exists(outpath): os.makedirs(outpath)
if not os.path.exists(outpath):
exit("Could not create path", outpath)
outpath = re.sub('/$','',outpath)
dates = get_dates(year,month,day)
outroot = 'solar_dni_ghi_{:04d}{:02d}{:02d}'.format(year,month,day)
outroot = os.path.join(outpath,outroot)
bom_ascii_to_nc(year,dates,outroot)
|
KimberleyOpie/common-tools
|
raw_data_tools/bom_solar_ascii_to_netcdf_day.py
|
Python
|
apache-2.0
| 17,048
|
[
"NetCDF"
] |
24b94e689474b3cb4eae7afcbcf0b5510b723cbd557faa53c47588783036493f
|
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2004
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import license
import gl_XML, glX_XML
import sys, getopt
class PrintGenericStubs(gl_XML.gl_print_base):
    """Emit SPARC assembly dispatch stubs for every GL API entry point.

    All output goes to stdout (Python 2 print statements); the emitted
    text is assembler source, so every literal must be reproduced exactly.
    """
    def __init__(self):
        gl_XML.gl_print_base.__init__(self)
        self.name = "gl_SPARC_asm.py (from Mesa)"
        self.license = license.bsd_license_template % ( \
"""Copyright (C) 1999-2003  Brian Paul   All Rights Reserved.
(C) Copyright IBM Corporation 2004""", "BRIAN PAUL, IBM")
    def printRealHeader(self):
        """Emit the file preamble: macros, helpers and the three dispatch
        stub variants (TLS, pthread, non-threaded), selected by #if."""
        # 32- vs 64-bit SPARC differ in pointer size and stack frame size.
        print '#ifdef __arch64__'
        print '#define GL_OFF(N)\t((N) * 8)'
        print '#define GL_LL\t\tldx'
        print '#define GL_TIE_LD(SYM)\t%tie_ldx(SYM)'
        print '#define GL_STACK_SIZE\t128'
        print '#else'
        print '#define GL_OFF(N)\t((N) * 4)'
        print '#define GL_LL\t\tld'
        print '#define GL_TIE_LD(SYM)\t%tie_ld(SYM)'
        print '#define GL_STACK_SIZE\t64'
        print '#endif'
        print ''
        print '#define GLOBL_FN(x) .globl x ; .type x, @function'
        print '#define HIDDEN(x) .hidden x'
        print ''
        print '\t.register %g2, #scratch'
        print '\t.register %g3, #scratch'
        print ''
        print '\t.text'
        print ''
        print '\tGLOBL_FN(__glapi_sparc_icache_flush)'
        print '\tHIDDEN(__glapi_sparc_icache_flush)'
        print '\t.type\t__glapi_sparc_icache_flush, @function'
        print '__glapi_sparc_icache_flush: /* %o0 = insn_addr */'
        print '\tflush\t%o0'
        print '\tretl'
        print '\t nop'
        print ''
        print '\t.align\t32'
        print ''
        print '\t.type\t__glapi_sparc_get_pc, @function'
        print '__glapi_sparc_get_pc:'
        print '\tretl'
        print '\t add\t%o7, %g2, %g2'
        print '\t.size\t__glapi_sparc_get_pc, .-__glapi_sparc_get_pc'
        print ''
        # Variant 1: TLS-based dispatch table lookup.
        print '#ifdef GLX_USE_TLS'
        print ''
        print '\tGLOBL_FN(__glapi_sparc_get_dispatch)'
        print '\tHIDDEN(__glapi_sparc_get_dispatch)'
        print '__glapi_sparc_get_dispatch:'
        print '\tmov\t%o7, %g1'
        print '\tsethi\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'
        print '\tcall\t__glapi_sparc_get_pc'
        print '\tadd\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'
        print '\tmov\t%g1, %o7'
        print '\tsethi\t%tie_hi22(_glapi_tls_Dispatch), %g1'
        print '\tadd\t%g1, %tie_lo10(_glapi_tls_Dispatch), %g1'
        print '\tGL_LL\t[%g2 + %g1], %g2, GL_TIE_LD(_glapi_tls_Dispatch)'
        print '\tretl'
        print '\t mov\t%g2, %o0'
        print ''
        print '\t.data'
        print '\t.align\t32'
        print ''
        print '\t/* --> sethi %hi(_glapi_tls_Dispatch), %g1 */'
        print '\t/* --> or %g1, %lo(_glapi_tls_Dispatch), %g1 */'
        print '\tGLOBL_FN(__glapi_sparc_tls_stub)'
        print '\tHIDDEN(__glapi_sparc_tls_stub)'
        print '__glapi_sparc_tls_stub: /* Call offset in %g3 */'
        print '\tmov\t%o7, %g1'
        print '\tsethi\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'
        print '\tcall\t__glapi_sparc_get_pc'
        print '\tadd\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'
        print '\tmov\t%g1, %o7'
        print '\tsrl\t%g3, 10, %g3'
        print '\tsethi\t%tie_hi22(_glapi_tls_Dispatch), %g1'
        print '\tadd\t%g1, %tie_lo10(_glapi_tls_Dispatch), %g1'
        print '\tGL_LL\t[%g2 + %g1], %g2, GL_TIE_LD(_glapi_tls_Dispatch)'
        print '\tGL_LL\t[%g7+%g2], %g1'
        print '\tGL_LL\t[%g1 + %g3], %g1'
        print '\tjmp\t%g1'
        print '\t nop'
        print '\t.size\t__glapi_sparc_tls_stub, .-__glapi_sparc_tls_stub'
        print ''
        print '#define GL_STUB(fn, off)\t\t\t\t\\'
        print '\tGLOBL_FN(fn);\t\t\t\t\t\\'
        print 'fn:\tba\t__glapi_sparc_tls_stub;\t\t\t\\'
        print '\t sethi\tGL_OFF(off), %g3;\t\t\t\\'
        print '\t.size\tfn,.-fn;'
        print ''
        # Variant 2: pthread-based dispatch with lazy initialisation.
        print '#elif defined(HAVE_PTHREAD)'
        print ''
        print '\t/* 64-bit 0x00 --> sethi %hh(_glapi_Dispatch), %g1 */'
        print '\t/* 64-bit 0x04 --> sethi %lm(_glapi_Dispatch), %g2 */'
        print '\t/* 64-bit 0x08 --> or %g1, %hm(_glapi_Dispatch), %g1 */'
        print '\t/* 64-bit 0x0c --> sllx %g1, 32, %g1 */'
        print '\t/* 64-bit 0x10 --> add %g1, %g2, %g1 */'
        print '\t/* 64-bit 0x14 --> ldx [%g1 + %lo(_glapi_Dispatch)], %g1 */'
        print ''
        print '\t/* 32-bit 0x00 --> sethi %hi(_glapi_Dispatch), %g1 */'
        print '\t/* 32-bit 0x04 --> ld [%g1 + %lo(_glapi_Dispatch)], %g1 */'
        print ''
        print '\t.data'
        print '\t.align\t32'
        print ''
        print '\tGLOBL_FN(__glapi_sparc_pthread_stub)'
        print '\tHIDDEN(__glapi_sparc_pthread_stub)'
        print '__glapi_sparc_pthread_stub: /* Call offset in %g3 */'
        print '\tmov\t%o7, %g1'
        print '\tsethi\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'
        print '\tcall\t__glapi_sparc_get_pc'
        print '\t add\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'
        print '\tmov\t%g1, %o7'
        print '\tsethi\t%hi(_glapi_Dispatch), %g1'
        print '\tor\t%g1, %lo(_glapi_Dispatch), %g1'
        print '\tsrl\t%g3, 10, %g3'
        print '\tGL_LL\t[%g2+%g1], %g2'
        print '\tGL_LL\t[%g2], %g1'
        print '\tcmp\t%g1, 0'
        print '\tbe\t2f'
        print '\t nop'
        print '1:\tGL_LL\t[%g1 + %g3], %g1'
        print '\tjmp\t%g1'
        print '\t nop'
        print '2:\tsave\t%sp, GL_STACK_SIZE, %sp'
        print '\tmov\t%g3, %l0'
        print '\tcall\t_glapi_get_dispatch'
        print '\t nop'
        print '\tmov\t%o0, %g1'
        print '\tmov\t%l0, %g3'
        print '\tba\t1b'
        print '\t restore %g0, %g0, %g0'
        print '\t.size\t__glapi_sparc_pthread_stub, .-__glapi_sparc_pthread_stub'
        print ''
        print '#define GL_STUB(fn, off)\t\t\t\\'
        print '\tGLOBL_FN(fn);\t\t\t\t\\'
        print 'fn:\tba\t__glapi_sparc_pthread_stub;\t\\'
        print '\t sethi\tGL_OFF(off), %g3;\t\t\\'
        print '\t.size\tfn,.-fn;'
        print ''
        # Variant 3: single-threaded dispatch lookup.
        print '#else /* Non-threaded version. */'
        print ''
        print '\t.type __glapi_sparc_nothread_stub, @function'
        print '__glapi_sparc_nothread_stub: /* Call offset in %g3 */'
        print '\tmov\t%o7, %g1'
        print '\tsethi\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'
        print '\tcall\t__glapi_sparc_get_pc'
        print '\t add\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'
        print '\tmov\t%g1, %o7'
        print '\tsrl\t%g3, 10, %g3'
        print '\tsethi\t%hi(_glapi_Dispatch), %g1'
        print '\tor\t%g1, %lo(_glapi_Dispatch), %g1'
        print '\tGL_LL\t[%g2+%g1], %g2'
        print '\tGL_LL\t[%g2], %g1'
        print '\tGL_LL\t[%g1 + %g3], %g1'
        print '\tjmp\t%g1'
        print '\t nop'
        print '\t.size\t__glapi_sparc_nothread_stub, .-__glapi_sparc_nothread_stub'
        print ''
        print '#define GL_STUB(fn, off)\t\t\t\\'
        print '\tGLOBL_FN(fn);\t\t\t\t\\'
        print 'fn:\tba\t__glapi_sparc_nothread_stub;\t\\'
        print '\t sethi\tGL_OFF(off), %g3;\t\t\\'
        print '\t.size\tfn,.-fn;'
        print ''
        print '#endif'
        print ''
        print '#define GL_STUB_ALIAS(fn, alias) \\'
        print '    .globl fn; \\'
        print '    .set fn, alias'
        print ''
        print '\t.text'
        print '\t.align\t32'
        print ''
        print '\t.globl\tgl_dispatch_functions_start'
        print '\tHIDDEN(gl_dispatch_functions_start)'
        print 'gl_dispatch_functions_start:'
        print ''
        return
    def printRealFooter(self):
        """Emit the end-of-dispatch-table label."""
        print ''
        print '\t.globl\tgl_dispatch_functions_end'
        print '\tHIDDEN(gl_dispatch_functions_end)'
        print 'gl_dispatch_functions_end:'
        return
    def printBody(self, api):
        """Emit one GL_STUB per function offset, then GL_STUB_ALIAS lines
        for alternate entry-point names."""
        for f in api.functionIterateByOffset():
            name = f.dispatch_name()
            print '\tGL_STUB(gl%s, %d)' % (name, f.offset)
            if not f.is_static_entry_point(f.name):
                print '\tHIDDEN(gl%s)' % (name)
        for f in api.functionIterateByOffset():
            name = f.dispatch_name()
            if f.is_static_entry_point(f.name):
                for n in f.entry_points:
                    if n != f.name:
                        text = '\tGL_STUB_ALIAS(gl%s, gl%s)' % (n, f.name)
                        # Aliases with a distinct GLX protocol are excluded
                        # from indirect-rendering builds.
                        if f.has_different_protocol(n):
                            print '#ifndef GLX_INDIRECT_RENDERING'
                            print text
                            print '#endif'
                        else:
                            print text
        return
def show_usage():
    """Print command-line usage and exit with status 1."""
    print "Usage: %s [-f input_file_name] [-m output_mode]" % sys.argv[0]
    sys.exit(1)
if __name__ == '__main__':
    # Parse -f (API XML file) and -m (output mode) options.
    file_name = "gl_API.xml"
    mode = "generic"
    try:
        (args, trail) = getopt.getopt(sys.argv[1:], "m:f:")
    except Exception,e:
        # Bad option string: show usage and exit (Python 2 except syntax).
        show_usage()
    for (arg,val) in args:
        if arg == '-m':
            mode = val
        elif arg == "-f":
            file_name = val
    # Only the "generic" mode is implemented for SPARC stubs.
    if mode == "generic":
        printer = PrintGenericStubs()
    else:
        print "ERROR: Invalid mode \"%s\" specified." % mode
        show_usage()
    api = gl_XML.parse_GL_API(file_name, glX_XML.glx_item_factory())
    printer.Print(api)
|
guorendong/iridium-browser-ubuntu
|
third_party/mesa/src/src/mapi/glapi/gen/gl_SPARC_asm.py
|
Python
|
bsd-3-clause
| 9,011
|
[
"Brian"
] |
877050027a941467c97fa5c49defee20dcbdcc0543ffbccb3acc1d0e7855df21
|
"""Generated message classes for testing version v1.
Allows developers to run automated tests for their mobile applications on
Google infrastructure.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from protorpc import messages as _messages
from googlecloudapis.apitools.base.py import encoding
package = 'testing'
class AndroidDevice(_messages.Message):
  """A single Android device.

  Fields:
    androidModelId: The id of the Android device to be used. Use the
      EnvironmentDiscoveryService to get supported options. Required
    androidVersionId: The id of the Android OS version to be used. Use the
      EnvironmentDiscoveryService to get supported options. Required
    locale: The locale the test device used for testing. Use the
      EnvironmentDiscoveryService to get supported options. Required
    orientation: How the device is oriented during the test. Use the
      EnvironmentDiscoveryService to get supported options. Required
  """

  # Autogenerated protorpc message: field numbers are part of the wire
  # format and must never be changed by hand.
  androidModelId = _messages.StringField(1)
  androidVersionId = _messages.StringField(2)
  locale = _messages.StringField(3)
  orientation = _messages.StringField(4)
class AndroidDeviceCatalog(_messages.Message):
  """The currently supported Android devices.

  Fields:
    models: The set of supported Android device models. @OutputOnly
    runtimeConfiguration: The set of supported runtime configurations.
      @OutputOnly
    versions: The set of supported Android OS versions. @OutputOnly
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  models = _messages.MessageField('AndroidModel', 1, repeated=True)
  runtimeConfiguration = _messages.MessageField('AndroidRuntimeConfiguration', 2)
  versions = _messages.MessageField('AndroidVersion', 3, repeated=True)
class AndroidInstrumentationTest(_messages.Message):
  """A test of an Android application that can control an Android component
  independently of its normal lifecycle. Android instrumentation tests run an
  application APK and test APK inside the same process on a virtual or
  physical AndroidDevice. They also specify a test runner class, such as
  com.google.GoogleTestRunner, which can vary on the specific instrumentation
  framework chosen. See
  <http://developer.android.com/tools/testing/testing_android.html> for more
  information on types of Android tests.

  Fields:
    appApk: The APK for the application under test. Required
    appPackageId: The java package for the application under test. Optional,
      default is determined by examining the application's manifest.
    testApk: The APK containing the test code to be executed. Required
    testPackageId: The java package for the test to be executed. Optional,
      default is determined by examining the application's manifest.
    testRunnerClass: The InstrumentationTestRunner class. Optional, default is
      determined by examining the application's manifest.
    testTargets: Each target must be fully qualified with the package name or
      class name, in one of these formats: - "package package_name" - "class
      package_name.class_name" - "class package_name.class_name#method_name"
      If empty, all targets in the module will be run.
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  appApk = _messages.MessageField('FileReference', 1)
  appPackageId = _messages.StringField(2)
  testApk = _messages.MessageField('FileReference', 3)
  testPackageId = _messages.StringField(4)
  testRunnerClass = _messages.StringField(5)
  testTargets = _messages.StringField(6, repeated=True)
class AndroidMatrix(_messages.Message):
  """A set of Android device configuration permutations is defined by the
  cross-product of the given axes. Internally, the given AndroidMatrix will
  be expanded into a set of AndroidDevices. Only supported permutations will
  be instantiated. Invalid permutations (e.g., incompatible models/versions)
  are ignored.

  Fields:
    androidModelIds: The ids of the set of Android device to be used. Use the
      EnvironmentDiscoveryService to get supported options. Required
    androidVersionIds: The ids of the set of Android OS version to be used.
      Use the EnvironmentDiscoveryService to get supported options. Required
    locales: The set of locales the test device will enable for testing. Use
      the EnvironmentDiscoveryService to get supported options. Required
    orientations: The set of orientations to test with. Use the
      EnvironmentDiscoveryService to get supported options. Required
  """

  androidModelIds = _messages.StringField(1, repeated=True)
  androidVersionIds = _messages.StringField(2, repeated=True)
  locales = _messages.StringField(3, repeated=True)
  orientations = _messages.StringField(4, repeated=True)
class AndroidModel(_messages.Message):
  """A description of an Android device tests may be run on.

  Enums:
    FormValueValuesEnum: Whether this device is virtual or physical.
      @OutputOnly

  Fields:
    brand: The company that this device is branded with. Example: "Google",
      "Samsung" @OutputOnly
    codename: The name of the industrial design. This corresponds to
      android.os.Build.DEVICE @OutputOnly
    form: Whether this device is virtual or physical. @OutputOnly
    id: The unique opaque id for this model. Use this for invoking the
      TestExecutionService. @OutputOnly
    manufacturer: The manufacturer of this device. @OutputOnly
    name: The human-readable marketing name for this device model. Examples:
      "Nexus 5", "Galaxy S5" @OutputOnly
    screenX: Screen size in the horizontal (X) dimension measured in pixels.
      @OutputOnly
    screenY: Screen size in the vertical (Y) dimension measured in pixels.
      @OutputOnly
    supportedVersionIds: The set of Android versions this device supports.
      Note that not all of these are necessarily supported in physical
      devices. @OutputOnly
    tags: Tags for this dimension. Examples: "default", "preview",
      "deprecated"
  """

  class FormValueValuesEnum(_messages.Enum):
    """Whether this device is virtual or physical. @OutputOnly

    Values:
      DEVICE_FORM_UNSPECIFIED: Do not use. For proto versioning only.
      VIRTUAL: A software stack that simulates the device
      PHYSICAL: Actual hardware
    """
    DEVICE_FORM_UNSPECIFIED = 0
    VIRTUAL = 1
    PHYSICAL = 2

  # Autogenerated; do not edit by hand (see module-level NOTE).
  brand = _messages.StringField(1)
  codename = _messages.StringField(2)
  form = _messages.EnumField('FormValueValuesEnum', 3)
  id = _messages.StringField(4)
  manufacturer = _messages.StringField(5)
  name = _messages.StringField(6)
  screenX = _messages.IntegerField(7, variant=_messages.Variant.INT32)
  screenY = _messages.IntegerField(8, variant=_messages.Variant.INT32)
  supportedVersionIds = _messages.StringField(9, repeated=True)
  tags = _messages.StringField(10, repeated=True)
class AndroidMonkeyTest(_messages.Message):
  """A test of an Android application that uses the UI/Application Exerciser
  Monkey from the Android SDK. (Not to be confused with the "monkeyrunner"
  tool, which is also included in the SDK.) See
  http://developer.android.com/tools/help/monkey.html for details.

  Fields:
    appApk: The APK for the application under test. Required
    appPackageId: The java package for the application under test. Optional,
      default is determined by examining the application's manifest.
    eventCount: Number of random monkey events (e.g. clicks, touches) to
      generate. Defaults to 2000.
    eventDelay: Fixed delay between events. Defaults to 10ms.
    randomSeed: Seed value for pseudo-random number generator. Note that,
      although specifying a seed causes the monkey to generate the same
      sequence of events, it does not guarantee that a particular outcome will
      be reproducible across runs. Optional
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  appApk = _messages.MessageField('FileReference', 1)
  appPackageId = _messages.StringField(2)
  eventCount = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  eventDelay = _messages.StringField(4)
  randomSeed = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class AndroidRoboTest(_messages.Message):
  """A test of an android application that explores the application on a
  virtual or physical Android Device, finding culprits and crashes as it goes.

  Fields:
    appApk: The APK for the application under test. Required
    appInitialActivity: The initial activity that should be used to start the
      app. Optional
    appPackageId: The java package for the application under test. Optional,
      default is determined by examining the application's manifest.
    bootstrapApk: The APK used for bootstrapping (e.g., passing the login
      screen). Optional
    bootstrapPackageId: The java package for the bootstrap. Optional
    bootstrapRunnerClass: The runner class for the bootstrap. Optional
    maxDepth: The max depth of the traversal stack Robo can explore. Default
      is 50. Optional
    maxSteps: The max number of steps Robo can execute. Default is no limit.
      Optional
    randomizeSteps: Whether Robo follows a random order of steps on a given
      activity state. Optional
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  appApk = _messages.MessageField('FileReference', 1)
  appInitialActivity = _messages.StringField(2)
  appPackageId = _messages.StringField(3)
  bootstrapApk = _messages.MessageField('FileReference', 4)
  bootstrapPackageId = _messages.StringField(5)
  bootstrapRunnerClass = _messages.StringField(6)
  maxDepth = _messages.IntegerField(7, variant=_messages.Variant.INT32)
  maxSteps = _messages.IntegerField(8, variant=_messages.Variant.INT32)
  randomizeSteps = _messages.BooleanField(9)
class AndroidRuntimeConfiguration(_messages.Message):
  """Configuration that can be selected at the time a test is run.

  Fields:
    locales: The set of available locales. @OutputOnly
    orientations: The set of available orientations. @OutputOnly
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  locales = _messages.MessageField('Locale', 1, repeated=True)
  orientations = _messages.MessageField('Orientation', 2, repeated=True)
class AndroidVersion(_messages.Message):
  """A version of the Android OS

  Fields:
    apiLevel: The API level for this Android version. Examples: 18, 19
      @OutputOnly
    codeName: The code name for this Android version. Examples: "JellyBean",
      "KitKat" @OutputOnly
    distribution: Market share for this version. @OutputOnly
    id: An opaque id for this Android version. Use this id to invoke the
      TestExecutionService. @OutputOnly
    releaseDate: The date this Android version became available in the market.
      @OutputOnly
    tags: Tags for this dimension. Examples: "default", "preview",
      "deprecated"
    versionString: A string representing this version of the Android OS.
      Examples: "4.3", "4.4" @OutputOnly
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  apiLevel = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  codeName = _messages.StringField(2)
  distribution = _messages.MessageField('Distribution', 3)
  id = _messages.StringField(4)
  releaseDate = _messages.MessageField('Date', 5)
  tags = _messages.StringField(6, repeated=True)
  versionString = _messages.StringField(7)
class BlobstoreFile(_messages.Message):
  """A file in Blobstore (referenced by FileReference.blob).

  Fields:
    blobId: The Blobstore id of the file.
    md5Hash: The md5 hash of the file's contents.
  """

  blobId = _messages.StringField(1)
  md5Hash = _messages.StringField(2)
class Browser(_messages.Message):
  """An available browser.

  Fields:
    androidCatalog: The catalog of Android devices for which we offer this
      browser. @OutputOnly
    id: A human readable id for this Browser version. Use this id to invoke
      the TestExecutionService. Examples: "chrome-stable-channel", "firefox-
      beta-channel" @OutputOnly
    linuxCatalog: The catalog of Linux machines for which we offer this
      browser. @OutputOnly
    name: A string representing the browser name. Examples: "chrome",
      "firefox", "ie" @OutputOnly
    release: The release of the browser. Examples: "stable-channel", "beta-
      channel", "10" (for ie), etc @OutputOnly
    versionString: A string representing the version of the browser. Examples:
      "42.12.34.1234", "37.01", "10.0.9200.16384" (for ie) @OutputOnly
    windowsCatalog: The catalog of Windows machines for which we offer this
      browser. @OutputOnly
  """

  androidCatalog = _messages.MessageField('AndroidDeviceCatalog', 1)
  id = _messages.StringField(2)
  linuxCatalog = _messages.MessageField('LinuxMachineCatalog', 3)
  name = _messages.StringField(4)
  release = _messages.StringField(5)
  versionString = _messages.StringField(6)
  windowsCatalog = _messages.MessageField('WindowsMachineCatalog', 7)
class CancelTestMatrixResponse(_messages.Message):
  """Response containing the state of a cancelled test matrix.

  Enums:
    TestStateValueValuesEnum: The rolled-up state of the test matrix just
      before it was cancelled.

  Fields:
    testState: The rolled-up state of the test matrix just before it was
      cancelled.
  """

  class TestStateValueValuesEnum(_messages.Enum):
    """The rolled-up state of the test matrix just before it was cancelled.

    Values:
      TEST_STATE_UNSPECIFIED: Do not use. For proto versioning only.
      VALIDATING: The execution or matrix is being validated.
      PENDING: The execution or matrix is waiting for resources to become
        available.
      RUNNING: The execution or matrix is currently being processed.
      FINISHED: The execution or matrix has terminated normally. On a matrix
        this means that the matrix level processing completed normally, but
        individual executions may be in an ERROR state.
      ERROR: The execution or matrix has stopped because it encountered an
        infrastructure failure.
      UNSUPPORTED_ENVIRONMENT: The execution was not run because it
        corresponds to an unsupported environment. Can only be set on an
        execution.
      INCOMPATIBLE_ENVIRONMENT: The execution was not run because the provided
        inputs are incompatible with the requested environment. Example:
        requested AndroidVersion is lower than APK's minSdkVersion Can only
        be set on an execution.
      INCOMPATIBLE_ARCHITECTURE: The execution was not run because the
        provided inputs are incompatible with the requested architecture.
        Example: requested device does not support running the native code in
        the supplied APK Can only be set on an execution.
      CANCELLED: The user cancelled the execution or matrix.
      INVALID: The execution or matrix was not run because the provided inputs
        are not valid. Examples: input file is not of the expected type, is
        malformed/corrupt, or was flagged as malware
    """
    TEST_STATE_UNSPECIFIED = 0
    VALIDATING = 1
    PENDING = 2
    RUNNING = 3
    FINISHED = 4
    ERROR = 5
    UNSUPPORTED_ENVIRONMENT = 6
    INCOMPATIBLE_ENVIRONMENT = 7
    INCOMPATIBLE_ARCHITECTURE = 8
    CANCELLED = 9
    INVALID = 10

  testState = _messages.EnumField('TestStateValueValuesEnum', 1)
class ClientInfo(_messages.Message):
  """Information about the client which invoked the test.

  Fields:
    name: Client name, such as gcloud.
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  name = _messages.StringField(1)
class ConnectionInfo(_messages.Message):
  """Information needed to connect to services running on the virtual device.
  All of the fields in this message are provided by the backend. Absence of a
  field means that the corresponding service is not provided.

  Fields:
    adbPort: Port for ADB (e.g. 5555) NOT user-specified
    ipAddress: IP address of the device. NOT user-specified
    sshPort: Port for SSH (e.g. 22) NOT user-specified
    vncPassword: Password for VNC NOT user-specified
    vncPort: Port for VNC (e.g. 6444) NOT user-specified
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  adbPort = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  ipAddress = _messages.StringField(2)
  sshPort = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  vncPassword = _messages.StringField(4)
  vncPort = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class Date(_messages.Message):
  """Represents a whole calendar date, e.g. date of birth. The time of day and
  time zone are either specified elsewhere or are not significant. The date is
  relative to the Proleptic Gregorian Calendar. The day may be 0 to represent
  a year and month where the day is not significant, e.g. credit card
  expiration date. The year may be 0 to represent a month and day independent
  of year, e.g. anniversary date. Related types are [google.type.TimeOfDay][]
  and [google.protobuf.Timestamp][google.protobuf.Timestamp].

  Fields:
    day: Day of month. Must be from 1 to 31 and valid for the year and month,
      or 0 if specifying a year/month where the day is not significant.
    month: Month of year of date. Must be from 1 to 12.
    year: Year of date. Must be from 1 to 9,999, or 0 if specifying a date
      without a year.
  """

  day = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  month = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  year = _messages.IntegerField(3, variant=_messages.Variant.INT32)
class Device(_messages.Message):
  """A GCE virtual Android device instance.

  Enums:
    StateValueValuesEnum: State of the device. NOT user-specified

  Fields:
    androidDevice: The Android device configuration. user-specified
    creationTime: The time this device was initially created. NOT user-
      specified
    deviceDetails: Information about the backing GCE instance and connection.
      NOT user-specified
    id: Unique id set by the backend. NOT user-specified
    state: State of the device. NOT user-specified
    stateDetails: Details about the state of the device. NOT user-specified
  """

  class StateValueValuesEnum(_messages.Enum):
    """State of the device. NOT user-specified

    Values:
      DEVICE_UNSPECIFIED: Do not use. For proto versioning only.
      PREPARING: The device is in the process of spinning up.
      READY: The device is created and ready to use.
      CLOSED: The device has been closed.
      DEVICE_ERROR: There has been an error.
    """
    DEVICE_UNSPECIFIED = 0
    PREPARING = 1
    READY = 2
    CLOSED = 3
    DEVICE_ERROR = 4

  # Autogenerated; do not edit by hand (see module-level NOTE).
  androidDevice = _messages.MessageField('AndroidDevice', 1)
  creationTime = _messages.StringField(2)
  deviceDetails = _messages.MessageField('DeviceDetails', 3)
  id = _messages.StringField(4)
  state = _messages.EnumField('StateValueValuesEnum', 5)
  stateDetails = _messages.MessageField('DeviceStateDetails', 6)
class DeviceDetails(_messages.Message):
  """Details about the GCE instance and connection.

  Fields:
    connectionInfo: Details about the connection to the device.
    gceInstanceDetails: Details about the GCE instance backing the device.
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  connectionInfo = _messages.MessageField('ConnectionInfo', 1)
  gceInstanceDetails = _messages.MessageField('GceInstanceDetails', 2)
class DeviceStateDetails(_messages.Message):
  """Additional details about the status of the device.

  Fields:
    errorDetails: If the DeviceState is ERROR, then this string may contain
      human-readable details about the error.
    progressDetails: A human-readable, detailed description of the device's
      status. For example: "Starting Device\n Device Ready". During the
      device's lifespan data may be appended to the progress.
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  errorDetails = _messages.StringField(1)
  progressDetails = _messages.StringField(2)
class Distribution(_messages.Message):
  """Data about the relative number of devices running a given configuration
  of the Android platform.

  Fields:
    marketShare: The estimated fraction (0-1) of the total market with this
      configuration. @OutputOnly
    measurementTime: The time this distribution was measured. @OutputOnly
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  marketShare = _messages.FloatField(1)
  measurementTime = _messages.StringField(2)
class Empty(_messages.Message):
  """A generic empty message that you can re-use to avoid defining duplicated
  empty messages in your APIs. A typical example is to use it as the request
  or the response type of an API method. For instance:     service Foo {
  rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }     The
  JSON representation for `Empty` is empty JSON object `{}`.
  """
  # Intentionally has no fields.
class Environment(_messages.Message):
  """The environment in which the test is run.

  Fields:
    androidDevice: An Android device which must be used with an Android test.
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  androidDevice = _messages.MessageField('AndroidDevice', 1)
class EnvironmentMatrix(_messages.Message):
  """The matrix of environments in which the test is to be executed.

  Fields:
    androidMatrix: A matrix of Android devices
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  androidMatrix = _messages.MessageField('AndroidMatrix', 1)
class FileReference(_messages.Message):
  """A reference to a file, used for user inputs.

  Fields:
    blob: A blob in Blobstore.
    gcsPath: A path to a file in Google Cloud Storage. Example: gs://build-
      app-1414623860166/app-debug-unaligned.apk
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  blob = _messages.MessageField('BlobstoreFile', 1)
  gcsPath = _messages.StringField(2)
class GceInstanceDetails(_messages.Message):
  """This information is provided for the user to look up additional details
  of the backing GCE instance. It is assumed the user does not modify this
  instance. If so, then the device service makes no guarantees about device
  functionality.

  Fields:
    name: Desired instance name of the device. May be user-specified. If not,
      the backend will choose a name.
    projectId: The GCE project that contains the instance backing this device.
      If user-specified, must be the same as the project_id in the
      CreateDeviceRequest.
    zone: Desired GCE zone for the device user-specified
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  name = _messages.StringField(1)
  projectId = _messages.StringField(2)
  zone = _messages.StringField(3)
class GoogleCloudStorage(_messages.Message):
  """A storage location within Google cloud storage (GCS).

  Fields:
    gcsPath: The path to a directory in GCS that will eventually contain the
      results for this test. The requesting user must have write access on the
      bucket in the supplied path.
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  gcsPath = _messages.StringField(1)
class LinuxMachineCatalog(_messages.Message):
  """The currently supported Linux machines.

  Fields:
    versions: The set of supported Linux versions. @OutputOnly
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  versions = _messages.MessageField('LinuxVersion', 1, repeated=True)
class LinuxVersion(_messages.Message):
  """A version of a Linux OS.

  Fields:
    id: A human readable id for this Linux version. Examples:
      "debian-7-wheezy" @OutputOnly
    versionString: A string representing this version of the Linux OS.
      Examples: "debian-7-wheezy-v20150325" @OutputOnly
  """

  id = _messages.StringField(1)
  versionString = _messages.StringField(2)
class ListDevicesResponse(_messages.Message):
  """Response containing a list of devices. Supports pagination.

  Fields:
    devices: The GCE virtual Android devices to be returned.
    nextPageToken: The pagination token to retrieve the next page of device
      results.
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  devices = _messages.MessageField('Device', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
class ListTestMatricesResponse(_messages.Message):
  """Response containing a list of Test Matrices.

  Fields:
    testMatrices: The set of test matrices.
  """

  testMatrices = _messages.MessageField('TestMatrix', 1, repeated=True)
class Locale(_messages.Message):
  """A location/region designation for language.

  Fields:
    id: The id for this locale. Example: "en_US" @OutputOnly
    name: A human-friendly name for this language/locale. Example: "English"
      @OutputOnly
    region: A human-friendly string representing the region for this locale.
      Example: "United States" Not present for every locale. @OutputOnly
    tags: Tags for this dimension. Examples: "default"
  """

  id = _messages.StringField(1)
  name = _messages.StringField(2)
  region = _messages.StringField(3)
  tags = _messages.StringField(4, repeated=True)
class Orientation(_messages.Message):
  """Screen orientation of the device.

  Fields:
    id: The id for this orientation. Example: "portrait" @OutputOnly
    name: A human-friendly name for this orientation. Example: "portrait"
      @OutputOnly
    tags: Tags for this dimension. Examples: "default"
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  id = _messages.StringField(1)
  name = _messages.StringField(2)
  tags = _messages.StringField(3, repeated=True)
class ResultStorage(_messages.Message):
  """Locations where the results of running the test are stored.

  Fields:
    googleCloudStorage: Required.
    toolResultsExecution: The tool results execution that results are written
      to. @OutputOnly
    toolResultsHistory: The tool results history that contains the tool
      results execution that results are written to. If not provided the
      service will choose an appropriate value.
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  googleCloudStorage = _messages.MessageField('GoogleCloudStorage', 1)
  toolResultsExecution = _messages.MessageField('ToolResultsExecution', 2)
  toolResultsHistory = _messages.MessageField('ToolResultsHistory', 3)
class StandardQueryParameters(_messages.Message):
  """Query parameters accepted by all methods.

  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.

  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    bearer_token: OAuth bearer token.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    pp: Pretty-print response.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" or "email:<ldap>" to
      include in api requests.
    uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """

  class AltValueValuesEnum(_messages.Enum):
    """Data format for response.

    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2

  class FXgafvValueValuesEnum(_messages.Enum):
    """V1 error format.

    Values:
      _1: v1 error format
      _2: v2 error format
    """
    _1 = 0
    _2 = 1

  # Autogenerated; do not edit by hand (see module-level NOTE).
  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
  bearer_token = _messages.StringField(4)
  callback = _messages.StringField(5)
  fields = _messages.StringField(6)
  key = _messages.StringField(7)
  oauth_token = _messages.StringField(8)
  pp = _messages.BooleanField(9, default=True)
  prettyPrint = _messages.BooleanField(10, default=True)
  quotaUser = _messages.StringField(11)
  trace = _messages.StringField(12)
  uploadType = _messages.StringField(13)
  upload_protocol = _messages.StringField(14)
class TestDetails(_messages.Message):
  """Additional details about the progress of the running test.

  Fields:
    errorMessage: If the TestState is ERROR, then this string will contain
      human-readable details about the error. @OutputOnly
    progressMessages: Human-readable, detailed descriptions of the test's
      progress. For example: "Provisioning a device", "Starting Test". During
      the course of execution new data may be appended to the end of
      progress_messages. @OutputOnly
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  errorMessage = _messages.StringField(1)
  progressMessages = _messages.StringField(2, repeated=True)
class TestEnvironmentCatalog(_messages.Message):
  """A description of a test environment.

  Fields:
    androidDeviceCatalog: Android devices suitable for running Android
      Instrumentation Tests.
    webDriverCatalog: WebDriver environments suitable for running web tests.
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  androidDeviceCatalog = _messages.MessageField('AndroidDeviceCatalog', 1)
  webDriverCatalog = _messages.MessageField('WebDriverCatalog', 2)
class TestExecution(_messages.Message):
  """Specifies a single test to be executed in a single environment.

  Enums:
    StateValueValuesEnum: Indicates the current progress of the test execution
      (e.g., FINISHED). @OutputOnly

  Fields:
    environment: How the host machine(s) are configured. @OutputOnly
    id: Unique id set by the backend. @OutputOnly
    matrixId: Id of the containing TestMatrix. @OutputOnly
    projectId: The cloud project that owns the test execution. @OutputOnly
    state: Indicates the current progress of the test execution (e.g.,
      FINISHED). @OutputOnly
    testDetails: Additional details about the running test. @OutputOnly
    testSpecification: How to run the test. @OutputOnly
    timestamp: The time this test execution was initially created. @OutputOnly
    toolResultsStep: Where the results for this execution are written.
      @OutputOnly
  """

  class StateValueValuesEnum(_messages.Enum):
    """Indicates the current progress of the test execution (e.g., FINISHED).
    @OutputOnly

    Values:
      TEST_STATE_UNSPECIFIED: Do not use. For proto versioning only.
      VALIDATING: The execution or matrix is being validated.
      PENDING: The execution or matrix is waiting for resources to become
        available.
      RUNNING: The execution or matrix is currently being processed.
      FINISHED: The execution or matrix has terminated normally. On a matrix
        this means that the matrix level processing completed normally, but
        individual executions may be in an ERROR state.
      ERROR: The execution or matrix has stopped because it encountered an
        infrastructure failure.
      UNSUPPORTED_ENVIRONMENT: The execution was not run because it
        corresponds to an unsupported environment. Can only be set on an
        execution.
      INCOMPATIBLE_ENVIRONMENT: The execution was not run because the provided
        inputs are incompatible with the requested environment. Example:
        requested AndroidVersion is lower than APK's minSdkVersion Can only
        be set on an execution.
      INCOMPATIBLE_ARCHITECTURE: The execution was not run because the
        provided inputs are incompatible with the requested architecture.
        Example: requested device does not support running the native code in
        the supplied APK Can only be set on an execution.
      CANCELLED: The user cancelled the execution or matrix.
      INVALID: The execution or matrix was not run because the provided inputs
        are not valid. Examples: input file is not of the expected type, is
        malformed/corrupt, or was flagged as malware
    """
    TEST_STATE_UNSPECIFIED = 0
    VALIDATING = 1
    PENDING = 2
    RUNNING = 3
    FINISHED = 4
    ERROR = 5
    UNSUPPORTED_ENVIRONMENT = 6
    INCOMPATIBLE_ENVIRONMENT = 7
    INCOMPATIBLE_ARCHITECTURE = 8
    CANCELLED = 9
    INVALID = 10

  environment = _messages.MessageField('Environment', 1)
  id = _messages.StringField(2)
  matrixId = _messages.StringField(3)
  projectId = _messages.StringField(4)
  state = _messages.EnumField('StateValueValuesEnum', 5)
  testDetails = _messages.MessageField('TestDetails', 6)
  testSpecification = _messages.MessageField('TestSpecification', 7)
  timestamp = _messages.StringField(8)
  toolResultsStep = _messages.MessageField('ToolResultsStep', 9)
class TestMatrix(_messages.Message):
  """A group of one or more TestExecutions, built by taking a product of
  values over a pre-defined set of axes.

  Enums:
    StateValueValuesEnum: Indicates the current progress of the test matrix
      (e.g., FINISHED) @OutputOnly

  Fields:
    clientInfo: Information about the client which invoked the test.
    environmentMatrix: How the host machine(s) are configured. Required
    projectId: The cloud project that owns the test matrix. @OutputOnly
    resultStorage: Where the results for the matrix are written. Required
    state: Indicates the current progress of the test matrix (e.g., FINISHED)
      @OutputOnly
    testExecutions: The list of test executions that the service creates for
      this matrix. @OutputOnly
    testMatrixId: Unique id set by the service. @OutputOnly
    testSpecification: How to run the test. Required
    timestamp: The time this test matrix was initially created. @OutputOnly
  """

  class StateValueValuesEnum(_messages.Enum):
    """Indicates the current progress of the test matrix (e.g., FINISHED)
    @OutputOnly

    Values:
      TEST_STATE_UNSPECIFIED: Do not use. For proto versioning only.
      VALIDATING: The execution or matrix is being validated.
      PENDING: The execution or matrix is waiting for resources to become
        available.
      RUNNING: The execution or matrix is currently being processed.
      FINISHED: The execution or matrix has terminated normally. On a matrix
        this means that the matrix level processing completed normally, but
        individual executions may be in an ERROR state.
      ERROR: The execution or matrix has stopped because it encountered an
        infrastructure failure.
      UNSUPPORTED_ENVIRONMENT: The execution was not run because it
        corresponds to an unsupported environment. Can only be set on an
        execution.
      INCOMPATIBLE_ENVIRONMENT: The execution was not run because the provided
        inputs are incompatible with the requested environment. Example:
        requested AndroidVersion is lower than APK's minSdkVersion Can only
        be set on an execution.
      INCOMPATIBLE_ARCHITECTURE: The execution was not run because the
        provided inputs are incompatible with the requested architecture.
        Example: requested device does not support running the native code in
        the supplied APK Can only be set on an execution.
      CANCELLED: The user cancelled the execution or matrix.
      INVALID: The execution or matrix was not run because the provided inputs
        are not valid. Examples: input file is not of the expected type, is
        malformed/corrupt, or was flagged as malware
    """
    TEST_STATE_UNSPECIFIED = 0
    VALIDATING = 1
    PENDING = 2
    RUNNING = 3
    FINISHED = 4
    ERROR = 5
    UNSUPPORTED_ENVIRONMENT = 6
    INCOMPATIBLE_ENVIRONMENT = 7
    INCOMPATIBLE_ARCHITECTURE = 8
    CANCELLED = 9
    INVALID = 10

  clientInfo = _messages.MessageField('ClientInfo', 1)
  environmentMatrix = _messages.MessageField('EnvironmentMatrix', 2)
  projectId = _messages.StringField(3)
  resultStorage = _messages.MessageField('ResultStorage', 4)
  state = _messages.EnumField('StateValueValuesEnum', 5)
  testExecutions = _messages.MessageField('TestExecution', 6, repeated=True)
  testMatrixId = _messages.StringField(7)
  testSpecification = _messages.MessageField('TestSpecification', 8)
  timestamp = _messages.StringField(9)
class TestSpecification(_messages.Message):
  """A description of how to run the test.

  Fields:
    androidInstrumentationTest: An Android instrumentation test.
    androidMonkeyTest: An Android monkey test.
    androidRoboTest: An Android robo test.
    testTimeout: Max time a test execution is allowed to run before it is
      automatically cancelled.
  """

  # Autogenerated; do not edit by hand (see module-level NOTE).
  androidInstrumentationTest = _messages.MessageField('AndroidInstrumentationTest', 1)
  androidMonkeyTest = _messages.MessageField('AndroidMonkeyTest', 2)
  androidRoboTest = _messages.MessageField('AndroidRoboTest', 3)
  testTimeout = _messages.StringField(4)
class TestingProjectsDevicesCreateRequest(_messages.Message):
  """A TestingProjectsDevicesCreateRequest object.

  Fields:
    device: A Device resource to be passed as the request body.
    projectId: The GCE project under which to create the device.
    sshPublicKey: The public key to be set on the device in order to SSH into
      it.
  """

  device = _messages.MessageField('Device', 1)
  projectId = _messages.StringField(2, required=True)
  sshPublicKey = _messages.StringField(3)
class TestingProjectsDevicesDeleteRequest(_messages.Message):
  """A TestingProjectsDevicesDeleteRequest object.

  Fields:
    deviceId: The GCE virtual Android device to be deleted.
    projectId: The GCE project that contains the device to be deleted.
  """

  deviceId = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
class TestingProjectsDevicesGetRequest(_messages.Message):
  """A TestingProjectsDevicesGetRequest object.

  Fields:
    deviceId: The id of the GCE Android virtual device.
    projectId: The GCE project that contains this device instance.
  """

  deviceId = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
class TestingProjectsDevicesKeepaliveRequest(_messages.Message):
  """A TestingProjectsDevicesKeepaliveRequest object.

  Fields:
    deviceId: The GCE virtual Android device to be issued the keep-alive.
    projectId: The GCE project that contains the device to be issued the keep-
      alive.
  """

  deviceId = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
class TestingProjectsDevicesListRequest(_messages.Message):
  """A TestingProjectsDevicesListRequest object.

  Fields:
    pageSize: Used to specify the max number of device results to be returned.
    pageToken: Used to request a specific page of the device results list.
    projectId: The GCE project to list the devices from.
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  projectId = _messages.StringField(3, required=True)
class TestingProjectsTestMatricesCancelRequest(_messages.Message):
  """A TestingProjectsTestMatricesCancelRequest object.

  Fields:
    projectId: Cloud project that owns the test.
    testMatrixId: Test matrix that will be canceled.
  """

  projectId = _messages.StringField(1, required=True)
  testMatrixId = _messages.StringField(2, required=True)
class TestingProjectsTestMatricesCreateRequest(_messages.Message):
  """A TestingProjectsTestMatricesCreateRequest object.

  Fields:
    projectId: The GCE project under which this job will run.
    testMatrix: A TestMatrix resource to be passed as the request body.
  """

  projectId = _messages.StringField(1, required=True)
  testMatrix = _messages.MessageField('TestMatrix', 2)
class TestingProjectsTestMatricesDeleteRequest(_messages.Message):
  """A TestingProjectsTestMatricesDeleteRequest object.

  Fields:
    projectId: Cloud project that owns the test.
    testMatrixId: Test matrix that will be deleted.
  """

  projectId = _messages.StringField(1, required=True)
  testMatrixId = _messages.StringField(2, required=True)
class TestingProjectsTestMatricesGetRequest(_messages.Message):
  """A TestingProjectsTestMatricesGetRequest object.

  Fields:
    projectId: Cloud project that owns the test matrix.
    testMatrixId: Unique test matrix id which was assigned by the service.
  """

  projectId = _messages.StringField(1, required=True)
  testMatrixId = _messages.StringField(2, required=True)
class TestingProjectsTestMatricesListRequest(_messages.Message):
  """A TestingProjectsTestMatricesListRequest object.

  Fields:
    projectId: Cloud project that owns the tests.
  """

  projectId = _messages.StringField(1, required=True)
class TestingTestEnvironmentCatalogGetRequest(_messages.Message):
  """A TestingTestEnvironmentCatalogGetRequest object.

  Enums:
    EnvironmentTypeValueValuesEnum: The type of environment that should be
      listed.

  Fields:
    environmentType: The type of environment that should be listed.
  """

  class EnvironmentTypeValueValuesEnum(_messages.Enum):
    """The type of environment that should be listed.

    Values:
      ENVIRONMENT_TYPE_UNSPECIFIED: <no description>
      ANDROID: <no description>
      WEBDRIVER: <no description>
    """
    ENVIRONMENT_TYPE_UNSPECIFIED = 0
    ANDROID = 1
    WEBDRIVER = 2

  environmentType = _messages.EnumField('EnvironmentTypeValueValuesEnum', 1, required=True)
class ToolResultsExecution(_messages.Message):
  """Represents a tool results execution resource. This has the results of a
  TestMatrix.

  Fields:
    executionId: A tool results execution ID. @OutputOnly
    historyId: A tool results history ID. @OutputOnly
    projectId: The cloud project that owns the tool results execution.
      @OutputOnly
  """

  executionId = _messages.StringField(1)
  historyId = _messages.StringField(2)
  projectId = _messages.StringField(3)
class ToolResultsHistory(_messages.Message):
  """Represents a tool results history resource.

  Fields:
    historyId: A tool results history ID.
    projectId: The cloud project that owns the tool results history.
  """

  historyId = _messages.StringField(1)
  projectId = _messages.StringField(2)
class ToolResultsStep(_messages.Message):
  """Represents a tool results step resource. This has the results of a
  TestExecution.

  Fields:
    executionId: A tool results execution ID. @OutputOnly
    historyId: A tool results history ID. @OutputOnly
    projectId: The cloud project that owns the tool results step. @OutputOnly
    stepId: A tool results step ID. @OutputOnly
  """

  executionId = _messages.StringField(1)
  historyId = _messages.StringField(2)
  projectId = _messages.StringField(3)
  stepId = _messages.StringField(4)
class WebDriverCatalog(_messages.Message):
  """The currently supported WebDriver VM resources.

  Fields:
    browsers: The set of supported browsers. @OutputOnly
  """

  browsers = _messages.MessageField('Browser', 1, repeated=True)
class WindowsMachineCatalog(_messages.Message):
  """The currently supported Windows machines.

  Fields:
    versions: The set of supported Windows versions. @OutputOnly
  """

  versions = _messages.MessageField('WindowsVersion', 1, repeated=True)
class WindowsVersion(_messages.Message):
  """A version of a Windows OS.

  Fields:
    id: A human readable id for this Windows version. Examples: "windows-
      server" @OutputOnly
    versionString: A string representing this version of the Windows OS.
      Examples: "windows-server-2008-r2-dc-v20150331" @OutputOnly
  """

  id = _messages.StringField(1)
  versionString = _messages.StringField(2)
# The wire protocol uses names that are not valid Python identifiers
# ("1", "2" enum values and the "$.xgafv" query parameter).  Register
# custom JSON mappings so the Python-safe names ('_1', '_2', 'f__xgafv')
# serialize back to the protocol's names.
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
    package=u'testing')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
    package=u'testing')
encoding.AddCustomJsonFieldMapping(
    StandardQueryParameters, 'f__xgafv', '$.xgafv',
    package=u'testing')
|
wemanuel/smry
|
smry/server-auth/ls/google-cloud-sdk/lib/googlecloudapis/testing/v1/testing_v1_messages.py
|
Python
|
apache-2.0
| 43,063
|
[
"Galaxy"
] |
898b69899f6fa1e8d2875b30e78149f8fa41ee6ad5093321e4516c987afd4229
|
import numpy as np
import pytest
import mbuild as mb
from mbuild.formats.par_writer import write_par
from mbuild.tests.base_test import BaseTest
from mbuild.utils.io import get_fn, has_foyer
@pytest.mark.skipif(not has_foyer, reason="Foyer package not installed")
class TestPar(BaseTest):
    """Smoke and parameter-count tests for the CHARMM ``.par`` writer."""

    @pytest.mark.skipif(not has_foyer, reason="Foyer package not installed")
    def test_save_charmm(self):
        compound = mb.load(get_fn("charmm_dihedral.mol2"))
        # Prefix every particle name with an underscore before atomtyping.
        for particle in compound.particles():
            particle.name = "_{}".format(particle.name)
        residue_names = {p.parent.name for p in compound.particles()}
        structure = compound.to_parmed(
            box=compound.get_boundingbox(),
            residues=residue_names,
        )
        from foyer import Forcefield

        forcefield = Forcefield(forcefield_files=[get_fn("charmm_truncated.xml")])
        structure = forcefield.apply(structure, assert_dihedral_params=False)
        write_par(structure, "charmm_dihedral.par")

    @pytest.mark.skipif(not has_foyer, reason="Foyer package not installed")
    def test_save_forcefield(self, ethane):
        ethane.save(filename="ethane-opls.par", forcefield_name="oplsaa")

    def test_par_parameters(self, ethane):
        ethane.save(filename="ethane-opls.par", forcefield_name="oplsaa")
        from parmed.charmm import CharmmParameterSet

        parameter_set = CharmmParameterSet.load_set(pfile="ethane-opls.par")
        # Expected type counts for OPLS-AA ethane.
        assert len(parameter_set.bond_types) == 3
        assert len(parameter_set.angle_types) == 3
        assert len(parameter_set.atom_types) == 2
|
iModels/mbuild
|
mbuild/tests/test_par.py
|
Python
|
mit
| 1,485
|
[
"CHARMM"
] |
0b317c675b5c5a1c4a11dec8fea3d0d08657afb326dcce8f5403c8bceed92dfd
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import argparse
import lsst.daf.persistence as daf_persistence
import lsst.afw.display as afw_display
def list_diffims(butler):
    """
    Use the butler to figure out what difference-image data are available
    and print one numbered "key=value ..." line per dataId.

    Parameters
    ----------
    butler : daf_persistence.Butler, used to interrogate the data repository
    """
    id_list = []
    subset = butler.subset('deepDiff_differenceExp')
    for dataId in subset.cache:
        # subset.cache may contain ids with no data behind them; keep only
        # the ones that actually exist in the repository.
        if butler.datasetExists('deepDiff_differenceExp', dataId=dataId):
            id_list.append(dataId)
    for i, dataId in enumerate(id_list):
        # BUG FIX: ``iteritems()`` is Python-2 only and contradicts the
        # ``print_function`` import at the top of this file; ``items()``
        # works on both Python 2 and 3.
        print("%i: %s" % (i, " ".join(["%s=%s" % (key, value)
                                       for key, value in dataId.items()])))
def show_diffim(butler, dataId):
    """
    Construct a display of various difference imaging related views.

    Frame 0 shows the direct image, frame 1 the difference image, and
    frame 2 the direct image minus the difference image.

    Parameters
    ----------
    butler : daf_persistence.Butler, used in interacting with the data repository
    dataId : Dictionary, data identifiers to lookup specific data
    """
    def display_image_and_srcs(display, image, title, *srcs):
        """
        Display an image with a title and up to 4 source catalogs.

        Parameters
        ----------
        display : afw_display.Display, used to display image and plot catalogs
        image : afw_image.Image-like, pixel data to send to the display backend
        title : str, title for the display frame
        *srcs : afw_table.SourceCatalog, points to plot
        """
        if len(srcs) > 4:
            print("WARNING: more than four source catalogs sent. Only plotting the first four.")
        syms = ['o', '+', 'x', 't']
        colors = ['green', 'red', 'blue', 'white']
        with display.Buffering():
            display.mtv(image, title=title)
            # zip() truncates to the shorter sequence, so at most four
            # catalogs are plotted even when more are passed in.
            for src, plot_sym, color in zip(srcs, syms, colors):
                for s in src:
                    display.dot(plot_sym, s.getX(), s.getY(), size=5, ctype=color)

    # The three frames share identical setup; build them in a loop rather
    # than repeating the same three statements per frame.
    displays = []
    for frame in range(3):
        disp = afw_display.getDisplay(frame=frame)
        disp.setMaskTransparency(75)
        disp.scale('linear', 'zscale')
        displays.append(disp)
    display0, display1, display2 = displays

    exp = butler.get('calexp', dataId)
    src = butler.get('src', dataId)
    diasrc = butler.get('deepDiff_diaSrc', dataId)
    diffexp = butler.get('deepDiff_differenceExp', dataId)
    display_image_and_srcs(display0, exp, 'Direct', src, diasrc)
    display_image_and_srcs(display1, diffexp, 'Diffim', src, diasrc)
    # Subtract the difference image from the direct image in place and
    # show the result in the third frame.
    im1 = exp.getMaskedImage().getImage()
    im2 = diffexp.getMaskedImage().getImage()
    im1 -= im2
    display_image_and_srcs(display2, im1, 'Direct - Diffim', src, diasrc)
def make_dataId(id_list):
    """
    Construct a proper dataId from a list of [key]=[value] strings.

    Parameters
    ----------
    id_list : list of str, parsed into the dataId

    Returns
    -------
    dict containing data identifiers; values that parse as numbers are
    stored as floats, everything else is kept as a string.

    Raises
    ------
    RuntimeError if any entry is not in [key]=[value] syntax.
    """
    dataId = {}
    for el in id_list:
        parts = el.split('=')
        if len(parts) != 2:
            # id not in Key=Value syntax
            raise RuntimeError("Not able to parse: --id: %s. Not in [key]=[value] syntax"%
                               " ".join([str(el) for el in id_list]))
        # Try to cast to a number, fall back to string.  Only ValueError is
        # expected from float() on a string, so catch exactly that.
        try:
            value = float(parts[1])
        except ValueError:
            value = str(parts[1])
        dataId[parts[0]] = value
    return dataId
def run(repository, id_list=None, **kwargs):
    """
    Figure out what to do with the inputs and call the appropriate methods.

    With no ``id_list``, list the available difference images; otherwise
    display the requested one.

    Parameters
    ----------
    repository : str, path of the data repository
    id_list (optional) : list of str, data identifier parts in [key]=[value] strings.
    **kwargs : Ignored.
    """
    try:
        butler = daf_persistence.Butler(repository)
    except RuntimeError:
        raise RuntimeError('Could not open repository. Check the path: %s'%repository)
    if not id_list:
        list_diffims(butler)
    else:
        dataId = make_dataId(id_list)
        # CLEANUP: the original wrapped this in ``try: ... except Exception:
        # raise`` (a no-op) followed by an unreachable commented-out raise;
        # both removed without changing behavior.
        if butler.datasetExists('deepDiff_differenceExp', dataId=dataId):
            show_diffim(butler, dataId)
        else:
            raise RuntimeError('Could not load data for id: %s. Dataset does not exist'%id_list)
if __name__ == '__main__':
    description = """******\nVisualize difference images given a repository.
    In order to use, make sure a display backend (display_ds9), afw,
    and the correc obs (obs_lsstSim) package setup.
    If using DS9, make sure it is in your $PATH.
    --------------
    Frame 0: Direct
    Frame 1: Difference
    Frame 2: Difference + Direct
    In all frames DIA sources are plotted with the red plus, and direct sources in green circles
    ******
    """
    # Parse the command line and forward the namespace contents to run()
    # as keyword arguments.
    arg_parser = argparse.ArgumentParser(description=description,
                                         formatter_class=argparse.RawTextHelpFormatter)
    arg_parser.add_argument('repository', type=str,
                            help="Data repository to open.\n**List data if no other arguments are supplied.**")
    arg_parser.add_argument('--id', dest='id_list', type=str, nargs='+',
                            help='Id for data to fetch: i.e. --id visit=335 sensor=1,1')
    parsed = arg_parser.parse_args()
    run(**vars(parsed))
|
DarkEnergyScienceCollaboration/Twinkles
|
bin/view_diffs.py
|
Python
|
mit
| 5,616
|
[
"VisIt"
] |
334c30d856d5a0ba236d6ccacdf064c15644ba68f87cf76cf529363033682bc7
|
import sys
# Make the h2o-py package importable when this test is run directly from
# its own directory inside the source tree.
sys.path.insert(1, "../../")
import h2o, tests
def offset_1388(ip, port):
print "Loading datasets..."
pros_hex = h2o.import_file(h2o.locate("smalldata/prostate/prostate.csv"))
pros_hex[1] = pros_hex[1].asfactor()
pros_hex[3] = pros_hex[3].asfactor()
pros_hex[4] = pros_hex[4].asfactor()
pros_hex[5] = pros_hex[5].asfactor()
pros_hex[8] = pros_hex[8].asfactor()
cars_hex = h2o.import_file(h2o.locate("smalldata/junit/cars.csv"))
cars_hex[0] = cars_hex[0].asfactor()
cars_hex[2] = cars_hex[2].asfactor()
print "Running Binomial Comparison..."
glm_bin_h2o = h2o.glm(x=pros_hex[2:9], y=pros_hex[1], training_frame=pros_hex, family="binomial", standardize=False,
offset_column="AGE", Lambda=[0], max_iterations=100)
print "binomial"
print "R:"
print "deviance: {0}".format(1464.9565781185)
print "null deviance: {0}".format(2014.93087862689)
print "aic: {0}".format(1494.9565781185)
print "H2O:"
print "deviance {0}".format(glm_bin_h2o.residual_deviance())
print "null deviance {0}".format(glm_bin_h2o.null_deviance())
print "aic {0}".format(glm_bin_h2o.aic())
assert abs(1464.9565781185 - glm_bin_h2o.residual_deviance()) < 0.1
assert abs(2014.93087862689 - glm_bin_h2o.null_deviance()) < 0.1
assert abs(1494.9565781185 - glm_bin_h2o.aic()) < 0.1
print "Running Regression Comparisons..."
glm_h2o = h2o.glm(x=cars_hex[2:8], y=cars_hex[1], training_frame=cars_hex, family="gaussian", standardize=False,
offset_column="year", Lambda = [0], max_iterations = 100)
print "gaussian"
print "R:"
print "deviance: {0}".format(4204.68399275449)
print "null deviance: {0}".format(16072.0955102041)
print "aic: {0}".format(2062.54330117177)
print "H2O:"
print "deviance {0}".format(glm_h2o.residual_deviance())
print "null deviance {0}".format(glm_h2o.null_deviance())
print "aic {0}".format(glm_h2o.aic())
assert abs(4204.68399275449 - glm_h2o.residual_deviance()) < 0.1
assert abs(16072.0955102041 - glm_h2o.null_deviance()) < 0.1
assert abs(2062.54330117177 - glm_h2o.aic()) < 0.1
glm_h2o = h2o.glm(x=cars_hex[2:8], y=cars_hex[1], training_frame=cars_hex, family="poisson", standardize=False,
offset_column="year", Lambda = [0], max_iterations = 100)
print "poisson"
print "R:"
print "deviance: {0}".format(54039.1725227918)
print "null deviance: {0}".format(59381.5624028358)
print "aic: {0}".format("Inf")
print "H2O:"
print "deviance {0}".format(glm_h2o.residual_deviance())
print "null deviance {0}".format(glm_h2o.null_deviance())
print "aic {0}".format(glm_h2o.aic())
assert abs(54039.1725227918 - glm_h2o.residual_deviance()) < 0.1
assert abs(59381.5624028358 - glm_h2o.null_deviance()) < 0.1
assert abs(float('inf') - glm_h2o.aic()) < 0.1
if __name__ == "__main__":
    # Hand control to the h2o test harness, which parses sys.argv for the
    # cluster ip/port and then invokes the test function above.
    tests.run_test(sys.argv, offset_1388)
|
bospetersen/h2o-3
|
h2o-py/tests/testdir_jira/pyunit_NOPASS_pubdev_1388_offset_comparisons.py
|
Python
|
apache-2.0
| 3,007
|
[
"Gaussian"
] |
eaaefdfe33e8cd6cd8894c4f2ffc9383b85ccd7681517fc7a34305a70633f9b7
|
# Copyright (C) 2019-TODAY - Raphaël Valyi Akretion
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl-3.0.en.html).
import inspect
import logging
import sys
from odoo import SUPERUSER_ID, api, models
from .models.spec_models import SpecModel, StackedModel
_logger = logging.getLogger(__name__)
def post_init_hook(cr, registry, module_name, spec_module):
    """
    Automatically generate access rules for spec models.

    For every spec model that was not injected into an existing concrete
    Odoo model, load two ir.model.access rows: read-only access for the
    module's user group and full access for its manager group.
    """
    env = api.Environment(cr, SUPERUSER_ID, {})
    remaining_models = get_remaining_spec_models(cr, registry, module_name, spec_module)
    # Column layout expected by ir.model.access.load().
    fields = [
        "id",
        "name",
        "model_id/id",
        "group_id/id",
        "perm_read",
        "perm_write",
        "perm_create",
        "perm_unlink",
    ]
    rows = []
    for model in remaining_models:
        model_underscored = model.replace(".", "_")
        model_ref = "%s_spec.model_%s" % (module_name, model_underscored)
        user_rule = [
            "access_%s_user" % (model_underscored,),
            model_underscored,
            model_ref,
            "%s.group_user" % (module_name,),
            "1", "0", "0", "0",
        ]
        manager_rule = [
            "access_%s_manager" % (model_underscored,),
            model_underscored,
            model_ref,
            "%s.group_manager" % (module_name,),
            "1", "1", "1", "1",
        ]
        rows.append(user_rule)
        rows.append(manager_rule)
    env["ir.model.access"].load(fields, rows)
def get_remaining_spec_models(cr, registry, module_name, spec_module):
    """
    Figure out the list of spec models not injected into existing
    Odoo models.

    Returns the set of spec model names from ``spec_module`` whose classes
    were not matched to any concrete model registered by ``module_name``.
    """
    # All concrete (non-abstract) models that this module registered.
    cr.execute(
        """select ir_model.model from ir_model_data
        join ir_model on res_id=ir_model.id
        where ir_model_data.model='ir.model'
        and module=%s;""",
        (module_name,),
    )
    module_models = [
        i[0]
        for i in cr.fetchall()
        if registry.get(i[0]) and not registry[i[0]]._abstract
    ]
    injected_models = set()
    for model in module_models:
        base_class = registry[model]
        # 1st classic Odoo classes
        if hasattr(base_class, "_inherit"):
            injected_models.add(base_class._name)
            # _inherit may be a single model name or a list of them.
            if isinstance(base_class._inherit, list):
                injected_models = injected_models.union(set(base_class._inherit))
            elif base_class._inherit is not None:
                injected_models.add(base_class._inherit)
    # visit_stack will now need the associated spec classes
    injected_classes = set()
    remaining_models = set()
    # TODO when using a registry loading, use _stack_skip to find
    # out which models to leave concrete, see later commented loop
    for m in injected_models:
        c = SpecModel._odoo_name_to_class(m, spec_module)
        if c is not None:
            injected_classes.add(c)
    for model in module_models:
        base_class = registry[model]
        # 2nd StackedModel classes, that we will visit
        if hasattr(base_class, "_stacked"):
            node = SpecModel._odoo_name_to_class(base_class._stacked, spec_module)
            base_class._visit_stack(
                node, injected_classes, base_class._stacked.split(".")[-1], registry, cr
            )
    # Everything declared in spec_module minus what was matched above.
    all_spec_models = {
        c._name
        for name, c in inspect.getmembers(sys.modules[spec_module], inspect.isclass)
    }
    remaining_models = remaining_models.union(
        {i for i in all_spec_models if i not in [c._name for c in injected_classes]}
    )
    return remaining_models
def register_hook(env, module_name, spec_module):
    """
    Called by Model#_register_hook once all modules are loaded.
    Here we take all spec models that not injected in existing concrete
    Odoo models and we make them concrete automatically with
    their _auto_init method that will create their SQL DDL structure.
    """
    remaining_models = get_remaining_spec_models(
        env.cr, env.registry, module_name, spec_module
    )
    for name in remaining_models:
        spec_class = StackedModel._odoo_name_to_class(name, spec_module)
        spec_class._module = "fiscal"  # TODO use python_module ?
        fields = env[spec_class._name].fields_get_keys()
        # First prefixed, non-"_choice" field becomes the record display name.
        rec_name = next(
            filter(
                lambda x: (
                    x.startswith(env[spec_class._name]._field_prefix)
                    and "_choice" not in x
                ),
                fields,
            )
        )
        # Dynamically build a concrete model class mixing SpecModel with
        # the generated spec class, then register it under this module.
        c = type(
            name,
            (SpecModel, spec_class),
            {
                "_name": spec_class._name,
                "_inherit": [spec_class._inherit, "spec.mixin"],
                "_original_module": "fiscal",
                "_odoo_module": module_name,
                "_spec_module": spec_module,
                "_rec_name": rec_name,
            },
        )
        models.MetaModel.module_to_models[module_name] += [c]
        # now we init these models properly
        # a bit like odoo.modules.loading#load_module_graph would do.
        c._build_model(env.registry, env.cr)
        env[name]._prepare_setup()
        env[name]._setup_base()
        env[name]._setup_fields()
        env[name]._setup_complete()
    # Only run the (expensive) table initialisation once per registry;
    # the flag is set elsewhere and cleared here after use.
    hook_key = "_%s_need_hook" % (module_name,)
    if hasattr(env.registry, hook_key) and getattr(env.registry, hook_key):
        env.registry.init_models(env.cr, remaining_models, {"module": module_name})
        setattr(env.registry, hook_key, False)
|
OCA/l10n-brazil
|
spec_driven_model/hooks.py
|
Python
|
agpl-3.0
| 5,709
|
[
"VisIt"
] |
77b2538fb1699fa4ccb143226cfeb352499240ef678b1dffd7a57a8a3e8ee2bf
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import locale
from collections import OrderedDict
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.functional import cached_property
from pootle.core.cache import make_method_key
from pootle.core.delegate import data_tool
from pootle.core.mixins import TreeItem
from pootle.core.url_helpers import get_editor_filter
from pootle.i18n.gettext import language_dir, tr_lang, ugettext_lazy as _
from staticpages.models import StaticPage
class LiveLanguageManager(models.Manager):
    """Manager that only considers `live` languages.

    A live language is any language containing at least a project with
    translatable files.
    """

    def get_queryset(self):
        """Returns a queryset for all live languages for enabled projects."""
        return super(LiveLanguageManager, self).get_queryset().filter(
            translationproject__isnull=False,
            translationproject__directory__obsolete=False,
            translationproject__project__disabled=False,
        ).distinct()

    def get_all_queryset(self):
        """Returns a queryset for all live languages for all projects."""
        return super(LiveLanguageManager, self).get_queryset().filter(
            translationproject__isnull=False,
            translationproject__directory__obsolete=False,
        ).distinct()

    def cached_dict(self, locale_code='en-us', show_all=False):
        """Retrieves a sorted list of live language codes and names.

        By default only returns live languages for enabled projects, but it can
        also return live languages for disabled projects if specified.

        :param locale_code: the UI locale for which language full names need to
            be localized.
        :param show_all: tells whether to return all live languages (both for
            disabled and enabled projects) or only live languages for enabled
            projects.
        :return: an `OrderedDict`
        """
        key_prefix = 'all_cached_dict' if show_all else 'cached_dict'
        key = make_method_key(self, key_prefix, locale_code)
        languages = cache.get(key, None)
        if languages is None:
            qs = self.get_all_queryset() if show_all else self.get_queryset()
            # NOTE: Python 2 only -- ``sorted`` is given both ``key`` and
            # ``cmp`` (locale-aware collation on the localized name); the
            # ``cmp`` argument no longer exists in Python 3.
            languages = OrderedDict(
                sorted([(lang[0], tr_lang(lang[1]))
                        for lang in qs.values_list('code', 'fullname')],
                       cmp=locale.strcoll,
                       key=lambda x: x[1])
            )
            cache.set(key, languages, settings.POOTLE_CACHE_TIMEOUT)
        return languages
class Language(models.Model, TreeItem):
    """A language hosted on this Pootle server, with its plural-form
    metadata and a one-to-one link to its directory in the project tree."""

    # any changes to the `code` field may require updating the schema
    # see migration 0002_case_insensitive_schema.py
    code = models.CharField(
        max_length=50, null=False, unique=True, db_index=True,
        verbose_name=_("Code"),
        help_text=_('ISO 639 language code for the language, possibly '
                    'followed by an underscore (_) and an ISO 3166 country '
                    'code. <a href="http://www.w3.org/International/'
                    'articles/language-tags/">More information</a>')
    )
    fullname = models.CharField(max_length=255, null=False,
                                verbose_name=_("Full Name"))
    specialchars = models.CharField(
        max_length=255, blank=True, verbose_name=_("Special Characters"),
        help_text=_('Enter any special characters that users might find '
                    'difficult to type')
    )
    plurals_help_text = _('For more information, visit '
                          '<a href="http://docs.translatehouse.org/projects/'
                          'localization-guide/en/latest/l10n/'
                          'pluralforms.html">'
                          'our page</a> on plural forms.')
    nplural_choices = (
        (0, _('Unknown')), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)
    )
    nplurals = models.SmallIntegerField(
        default=0, choices=nplural_choices,
        verbose_name=_("Number of Plurals"), help_text=plurals_help_text
    )
    pluralequation = models.CharField(
        max_length=255, blank=True, verbose_name=_("Plural Equation"),
        help_text=plurals_help_text)
    directory = models.OneToOneField('pootle_app.Directory', db_index=True,
                                     editable=False)

    # Default manager plus the "live languages only" manager defined above.
    objects = models.Manager()
    live = LiveLanguageManager()

    class Meta(object):
        ordering = ['code']
        db_table = 'pootle_app_language'

    # # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # #

    @cached_property
    def data_tool(self):
        return data_tool.get(self.__class__)(self)

    @property
    def pootle_path(self):
        return '/%s/' % self.code

    @property
    def name(self):
        """Localized fullname for the language."""
        return tr_lang(self.fullname)

    @property
    def direction(self):
        """Return the language direction."""
        return language_dir(self.code)

    # # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # #

    @classmethod
    def get_canonical(cls, language_code):
        """Returns the canonical `Language` object matching `language_code`.
        If no language can be matched, `None` will be returned.

        :param language_code: the code of the language to retrieve.
        """
        try:
            return cls.objects.get(code__iexact=language_code)
        except cls.DoesNotExist:
            # Retry with the separator flipped ("pt-BR" <-> "pt_BR").
            _lang_code = language_code
            if "-" in language_code:
                _lang_code = language_code.replace("-", "_")
            elif "_" in language_code:
                _lang_code = language_code.replace("_", "-")
            try:
                return cls.objects.get(code__iexact=_lang_code)
            except cls.DoesNotExist:
                return None

    def __unicode__(self):
        return u"%s - %s" % (self.name, self.code)

    def __init__(self, *args, **kwargs):
        super(Language, self).__init__(*args, **kwargs)

    def __repr__(self):
        return u'<%s: %s>' % (self.__class__.__name__, self.fullname)

    def save(self, *args, **kwargs):
        # create corresponding directory object
        from pootle_app.models.directory import Directory
        self.directory = Directory.objects.root.get_or_make_subdir(self.code)
        super(Language, self).save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        # Delete the row first, then the now-orphaned directory.
        directory = self.directory
        super(Language, self).delete(*args, **kwargs)
        directory.delete()

    def get_absolute_url(self):
        return reverse('pootle-language-browse', args=[self.code])

    def get_translate_url(self, **kwargs):
        return u''.join([
            reverse('pootle-language-translate', args=[self.code]),
            get_editor_filter(**kwargs),
        ])

    def clean(self):
        super(Language, self).clean()
        if self.fullname:
            self.fullname = self.fullname.strip()

    # # # TreeItem
    def get_children(self):
        return self.translationproject_set.live()
    # # # /TreeItem

    def get_children_for_user(self, user, select_related=None):
        return self.translationproject_set.for_user(
            user, select_related=select_related
        ).select_related(
            "project"
        ).order_by('project__fullname')

    def get_announcement(self, user=None):
        """Return the related announcement, if any."""
        return StaticPage.get_announcement_for(self.pootle_path, user)
@receiver([post_delete, post_save])
def invalidate_language_list_cache(**kwargs):
    """Drop the cached language dicts whenever a Language or
    TranslationProject instance is saved or deleted."""
    sender_name = type(kwargs["instance"]).__name__
    # XXX: maybe use custom signals or simple function calls?
    if sender_name in ('Language', 'TranslationProject'):
        cache.delete_pattern(
            make_method_key('LiveLanguageManager', 'cached_dict', '*'))
|
r-o-b-b-i-e/pootle
|
pootle/apps/pootle_language/models.py
|
Python
|
gpl-3.0
| 8,392
|
[
"VisIt"
] |
6db9a278a29f35f8fd04f00fa3d40a7e3f5ad4c4a124d639a862e7bcd7d093aa
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic.base import RedirectView
urlpatterns = [
    # Root and demo-user URLs permanently redirect to login / dashboard.
    url(r'^$', RedirectView.as_view(url='/accounts/login/', permanent=True),
        name="login"),
    url(r'^users/inventorydemo/', RedirectView.as_view(url='/dashboard/',
        permanent=True), name="redirect"),
    # Django Admin
    url(r'^admin/', include(admin.site.urls)),
    # User management
    url(r'^users/', include("inventory.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^api-auth/', include('rest_framework.urls',
                               namespace='rest_framework')),
    # Your stuff: custom urls includes go here
    url(r'^apiv1/', include('apiv1.urls', namespace='apiv1')),
    # NOTE(review): this namespace duplicates the 'apiv1' namespace used
    # just above -- probably meant 'dashboard'.  Verify reverse()/{% url %}
    # usages before changing, as callers may rely on the current name.
    url(r'^dashboard/', include('dashboard.urls', namespace='apiv1')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    # NOTE(review): string-dotted view paths only work on old Django
    # versions (< 1.10); newer versions require callables here.
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
|
abirafdirp/inventory
|
inventory/config/urls.py
|
Python
|
bsd-3-clause
| 1,565
|
[
"VisIt"
] |
abb15f3c64bb95ee9bb6f19cbfc0885560f03109be05ba4266249f436d3f4e9f
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
Created at 2010.01.14
@version: 0.0.3
@change:\n
- 2010.01.14.
- Create this file.
- 2010.01.18\n
- Test the function B{Get_rotate_matrix()}
- Add the function B{Write_rotate_matrix()}
- Add the function B{Rotate_2_vector()}
- Finished the function B{Get_parallel_result()}, but not test it.
- 2010.01.19\n
- Modified some bugs.
- Calculate the angle between the two vectors.
        - Close the file after writing.
        - Fixed a bug in generating the rotation matrix.
        - Fixed a bug in calculating the origin vector for 2 and 4 bases in a group.
    - 2011.02.01\n
        - Modified this module, so both *.pdb and *.gro are allowed as coordinate files.
    - 2011.02.18\n
        - Fixed a bug: changed all occurrences of "reside" to "residue".
- 2011.03.18.\n
- Rewrite some functions.
- 2011.03.20.\n
- write functions for calculate RMSD.
- 2011.04.03.\n
- modified the output, change from print to usage.echo
'''
import Simple_atom
import time as Time
import sys
import os
import usage
import MDAnalysis
from MDAnalysis import coordinates
from math import cos, sin, sqrt
import math
import numpy
from numpy import matrix
import DNA_matrix
def Get_rise_fromTRJ(traj_file, coor_file, base_list_1, base_list_2, output_name,skip=1,dt=1,begin=0,end=-1):
    '''
    Reading the traj file and the coordinate file like *.pdb or *.gro. With the base serial choosed, get the
    rotation matrix for this base. and write it's to a output file with some syntax.

    For each pair of base groups, every analysed frame writes one line of
    time, inter-group distance, inter-group angle and the two group RMSDs
    to the corresponding output file.  Python 2 only (print statements,
    ``except X,e`` syntax).

    base_list_1 format list(list())
    base_list_2 format list(list())
    output_name format list()
    '''
    print " init......"
    # if coor_file.endswith('.top'):
    # Atom_list=amber_top
    START_TIME=Time.time()
    LIST_NUM=len(base_list_1)
    Atom_list=Simple_atom.Get_atom_list(coor_file)
    residue_list=Simple_atom.Get_Residue_list(Atom_list)
    base_name_list_1=list()
    base_name_list_2=list()
    base_atom_list_1=list()
    base_atom_list_2=list()
    # Back up any pre-existing output files, then write one header per
    # output file describing both groups and the skip setting.
    for i in range(LIST_NUM):
        if os.path.isfile(output_name[i]):
            print "backup %s to %s" %(output_name[i],"#"+output_name[i]+"#")
            try:
                os.rename(output_name[i],"#"+output_name[i]+"#")
            except OSError,e:
                print e
                print "the file %s will be overwrited!" %output_name[i]
        fp = open(output_name[i], 'w')
        fp.write("#Group 1: ")
        for j in base_list_1[i]:
            fp.write("%d\t " %j)
        fp.write("\n")
        fp.write("#Group 2: ")
        for j in base_list_2[i]:
            fp.write("%d\t " %j)
        fp.write("\n")
        fp.write("#skip:%d\n" %skip)
        fp.write("#time(ns) distance(A)\t angle(degree) RMSD1(A)\t RMSD2(A)\n")
        fp.close()
        # base_name_list_1.append( [residue_list[j-1] for j in base_list_1[i]])
        # base_name_list_2.append( [residue_list[j-1] for j in base_list_2[i]])
        # Residue serials are 1-based, hence the m-1 indexing below.
        temp_list=list()
        for m in base_list_1[i]:
            temp_list.append(residue_list[m-1])
        base_name_list_1.append(temp_list)
        temp_list=list()
        for m in base_list_2[i]:
            temp_list.append(residue_list[m-1])
        base_name_list_2.append(temp_list)
        base_atom_list_1.append([DNA_matrix.Get_baseID_list(Atom_list,j) for j in base_list_1[i]])
        base_atom_list_2.append([DNA_matrix.Get_baseID_list(Atom_list,j) for j in base_list_2[i]])
    # Open the trajectory; fall back to the Amber TRJ reader for *.mdcrd.
    try:
        u=MDAnalysis.Universe(coor_file,traj_file).trajectory
    except:
        if traj_file.endswith("mdcrd"):
            u=coordinates.TRJ.TRJReader(traj_file,len(Atom_list),delta=dt)
        else:
            print "ERROR: The trajectory file %s not support new." %traj_file
            sys.exit()
    if traj_file.endswith("mdcrd") or traj_file.endswith("dcd"):
        pass
    else:
        try:
            dt=u.dt
        except:
            dt=0.0
    for ts in u:
        # Frame numbers are 1-based; time is in ps when dt is in ps.
        time=float((ts.frame-1)*dt)
        if dt > 0.0:
            if time < float(begin):
                continue
            if time > float(end) and end !=-1:
                break
        if ts.frame % skip == 0 :
            for i in range(LIST_NUM):
                r1=[]
                '''the group 1 rotate list'''
                r2=[]
                '''the group 2 rotate list'''
                c1=[]
                '''the group 1 coordinate list'''
                c2=[]
                '''the group 2 coordinate list'''
                # NOTE(review): ts._x/_y/_z are private MDAnalysis arrays,
                # indexed here with 0-based atom ids (x-1) -- confirm this
                # holds for the MDAnalysis version in use.
                for m in range(len(base_name_list_1[i])):
                    temp_list = [ [ts._x[x-1], ts._y[x-1], ts._z[x-1]] for x in base_atom_list_1[i][m] ]
                    result = DNA_matrix.Get_rotate_matrix(numpy.array(temp_list), base_name_list_1[i][m][0])
                    #base_name_list_1[index of the groups][index of the base of group 1][base_name,base_serial]
                    c1.append(numpy.array(temp_list))
                    r1.append(result)
                for m in range(len(base_name_list_2[i])):
                    temp_list = [ [ts._x[x-1], ts._y[x-1], ts._z[x-1]] for x in base_atom_list_2[i][m] ]
                    result = DNA_matrix.Get_rotate_matrix(numpy.array(temp_list), base_name_list_2[i][m][0])
                    c2.append(numpy.array(temp_list))
                    r2.append(result)
                orient_group_1,origin_group_1 = DNA_matrix.Get_group_rotmat(r1,len(base_name_list_1[i]))
                orient_group_2,origin_group_2 = DNA_matrix.Get_group_rotmat(r2,len(base_name_list_2[i]))
                RMSD1=DNA_matrix.Get_group_RMSD(base_name_list_1[i],c1,origin_group_1,orient_group_1)
                RMSD2=DNA_matrix.Get_group_RMSD(base_name_list_2[i],c2,origin_group_2,orient_group_2)
                # Flip group 2's orientation if it points away from group 1
                # so the two normals are always on the same side.
                if numpy.dot(orient_group_1, orient_group_2)<0:
                    orient_group_2 = orient_group_2*(-1)
                orient_total = DNA_matrix.Rotate_2_vector(orient_group_1, orient_group_2)
                dist_vector = numpy.array(origin_group_2)-numpy.array(origin_group_1)
                dist = abs(numpy.dot(orient_total, dist_vector))
                # Angle between the group normals, in degrees (3.1416
                # approximates pi).
                gamma = numpy.dot(orient_group_1, orient_group_2)
                gamma = math.acos(gamma)*180/3.1416
                if ts.frame % 10 ==0 and i==0:
                    NOW_TIME=Time.time()
                    if time < 1000:
                        usage.echo(" analysis frame %6d, time %8.1f ps, time used %8.2f s\r" %(ts.frame, time,NOW_TIME-START_TIME))
                    elif time > 1000 and ts.frame %200 == 0:
                        usage.echo(" analysis frame %6d, time %8.2f ns, time used %8.2f s\r" %(ts.frame, time/1000,NOW_TIME-START_TIME))
                fp = open(output_name[i], 'a')
                fp.write( " %7.4f\t %6.3f\t %6.3f\t %6.4f\t %6.4f\n" %(time/1000, dist, gamma,RMSD1,RMSD2))
                fp.close()
    print "The parallel analysis finished"
    print "The result are in file: %s" %output_name
def Get_rise_fromTOP( coor_file, base_list_1, base_list_2):
    '''
    Compute the rise (distance along the common axis), the angle between the
    orientation vectors of two base groups, and the planarity RMSD of each
    group, from a single static coordinate file (no trajectory).

    coor_file:   coordinate/topology file readable by Simple_atom (e.g. *.pdb, *.gro).
    base_list_1: residue serial numbers forming base group 1.
    base_list_2: residue serial numbers forming base group 2.

    Results are printed to stdout; nothing is returned.
    '''
#    if coor_file.endswith('.top'):
#        Atom_list=amber_top
    atom_list=Simple_atom.Get_atom_list(coor_file)
    residue_list=Simple_atom.Get_Residue_list(atom_list)
#    atom_list=Simple_atom.Get_atom_list(coor_file)
    # (name, serial) records and per-base atom-ID lists for each group.
    base_name_list_1=list()
    base_name_list_2=list()
    base_atom_list_1=list()
    base_atom_list_2=list()
    for m in base_list_1:
        base_name_list_1.append(residue_list[m-1])
    for m in base_list_2:
        base_name_list_2.append(residue_list[m-1])
    base_atom_list_1=[DNA_matrix.Get_baseID_list(atom_list,j) for j in base_list_1]
    base_atom_list_2=[DNA_matrix.Get_baseID_list(atom_list,j) for j in base_list_2]
#    print base_atom_list_1
#    print base_atom_list_2
    r1=[]
    '''the group 1 rotate list'''
    r2=[]
    '''the group 2 rotate list'''
    c1=[]
    '''the group 1 coordinate list'''
    c2=[]
    '''the group 2 coordinate list'''
    # NOTE(review): coordinates are scaled by 10 — presumably converting
    # nm to Angstrom; confirm against the units used by Simple_atom.
    for m in range(len(base_name_list_1)):
        temp_list = [ [atom_list[x-1].atom_coor_x*10, atom_list[x-1].atom_coor_y*10,atom_list[x-1].atom_coor_z*10] \
            for x in base_atom_list_1[m] ]
#        print temp_list
        result = DNA_matrix.Get_rotate_matrix(numpy.array(temp_list), base_name_list_1[m][0])
        c1.append(numpy.array(temp_list))
        r1.append(result)
    for m in range(len(base_name_list_2)):
        temp_list = [ [atom_list[x-1].atom_coor_x*10, atom_list[x-1].atom_coor_y*10,atom_list[x-1].atom_coor_z*10] \
            for x in base_atom_list_2[m] ]
#        print temp_list
        result = DNA_matrix.Get_rotate_matrix(numpy.array(temp_list), base_name_list_2[m][0])
        c2.append(numpy.array(temp_list))
        r2.append(result)
    # Average orientation/origin per group, then the group planarity RMSDs.
    orient_group_1,origin_group_1 = DNA_matrix.Get_group_rotmat(r1,len(base_name_list_1))
    orient_group_2,origin_group_2 = DNA_matrix.Get_group_rotmat(r2,len(base_name_list_2))
    RMSD1=DNA_matrix.Get_group_RMSD(base_name_list_1,c1,origin_group_1,orient_group_1)
    RMSD2=DNA_matrix.Get_group_RMSD(base_name_list_2,c2,origin_group_2,orient_group_2)
    # Flip group 2's orientation if it points opposite to group 1's,
    # so the angle between them stays in [0, 90] degrees.
    if numpy.dot(orient_group_1, orient_group_2)<0:
        orient_group_2 = orient_group_2*(-1)
    orient_total = DNA_matrix.Rotate_2_vector(orient_group_1, orient_group_2)
    # Rise: projection of the inter-origin vector onto the mean axis.
    dist_vector = numpy.array(origin_group_2)-numpy.array(origin_group_1)
    dist = abs(numpy.dot(orient_total, dist_vector))
    gamma = numpy.dot(orient_group_1, orient_group_2)
    # NOTE(review): 3.1416 approximates math.pi; kept as-is to preserve output.
    gamma = math.acos(gamma)*180/3.1416
    print "rise: %6.3f\tangle: %6.3f\tRMSD_1: %6.4f\tRMSD_2: %6.4f\n" %(dist, gamma,RMSD1,RMSD2)
def Get_RMSD_fromTRJ(traj_file, coor_file, base_list, output_name,skip=1, dt=1,begin=0,end=-1):
'''
Reading the traj file and the coordinate file like *.pdb or *.gro. With the base serial choosed, get the
rotation matrix for this base. and write it's to a output file with some syntax.
'''
print " init......"
START_TIME=Time.time()
LIST_NUM=len(base_list)
Atom_list=Simple_atom.Get_atom_list(coor_file)
residue_list=Simple_atom.Get_Residue_list(Atom_list)
base_name_list=list()
base_atom_list=list()
for i in range(LIST_NUM):
if os.path.isfile(output_name[i]):
print "backup %s to %s" %(output_name[i],"#"+output_name[i]+"#")
try:
os.rename(output_name[i],"#"+output_name[i]+"#")
except OSError,e:
print e
print "the file %s will be overwrited!" %output_name[i]
fp = open(output_name[i], 'w')
fp.write("#Group list: ")
for j in base_list[i]:
fp.write("%d\t " %j)
fp.write("\n")
fp.write("#skip:%d\n" %skip)
fp.write("#time(ns) RMSD(A)\n")
# base_name_list.append( [residue_list[j-1] for j in base_list[i]])
temp_list=list()
for m in base_list[i]:
for n in residue_list:
if n[1]==m:
temp_list.append(n)
break
else:
pass
base_name_list.append(temp_list)
# print base_name_list
base_atom_list.append( [DNA_matrix.Get_baseID_list(Atom_list,j) for j in base_list[i]])
#u=MDAnalysis.Universe(coor_file,traj_file)
try:
u=MDAnalysis.Universe(coor_file,traj_file).trajectory
except:
if traj_file.endswith("mdcrd"):
u=coordinates.TRJ.TRJReader(traj_file,len(Atom_list),delta=dt)
else:
print "ERROR: The trajectory file %s not support new." %traj_file
sys.exit()
if traj_file.endswith("mdcrd") or traj_file.endswith("dcd"):
pass
else:
dt=u.dt
for ts in u:
time=(ts.frame-1)*dt
# print begin,end
if time < float(begin):
# print time
continue
if end > 0 and time > float(end):
break
if ts.frame % skip == 0 :
for i in range(LIST_NUM):
r1=[]
'''the group 1 rotate list'''
c1=[]
'''the group 1 coordinate list'''
for m in range(len(base_name_list[i])):
temp_list = [ [ts._x[x-1], ts._y[x-1], ts._z[x-1]] for x in base_atom_list[i][m] ]
result = DNA_matrix.Get_rotate_matrix(numpy.array(temp_list), base_name_list[i][m][0])
c1.append(numpy.array(temp_list))
r1.append(result)
orient_group,origin_group = DNA_matrix.Get_group_rotmat(r1,len(base_name_list[i]))
RMSD=DNA_matrix.Get_group_RMSD(base_name_list[i],c1,origin_group,orient_group)
time=(ts.frame-1)*dt
if ts.frame % 100 ==0:
NOW_TIME=Time.time()
usage.echo(" analysis frame %6d, time %8.1f ps, time used %8.2f s\r" %(ts.frame, time,NOW_TIME-START_TIME))
fp=open(output_name[i], 'a')
fp.write( " %7.4f\t %6.4f\n" %(time/1000.,RMSD))
fp.close()
print "Finished calculating the RMSD."
print "The result are in file: %s" %output_name
def Get_RMSD_fromTOP(coor_file, base_list):
'''
Calculate the RMSD from coor_file.
'''
LIST_NUM=len(base_list)
atom_list=Simple_atom.Get_atom_list(coor_file)
residue_list=Simple_atom.Get_Segment_list(atom_list)
base_name_list=list()
base_atom_list=list()
for i in range(LIST_NUM):
base_name_list.append( [residue_list[j-1] for j in base_list[i]])
base_atom_list.append( [DNA_matrix.Get_baseID_list(atom_list,j) for j in base_list[i]])
for i in range(LIST_NUM):
r1=[]
'''the group 1 rotate list'''
c1=[]
'''the group 1 coordinate list'''
for m in range(len(base_name_list[i])):
temp_list = [ [atom_list[x-1].atom_coor_x, atom_list[x-1].atom_coor_y, atom_list[x-1].atom_coor_z] \
for x in base_atom_list[i][m] ]
result = DNA_matrix.Get_rotate_matrix(numpy.array(temp_list), base_name_list[i][m][0])
c1.append(numpy.array(temp_list))
r1.append(result)
orient_group,origin_group = DNA_matrix.Get_group_rotmat(r1,len(base_name_list[i]))
RMSD=DNA_matrix.Get_group_RMSD(base_name_list[i],c1,origin_group,orient_group)
print "RMSD=%f" %RMSD
return
|
zhuhong/g4analysis
|
G4Analysis/G4_rise.py
|
Python
|
gpl-2.0
| 14,537
|
[
"MDAnalysis"
] |
1e3f044fdaaeb728a2420c0912ece79c7b2dc3f0074d058fb7b4aa24b570d181
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from Job import Job
import os, sys, subprocess, shutil, re
class PBSJob(Job):
    """A Job specialization that stages input files, renders a PBS batch
    script from a template, and submits it with qsub."""

    def validParams():
        """Declare the parameters this job type accepts (chunk sizing,
        placement, walltime, copy rules, and PBS directive substitutions)."""
        params = Job.validParams()
        params.addRequiredParam('chunks', "The number of PBS chunks.")
        # Only one of the next two parameters can be specified
        params.addParam('mpi_procs', "The number of MPI processes per chunk.")
        params.addParam('total_mpi_procs', "The total number of MPI processes to use divided evenly among chunks.")
        params.addParam('place', 'scatter:excl', "The PBS job placement scheme to use.")
        params.addParam('walltime', '4:00:00', "The requested walltime for this job.")
        params.addParam('no_copy', "A list of files specifically not to copy")
        params.addParam('no_copy_pattern', "A pattern of files not to copy")
        params.addParam('copy_files', "A list of files specifically to copy")
        # String-substitution parameters: each maps a param name to the
        # placeholder text replaced in the template script.
        params.addStringSubParam('pbs_o_workdir', 'PBS_O_WORKDIR', "Move to this directory")
        params.addStringSubParam('pbs_project', '#PBS -P PBS_PROJECT', "Identify as PBS_PROJECT in the PBS queuing system")
        params.addStringSubParam('pbs_stdout', '#PBS -o PBS_STDOUT', "Save stdout to this location")
        params.addStringSubParam('pbs_stderr', '#PBS -e PBS_STDERR', "Save stderr to this location")
        params.addStringSubParam('combine_streams', '#PBS -j oe', "Combine stdout and stderror into one file (needed for NO EXPECTED ERR)")
        params.addStringSubParam('threads', '--n-threads=THREADS', "The number of threads to run per MPI process.")
        params.addStringSubParam('queue', '#PBS -q QUEUE', "Which queue to submit this job to.")
        params.addStringSubParam('cli_args', 'CLI_ARGS', "Any extra command line arguments to tack on.")
        params.addStringSubParam('notifications', '#PBS -m NOTIFICATIONS', "The PBS notifications to enable: 'b' for begin, 'e' for end, 'a' for abort.")
        params.addStringSubParam('notify_address', '#PBS -M NOTIFY_ADDRESS', "The email address to use for PBS notifications")
        # Soft linked output during run
        params.addParam('soft_link_output', False, "Create links to your STDOUT and STDERR files in your working directory during the run.")
        params.addRequiredParam('moose_application', "The full path to the application to run.")
        params.addRequiredParam('input_file', "The input file name.")
        return params
    validParams = staticmethod(validParams)

    def __init__(self, name, params):
        Job.__init__(self, name, params)

    # Called from the current directory to copy files (usually from the parent)
    def copyFiles(self, job_file):
        """Copy the parent directory's files into the current (staging)
        directory, honoring the no_copy / no_copy_pattern / copy_files params,
        and record the launch directory as pbs_o_workdir."""
        params = self.specs
        # Save current location as PBS_O_WORKDIR
        params['pbs_o_workdir'] = os.getcwd()
        # Create regexp object of no_copy_pattern
        if params.isValid('no_copy_pattern'):
            # Match no_copy_pattern value
            pattern = re.compile(params['no_copy_pattern'])
        else:
            # NOTE(review): an empty pattern actually matches every string,
            # but the isValid guard in the loop below makes it unreachable.
            pattern = re.compile(r'')
        # Copy files (unless they are listed in "no_copy")
        for file in os.listdir('../'):
            if os.path.isfile('../' + file) and file != job_file and \
               (not params.isValid('no_copy') or file not in params['no_copy']) and \
               (not params.isValid('no_copy_pattern') or pattern.match(file) is None):
                shutil.copy('../' + file, '.')
        # Copy directories
        if params.isValid('copy_files'):
            for file in params['copy_files'].split():
                print file
                if os.path.isfile('../' + file):
                    shutil.copy('../' + file, '.')
                elif os.path.isdir('../' + file):
                    shutil.copytree('../' + file, file)

    def prepareJobScript(self):
        """Render the template script: compute chunk sizing, build the
        soft-link snippets, then replace every <PLACEHOLDER> with its
        parameter value and write the result next to the staged inputs."""
        f = open(self.specs['template_script'], 'r')
        content = f.read()
        f.close()
        params = self.specs
        # Error check: the two sizing parameters are mutually exclusive.
        if params.isValid('mpi_procs') and params.isValid('total_mpi_procs'):
            print "ERROR: 'mpi_procs' and 'total_mpi_procs' are exclusive. Only specify one!"
            sys.exit(1)
        # Do a few PBS job size calculations
        if params.isValid('mpi_procs'):
            params['mpi_procs_per_chunk'] = params['mpi_procs']
        elif params.isValid('total_mpi_procs'):
            params['mpi_procs_per_chunk'] = str(int(params['total_mpi_procs']) / int(params['chunks'])) # Need some more error checking here
        else:
            print "ERROR: You must specify either 'mpi_procs' or 'total_mpi_procs'"
            sys.exit(1)
        if params.isValid('threads'):
            threads = int(params['threads'])
        else:
            threads = 1
        params['ncpus_per_chunk'] = str(int(params['mpi_procs_per_chunk']) * threads)
        # Soft Link output requires several substitutions in the template file
        soft_link1 = ''
        soft_link2 = ''
        soft_link3 = ''
        # NOTE(review): compares against the string 'True' although the
        # parameter's declared default is the boolean False — presumably the
        # params system stores values as strings; confirm.
        if params['soft_link_output'] == 'True':
            soft_link1 = '#PBS -koe'
            soft_link2 = 'ln -s $HOME/$PBS_JOBNAME.o$JOB_NUM $PBS_JOBNAME.o$JOB_NUM\nln -s $HOME/$PBS_JOBNAME.e$JOB_NUM $PBS_JOBNAME.e$JOB_NUM'
            soft_link3 = 'rm $PBS_JOBNAME.o$JOB_NUM\nmv $HOME/$PBS_JOBNAME.o$JOB_NUM $PBS_JOBNAME.o$JOB_NUM\nmv $HOME/$PBS_JOBNAME.e$JOB_NUM $PBS_JOBNAME.e$JOB_NUM'
        # Add substitutions on the fly
        params.addStringSubParam('soft_link1', 'SOFT_LINK1', soft_link1, 'private')
        params.addStringSubParam('soft_link2', 'SOFT_LINK2', soft_link2, 'private')
        params.addStringSubParam('soft_link3', 'SOFT_LINK3', soft_link3, 'private')
        f = open(os.path.split(params['template_script'])[1], 'w')
        # Do all of the replacements for the valid parameters
        for param in params.valid_keys():
            if param in params.substitute:
                params[param] = params.substitute[param].replace(param.upper(), params[param])
            content = content.replace('<' + param.upper() + '>', str(params[param]))
        # Make sure we strip out any string substitution parameters that were not supplied
        for param in params.substitute_keys():
            if not params.isValid(param):
                content = content.replace('<' + param.upper() + '>', '')
        f.write(content)
        f.close()

    def launch(self):
        """Submit the rendered script with qsub and print the job identifiers."""
        # Finally launch the job
        my_process = subprocess.Popen('qsub ' + os.path.split(self.specs['template_script'])[1], stdout=subprocess.PIPE, shell=True)
        print 'JOB_NAME:', self.specs['job_name'], 'JOB_ID:', my_process.communicate()[0].split('.')[0], 'TEST_NAME:', self.specs['test_name']
|
nuclear-wizard/moose
|
python/ClusterLauncher/PBSJob.py
|
Python
|
lgpl-2.1
| 7,056
|
[
"MOOSE"
] |
61c3df806692ea73f29fdcd121cd30b9c6ad21193e443afe30acf22858a2bbfb
|
# Maked by Mr. Have fun! Version 0.2
# Shadow Weapon Coupons contributed by BiTi for the Official L2J Datapack Project
# Visit http://forum.l2jdp.com for more details
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
# Quest identifier used to look up the player's QuestState.
qn = "228_TestOfMagus"

# Quest item IDs (consumed/granted via takeItems/giveItems below).
MARK_OF_MAGUS = 2840
RUKALS_LETTER = 2841
PARINAS_LETTER = 2842
LILAC_CHARM = 2843
GOLDEN_SEED1 = 2844
GOLDEN_SEED2 = 2845
GOLDEN_SEED3 = 2846
SCORE_OF_ELEMENTS = 2847
TONE_OF_WATER = 2856
TONE_OF_FIRE = 2857
TONE_OF_WIND = 2858
TONE_OF_EARTH = 2859
UNDINE_CHARM = 2862
DAZZLING_DROP = 2848
SALAMANDER_CHARM = 2860
FLAME_CRYSTAL = 2849
SYLPH_CHARM = 2861
HARPYS_FEATHER = 2850
WYRMS_WINGBONE = 2851
WINDSUS_MANE = 2852
SERPENT_CHARM = 2863
EN_MONSTEREYE_SHELL = 2853
EN_STONEGOLEM_POWDER = 2854
EN_IRONGOLEM_SCRAP = 2855
SHADOW_WEAPON_COUPON_CGRADE = 8870

#This handels all drops from mobs. npcId:[condition,maxcount,chance,item,part]
DROPLIST={
27095:[3,1,100,GOLDEN_SEED1,1],
27096:[3,1,100,GOLDEN_SEED2,1],
27097:[3,1,100,GOLDEN_SEED3,1],
27098:[7,5,50,FLAME_CRYSTAL,2],
20230:[7,20,30,DAZZLING_DROP,2],
20231:[7,20,30,DAZZLING_DROP,2],
20157:[7,20,30,DAZZLING_DROP,2],
20232:[7,20,40,DAZZLING_DROP,2],
20234:[7,20,50,DAZZLING_DROP,2],
20145:[7,20,50,HARPYS_FEATHER,2],
20176:[7,10,50,WYRMS_WINGBONE,2],
20553:[7,10,50,WINDSUS_MANE,2],
20564:[7,10,100,EN_MONSTEREYE_SHELL,2],
20565:[7,10,100,EN_STONEGOLEM_POWDER,2],
20566:[7,10,100,EN_IRONGOLEM_SCRAP,2]
}
class Quest (JQuest) :
    """Quest 228 "Test Of Magus": a Jython quest script driven by the L2J
    engine's event hooks (onEvent for html-link events, onTalk for NPC
    dialogue, onKill for monster drops).  All progress is tracked in the
    player's QuestState via the "cond"/"onlyone" variables and quest items."""

    def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)

    def onEvent (self,event,st) :
        """Handle html-link events; each event id maps to a dialogue page
        and the corresponding item/state transitions."""
        htmltext = event
        if event == "1" :
            # Quest accepted: start the state machine and hand over the letter.
            htmltext = "30629-04.htm"
            st.set("cond","1")
            st.setState(STARTED)
            st.playSound("ItemSound.quest_accept")
            st.giveItems(RUKALS_LETTER,1)
        elif event == "30629_1" :
            htmltext = "30629-09.htm"
        elif event == "30629_2" :
            # Trade the charm + three golden seeds for the Score of Elements.
            htmltext = "30629-10.htm"
            st.takeItems(LILAC_CHARM,1)
            st.takeItems(GOLDEN_SEED1,1)
            st.takeItems(GOLDEN_SEED2,1)
            st.takeItems(GOLDEN_SEED3,1)
            st.giveItems(SCORE_OF_ELEMENTS,1)
            st.playSound("ItemSound.quest_middle")
            st.set("cond","6")
        elif event == "30391_1" :
            htmltext = "30391-02.htm"
            st.giveItems(PARINAS_LETTER,1)
            st.takeItems(RUKALS_LETTER,1)
            st.playSound("ItemSound.quest_middle")
            st.set("cond","2")
        elif event == "30612_1" :
            htmltext = "30612-02.htm"
            st.giveItems(LILAC_CHARM,1)
            st.takeItems(PARINAS_LETTER,1)
            st.playSound("ItemSound.quest_middle")
            st.set("cond","3")
        elif event == "30412_1" :
            htmltext = "30412-02.htm"
            st.giveItems(SYLPH_CHARM,1)
            st.playSound("ItemSound.quest_middle")
            st.set("cond","7")
        elif event == "30409_1" :
            htmltext = "30409-02.htm"
        elif event == "30409_2" :
            htmltext = "30409-03.htm"
            st.giveItems(SERPENT_CHARM,1)
            st.playSound("ItemSound.quest_middle")
            st.set("cond","7")
        return htmltext

    def onTalk (self,npc,player):
        """Dispatch NPC dialogue based on the NPC id and the player's current
        quest state / inventory of quest items."""
        htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
        st = player.getQuestState(qn)
        if not st : return htmltext
        npcId = npc.getNpcId()
        id = st.getState()
        if npcId != 30629 and id != STARTED : return htmltext
        if id == CREATED :
            # First contact: initialize the quest-state variables.
            st.setState(STARTING)
            st.set("cond","0")
            st.set("onlyone","0")
            st.set("id","0")
        if npcId == 30629 :
            if st.getInt("cond")==0 and st.getInt("onlyone")==0 :
                if st.getInt("cond") < 15 :
                    # Class check: only these class ids may take the quest.
                    if player.getClassId().getId() in [ 0x0b, 0x1a, 0x27] :
                        if player.getLevel() < 39 :
                            htmltext = "30629-02.htm"
                        else:
                            htmltext = "30629-03.htm"
                    else:
                        htmltext = "30629-01.htm"
                        st.exitQuest(1)
                else:
                    htmltext = "30629-01.htm"
                    st.exitQuest(1)
            elif st.getInt("cond")==0 and st.getInt("onlyone")==1 :
                htmltext = "<html><body>This quest has already been completed.</body></html>"
            elif st.getInt("cond")==1:
                htmltext = "30629-05.htm"
            elif st.getInt("cond")==2:
                htmltext = "30629-06.htm"
            elif st.getInt("cond")==3:
                htmltext = "30629-07.htm"
            elif st.getInt("cond")==5:
                htmltext = "30629-08.htm"
            elif st.getInt("cond") and st.getQuestItemsCount(SCORE_OF_ELEMENTS)==1 :
                # Completion: all four elemental tones collected -> rewards.
                if st.getQuestItemsCount(TONE_OF_WATER) and st.getQuestItemsCount(TONE_OF_FIRE) and st.getQuestItemsCount(TONE_OF_WIND) and st.getQuestItemsCount(TONE_OF_EARTH) :
                    st.takeItems(SCORE_OF_ELEMENTS,1)
                    st.takeItems(TONE_OF_WATER,1)
                    st.takeItems(TONE_OF_FIRE,1)
                    st.takeItems(TONE_OF_WIND,1)
                    st.takeItems(TONE_OF_EARTH,1)
                    st.giveItems(MARK_OF_MAGUS,1)
                    st.giveItems(SHADOW_WEAPON_COUPON_CGRADE,15)
                    st.addExpAndSp(139039,40000)
                    htmltext = "30629-12.htm"
                    st.set("cond","0")
                    st.set("onlyone","1")
                    st.setState(COMPLETED)
                    st.playSound("ItemSound.quest_finish")
                else:
                    htmltext = "30629-11.htm"
        elif npcId == 30391:
            if st.getInt("cond")==1:
                htmltext = "30391-01.htm"
            elif st.getInt("cond")==2:
                htmltext = "30391-03.htm"
            elif st.getInt("cond")<6 and st.getInt("cond")>2:
                htmltext = "30391-04.htm"
            elif st.getInt("cond")>5 :
                htmltext = "30391-05.htm"
        elif npcId == 30612:
            if st.getInt("cond")==2 :
                htmltext = "30612-01.htm"
            elif st.getInt("cond")<5 and st.getInt("cond")>2:
                htmltext = "30612-03.htm"
            elif st.getInt("cond")==5:
                htmltext = "30612-04.htm"
            elif st.getInt("cond")>5:
                htmltext = "30612-05.htm"
        elif npcId == 30413:
            # Water spirit: charm out -> collect drops -> trade for the tone.
            if st.getInt("cond") and st.getQuestItemsCount(SCORE_OF_ELEMENTS)==1 and st.getQuestItemsCount(TONE_OF_WATER)==0 and st.getQuestItemsCount(UNDINE_CHARM)==0 :
                htmltext = "30413-01.htm"
                st.giveItems(UNDINE_CHARM,1)
                st.set("cond","7")
            elif st.getInt("cond") and st.getQuestItemsCount(SCORE_OF_ELEMENTS)==1 and st.getQuestItemsCount(UNDINE_CHARM)==1 :
                if st.getQuestItemsCount(DAZZLING_DROP) < 20 :
                    htmltext = "30413-02.htm"
                else:
                    htmltext = "30413-03.htm"
                    st.takeItems(DAZZLING_DROP,st.getQuestItemsCount(DAZZLING_DROP))
                    st.takeItems(UNDINE_CHARM,1)
                    st.giveItems(TONE_OF_WATER,1)
                    st.playSound("ItemSound.quest_middle")
            elif st.getInt("cond") and st.getQuestItemsCount(SCORE_OF_ELEMENTS)==1 and st.getQuestItemsCount(TONE_OF_WATER)==1 and st.getQuestItemsCount(UNDINE_CHARM)==0 :
                htmltext = "30413-04.htm"
        elif npcId == 30411 :
            # Fire spirit: same charm/collect/trade pattern as 30413.
            if st.getInt("cond") and st.getQuestItemsCount(SCORE_OF_ELEMENTS)==1 and st.getQuestItemsCount(TONE_OF_FIRE)==0 and st.getQuestItemsCount(SALAMANDER_CHARM)==0 :
                htmltext = "30411-01.htm"
                st.giveItems(SALAMANDER_CHARM,1)
                st.playSound("ItemSound.quest_middle")
                st.set("cond","7")
            elif st.getInt("cond") and st.getQuestItemsCount(SCORE_OF_ELEMENTS)==1 and st.getQuestItemsCount(SALAMANDER_CHARM)==1 :
                if st.getQuestItemsCount(FLAME_CRYSTAL) < 5 :
                    htmltext = "30411-02.htm"
                else:
                    htmltext = "30411-03.htm"
                    st.takeItems(FLAME_CRYSTAL,st.getQuestItemsCount(FLAME_CRYSTAL))
                    st.giveItems(TONE_OF_FIRE,1)
                    st.takeItems(SALAMANDER_CHARM,1)
                    st.playSound("ItemSound.quest_middle")
            elif st.getInt("cond") and st.getQuestItemsCount(SCORE_OF_ELEMENTS)==1 and st.getQuestItemsCount(TONE_OF_FIRE)==1 and st.getQuestItemsCount(SALAMANDER_CHARM)==0 :
                htmltext = "30411-04.htm"
        elif npcId == 30412 :
            # Wind spirit: charm is granted via the "30412_1" event instead.
            if st.getInt("cond") and st.getQuestItemsCount(SCORE_OF_ELEMENTS)==1 and st.getQuestItemsCount(TONE_OF_WIND)==0 and st.getQuestItemsCount(SYLPH_CHARM)==0 :
                htmltext = "30412-01.htm"
            elif st.getInt("cond") and st.getQuestItemsCount(SCORE_OF_ELEMENTS)==1 and st.getQuestItemsCount(SYLPH_CHARM)==1 :
                if st.getQuestItemsCount(HARPYS_FEATHER)+st.getQuestItemsCount(WYRMS_WINGBONE)+st.getQuestItemsCount(WINDSUS_MANE) < 40 :
                    htmltext = "30412-03.htm"
                else:
                    htmltext = "30412-04.htm"
                    st.takeItems(HARPYS_FEATHER,st.getQuestItemsCount(HARPYS_FEATHER))
                    st.takeItems(WYRMS_WINGBONE,st.getQuestItemsCount(WYRMS_WINGBONE))
                    st.takeItems(WINDSUS_MANE,st.getQuestItemsCount(WINDSUS_MANE))
                    st.giveItems(TONE_OF_WIND,1)
                    st.takeItems(SYLPH_CHARM,1)
                    st.playSound("ItemSound.quest_middle")
            elif st.getInt("cond") and st.getQuestItemsCount(SCORE_OF_ELEMENTS)==1 and st.getQuestItemsCount(TONE_OF_WIND)==1 and st.getQuestItemsCount(SYLPH_CHARM)==0 :
                htmltext = "30412-05.htm"
        elif npcId == 30409 :
            # Earth spirit: charm is granted via the "30409_2" event instead.
            if st.getInt("cond") and st.getQuestItemsCount(SCORE_OF_ELEMENTS)==1 and st.getQuestItemsCount(TONE_OF_EARTH)==0 and st.getQuestItemsCount(SERPENT_CHARM)==0 :
                htmltext = "30409-01.htm"
            elif st.getInt("cond") and st.getQuestItemsCount(SCORE_OF_ELEMENTS)==1 and st.getQuestItemsCount(SERPENT_CHARM)==1 :
                if st.getQuestItemsCount(EN_MONSTEREYE_SHELL)+st.getQuestItemsCount(EN_STONEGOLEM_POWDER)+st.getQuestItemsCount(EN_IRONGOLEM_SCRAP) < 30 :
                    htmltext = "30409-04.htm"
                else:
                    htmltext = "30409-05.htm"
                    st.takeItems(EN_MONSTEREYE_SHELL,st.getQuestItemsCount(EN_MONSTEREYE_SHELL))
                    st.takeItems(EN_STONEGOLEM_POWDER,st.getQuestItemsCount(EN_STONEGOLEM_POWDER))
                    st.takeItems(EN_IRONGOLEM_SCRAP,st.getQuestItemsCount(EN_IRONGOLEM_SCRAP))
                    st.giveItems(TONE_OF_EARTH,1)
                    st.takeItems(SERPENT_CHARM,1)
                    st.playSound("ItemSound.quest_middle")
            elif st.getInt("cond") and st.getQuestItemsCount(SCORE_OF_ELEMENTS)==1 and st.getQuestItemsCount(TONE_OF_EARTH)==1 and st.getQuestItemsCount(SERPENT_CHARM)==0 :
                htmltext = "30409-06.htm"
        return htmltext

    def onKill(self,npc,player,isPet):
        """Award quest drops per the DROPLIST table; when the last item of a
        maxcount is reached play the 'middle' sound, and for part==1 drops
        advance to cond 5 once all three golden seeds are held."""
        st = player.getQuestState(qn)
        if not st : return
        if st.getState() != STARTED : return
        npcId = npc.getNpcId()
        condition,maxcount,chance,item,part = DROPLIST[npcId]
        random = st.getRandom(100)
        itemcount = st.getQuestItemsCount(item)
        if st.getInt("cond") == condition and itemcount < maxcount and random < chance :
            if itemcount == maxcount-1:
                st.giveItems(item,1)
                st.playSound("ItemSound.quest_middle")
                if part==1:
                    count=0
                    for items in [GOLDEN_SEED1,GOLDEN_SEED2,GOLDEN_SEED3]:
                        count+=st.getQuestItemsCount(items)
                    if count>2:
                        st.set("cond","5")
            else:
                st.giveItems(item,1)
                st.playSound("ItemSound.quest_itemget")
        return
# Instantiate the quest and its state-machine states.
QUEST = Quest(228,qn,"Test Of Magus")
CREATED = State('Start', QUEST)
STARTING = State('Starting', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
# NPC 30629 both starts the quest and handles its main dialogue.
QUEST.addStartNpc(30629)
QUEST.addTalkId(30629)
# Secondary dialogue NPCs (letters, charms, and elemental spirits).
for npcId in [30391,30409,30411,30412,30413,30612]:
    QUEST.addTalkId(npcId)
# Monsters that can yield quest drops (see DROPLIST).
for mobId in [20145,20157,20176,20230,20231,20232,20234,27095,27096,27097,27098,20553,20564,20565,20566]:
    QUEST.addKillId(mobId)
# Register every quest item (ids 2841..2863) so it is removed on quest abort.
for item in range(2841,2864):
    STARTED.addQuestDrop(30629,item,1)
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/quests/228_TestOfMagus/__init__.py
|
Python
|
gpl-3.0
| 11,600
|
[
"VisIt"
] |
8e2c6752296981414d60d024ee0b5a3dd4a66deb187058682f9e7afec46bb68e
|
# -*- coding: utf-8 -*-
r""" ------------> ------------> ------------> ------------>
______ __ _ ____
/ ____/__ / /__ ____ (_)_ ______ ___ / _ \____ ________
\__ \/ _ \/ / _ \/ __ \/ / / / / __ `__ \/ /_) / __ \/ ___/ _ \
___/ / __/ / __/ / / / / /_/ / / / / / / /_) / (_/ /__ / __/
/____/\___/_/\___/_/ /_/_/\__,_/_/ /_/ /_/_____/\__,_/____/\___/
------------> ------------> ------------> ------------>
The BaseCase class is the main gateway for using The SeleniumBase Framework.
It inherits Python's unittest.TestCase class, and runs with Pytest or Nose.
All tests using BaseCase automatically launch WebDriver browsers for tests.
Usage:
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_anything(self):
# Write your code here. Example:
self.open("https://github.com/")
self.type("input.header-search-input", "SeleniumBase\n")
self.click('a[href="/seleniumbase/SeleniumBase"]')
self.assert_element("div.repository-content")
....
SeleniumBase methods expand and improve on existing WebDriver commands.
Improvements include making WebDriver more robust, reliable, and flexible.
Page elements are given enough time to load before WebDriver acts on them.
Code becomes greatly simplified and easier to maintain.
"""
import codecs
import json
import logging
import os
import re
import shutil
import sys
import time
import unittest
import urllib3
from selenium.common.exceptions import (
ElementClickInterceptedException as ECI_Exception,
ElementNotInteractableException as ENI_Exception,
MoveTargetOutOfBoundsException,
NoSuchElementException,
NoSuchWindowException,
StaleElementReferenceException,
WebDriverException,
)
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.remote.remote_connection import LOGGER
from seleniumbase import config as sb_config
from seleniumbase.config import settings
from seleniumbase.core import download_helper
from seleniumbase.core import log_helper
from seleniumbase.fixtures import constants
from seleniumbase.fixtures import css_to_xpath
from seleniumbase.fixtures import js_utils
from seleniumbase.fixtures import page_actions
from seleniumbase.fixtures import page_utils
from seleniumbase.fixtures import shared_utils
from seleniumbase.fixtures import xpath_to_css
# Silence noisy loggers from HTTP/WebDriver dependencies.
logging.getLogger("requests").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
urllib3.disable_warnings()
LOGGER.setLevel(logging.WARNING)
# Interpreter-version feature flags used throughout this module.
python3 = True
if sys.version_info[0] < 3:
    python3 = False
    # Python 2 only: force UTF-8 as the default string encoding.
    reload(sys)  # noqa: F821
    sys.setdefaultencoding("utf8")
selenium4 = False
if sys.version_info >= (3, 7):
    # NOTE(review): keyed off the Python version, not the installed selenium
    # package version — presumably selenium 4 requires Python 3.7+; confirm.
    selenium4 = True
class BaseCase(unittest.TestCase):
""" <Class seleniumbase.BaseCase> """
    def __init__(self, *args, **kwargs):
        """Initialize the per-test bookkeeping state.

        All of the attributes below are plain defaults; the real driver and
        configuration are attached later (outside this block) during test
        setup.  Double-underscore attributes are private to BaseCase;
        single-underscore ones are read by external framework modules.
        """
        super(BaseCase, self).__init__(*args, **kwargs)
        # Driver / environment placeholders (populated during setUp).
        self.driver = None
        self.environment = None
        self.env = None  # Add a shortened version of self.environment
        # Test-lifecycle bookkeeping.
        self.__page_sources = []
        self.__extra_actions = []
        self.__js_start_time = 0
        self.__set_c_from_switch = False
        self.__called_setup = False
        self.__called_teardown = False
        self.__start_time_ms = None
        self.__requests_timeout = None
        self.__screenshot_count = 0
        self.__will_be_skipped = False
        self.__passed_then_skipped = False
        self.__visual_baseline_copies = []
        # Last-known page state (URLs default to the blank "data:," page).
        self.__last_url_of_deferred_assert = "data:,"
        self.__last_page_load_url = "data:,"
        self.__last_page_screenshot = None
        self.__last_page_screenshot_png = None
        self.__last_page_url = None
        self.__last_page_source = None
        self.__skip_reason = None
        # Recorder-Mode state.
        self.__origins_to_save = []
        self.__actions_to_save = []
        self.__dont_record_open = False
        self.__dont_record_js_click = False
        self.__new_window_on_rec_open = True
        self.__overrided_default_timeouts = False
        self.__added_pytest_html_extra = None
        # Deferred-assert accumulators.
        self.__deferred_assert_count = 0
        self.__deferred_assert_failures = []
        # Mobile-emulation device metrics.
        self.__device_width = None
        self.__device_height = None
        self.__device_pixel_ratio = None
        self.__driver_browser_map = {}
        # jquery-confirm theme overrides.
        self.__changed_jqc_theme = False
        self.__jqc_default_theme = None
        self.__jqc_default_color = None
        self.__jqc_default_width = None
        # Requires self._* instead of self.__* for external class use
        self._language = "English"
        self._presentation_slides = {}
        self._presentation_transition = {}
        self._rec_overrides_switch = True  # Recorder-Mode uses set_c vs switch
        self._sb_test_identifier = None
        self._html_report_extra = []  # (Used by pytest_plugin.py)
        self._default_driver = None
        self._drivers_list = []
        # Chart-maker and tour state.
        self._chart_data = {}
        self._chart_count = 0
        self._chart_label = {}
        self._chart_xcount = 0
        self._chart_first_series = {}
        self._chart_series_count = {}
        self._tour_steps = {}
    def open(self, url):
        """Navigate the current browser window to the specified page.

        url -- the destination page; must look like a page URL (one of
               "http:", "https:", "://", "data:", "file:", "about:",
               "chrome:", "opera:", or "edge:"), otherwise an Exception is
               raised.  A "://..." URL is upgraded to "https://...".
        """
        self.__check_scope()
        self.__check_browser()
        # Remember where we were, to detect whether the navigation stuck.
        pre_action_url = None
        try:
            pre_action_url = self.driver.current_url
        except Exception:
            pass
        url = str(url).strip()  # Remove leading and trailing whitespace
        if not self.__looks_like_a_page_url(url):
            # url should start with one of the following:
            # "http:", "https:", "://", "data:", "file:",
            # "about:", "chrome:", "opera:", or "edge:".
            msg = 'Did you forget to prefix your URL with "http:" or "https:"?'
            raise Exception('Invalid URL: "%s"\n%s' % (url, msg))
        self.__last_page_load_url = None
        js_utils.clear_out_console_logs(self.driver)
        if url.startswith("://"):
            # Convert URLs such as "://google.com" into "https://google.com"
            url = "https" + url
        if self.recorder_mode and not self.__dont_record_open:
            # Recorder-Mode: log this navigation as a "_url_" action.
            time_stamp = self.execute_script("return Date.now();")
            origin = self.get_origin()
            action = ["_url_", origin, url, time_stamp]
            self.__extra_actions.append(action)
        if self.recorder_mode and self.__new_window_on_rec_open:
            # Recorder-Mode: cross-domain navigations get a fresh window.
            c_url = self.driver.current_url
            if ("http:") in c_url or ("https:") in c_url or ("file:") in c_url:
                if self.get_domain_url(url) != self.get_domain_url(c_url):
                    self.open_new_window(switch_to=True)
        try:
            self.driver.get(url)
        except Exception as e:
            # Retry once after a brief pause on a connection timeout.
            # NOTE(review): e.msg exists on WebDriverException subclasses;
            # any other exception type would raise AttributeError here —
            # confirm this branch only ever sees WebDriver errors.
            if "ERR_CONNECTION_TIMED_OUT" in e.msg:
                self.sleep(0.5)
                self.driver.get(url)
            else:
                raise Exception(e.msg)
        if (
            self.driver.current_url == pre_action_url
            and pre_action_url != url
        ):
            time.sleep(0.1)  # Make sure load happens
        if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
            self.wait_for_ready_state_complete()
        self.__demo_mode_pause_if_active()
def get(self, url):
"""If "url" looks like a page URL, open the URL in the web browser.
Otherwise, return self.get_element(URL_AS_A_SELECTOR)
Examples:
self.get("https://seleniumbase.io") # Navigates to the URL
self.get("input.class") # Finds and returns the WebElement
"""
self.__check_scope()
if self.__looks_like_a_page_url(url):
self.open(url)
else:
return self.get_element(url) # url is treated like a selector
def click(
self, selector, by=By.CSS_SELECTOR, timeout=None, delay=0, scroll=True
):
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
original_selector = selector
original_by = by
selector, by = self.__recalculate_selector(selector, by)
if delay and (type(delay) in [int, float]) and delay > 0:
time.sleep(delay)
if page_utils.is_link_text_selector(selector) or by == By.LINK_TEXT:
if not self.is_link_text_visible(selector):
# Handle a special case of links hidden in dropdowns
self.click_link_text(selector, timeout=timeout)
return
if (
page_utils.is_partial_link_text_selector(selector)
or by == By.PARTIAL_LINK_TEXT
):
if not self.is_partial_link_text_visible(selector):
# Handle a special case of partial links hidden in dropdowns
self.click_partial_link_text(selector, timeout=timeout)
return
if self.__is_shadow_selector(selector):
self.__shadow_click(selector, timeout)
return
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout=timeout
)
self.__demo_mode_highlight_if_active(original_selector, original_by)
if scroll and not self.demo_mode and not self.slow_mode:
self.__scroll_to_element(element, selector, by)
pre_action_url = self.driver.current_url
pre_window_count = len(self.driver.window_handles)
try:
if self.browser == "ie" and by == By.LINK_TEXT:
# An issue with clicking Link Text on IE means using jquery
self.__jquery_click(selector, by=by)
elif self.browser == "safari":
if by == By.LINK_TEXT:
self.__jquery_click(selector, by=by)
else:
self.__js_click(selector, by=by)
else:
href = None
new_tab = False
onclick = None
try:
if self.headless and element.tag_name == "a":
# Handle a special case of opening a new tab (headless)
href = element.get_attribute("href").strip()
onclick = element.get_attribute("onclick")
target = element.get_attribute("target")
if target == "_blank":
new_tab = True
if new_tab and self.__looks_like_a_page_url(href):
if onclick:
try:
self.execute_script(onclick)
except Exception:
pass
current_window = self.driver.current_window_handle
self.open_new_window()
try:
self.open(href)
except Exception:
pass
self.switch_to_window(current_window)
return
except Exception:
pass
# Normal click
element.click()
except StaleElementReferenceException:
self.wait_for_ready_state_complete()
time.sleep(0.16)
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout=timeout
)
try:
self.__scroll_to_element(element, selector, by)
except Exception:
pass
if self.browser == "safari":
if by == By.LINK_TEXT:
self.__jquery_click(selector, by=by)
else:
self.__js_click(selector, by=by)
else:
element.click()
except ENI_Exception:
self.wait_for_ready_state_complete()
time.sleep(0.1)
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout=timeout
)
href = None
new_tab = False
onclick = None
try:
if element.tag_name == "a":
# Handle a special case of opening a new tab (non-headless)
href = element.get_attribute("href").strip()
onclick = element.get_attribute("onclick")
target = element.get_attribute("target")
if target == "_blank":
new_tab = True
if new_tab and self.__looks_like_a_page_url(href):
if onclick:
try:
self.execute_script(onclick)
except Exception:
pass
current_window = self.driver.current_window_handle
self.open_new_window()
try:
self.open(href)
except Exception:
pass
self.switch_to_window(current_window)
return
except Exception:
pass
self.__scroll_to_element(element, selector, by)
if self.browser == "firefox" or self.browser == "safari":
if by == By.LINK_TEXT or "contains(" in selector:
self.__jquery_click(selector, by=by)
else:
self.__js_click(selector, by=by)
else:
element.click()
except (WebDriverException, MoveTargetOutOfBoundsException):
self.wait_for_ready_state_complete()
try:
self.__js_click(selector, by=by)
except Exception:
try:
self.__jquery_click(selector, by=by)
except Exception:
# One more attempt to click on the element
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout=timeout
)
element.click()
latest_window_count = len(self.driver.window_handles)
if (
latest_window_count > pre_window_count
and (
self.recorder_mode
or (
settings.SWITCH_TO_NEW_TABS_ON_CLICK
and self.driver.current_url == pre_action_url
)
)
):
self.__switch_to_newest_window_if_not_blank()
if settings.WAIT_FOR_RSC_ON_CLICKS:
self.wait_for_ready_state_complete()
else:
# A smaller subset of self.wait_for_ready_state_complete()
self.wait_for_angularjs(timeout=settings.MINI_TIMEOUT)
if self.driver.current_url != pre_action_url:
self.__ad_block_as_needed()
if self.browser == "safari":
time.sleep(0.02)
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def slow_click(self, selector, by=By.CSS_SELECTOR, timeout=None):
"""Similar to click(), but pauses for a brief moment before clicking.
When used in combination with setting the user-agent, you can often
bypass bot-detection by tricking websites into thinking that you're
not a bot. (Useful on websites that block web automation tools.)
To set the user-agent, use: ``--agent=AGENT``.
Here's an example message from GitHub's bot-blocker:
``You have triggered an abuse detection mechanism...``
"""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if not self.demo_mode and not self.slow_mode:
self.click(selector, by=by, timeout=timeout, delay=1.05)
elif self.slow_mode:
self.click(selector, by=by, timeout=timeout, delay=0.65)
else:
# Demo Mode already includes a small delay
self.click(selector, by=by, timeout=timeout, delay=0.25)
def double_click(self, selector, by=By.CSS_SELECTOR, timeout=None):
from selenium.webdriver.common.action_chains import ActionChains
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
original_selector = selector
original_by = by
selector, by = self.__recalculate_selector(selector, by)
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout=timeout
)
self.__demo_mode_highlight_if_active(original_selector, original_by)
if not self.demo_mode and not self.slow_mode:
self.__scroll_to_element(element, selector, by)
self.wait_for_ready_state_complete()
# Find the element one more time in case scrolling hid it
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout=timeout
)
pre_action_url = self.driver.current_url
try:
if self.browser == "safari":
# Jump to the "except" block where the other script should work
raise Exception("This Exception will be caught.")
actions = ActionChains(self.driver)
actions.double_click(element).perform()
except Exception:
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector) # Add "\\" to special chars
css_selector = self.__escape_quotes_if_needed(css_selector)
double_click_script = (
"""var targetElement1 = document.querySelector('%s');
var clickEvent1 = document.createEvent('MouseEvents');
clickEvent1.initEvent('dblclick', true, true);
targetElement1.dispatchEvent(clickEvent1);"""
% css_selector
)
if ":contains\\(" not in css_selector:
self.execute_script(double_click_script)
else:
double_click_script = (
"""jQuery('%s').dblclick();""" % css_selector
)
self.safe_execute_script(double_click_script)
if settings.WAIT_FOR_RSC_ON_CLICKS:
self.wait_for_ready_state_complete()
else:
# A smaller subset of self.wait_for_ready_state_complete()
self.wait_for_angularjs(timeout=settings.MINI_TIMEOUT)
if self.driver.current_url != pre_action_url:
self.__ad_block_as_needed()
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def click_chain(
self, selectors_list, by=By.CSS_SELECTOR, timeout=None, spacing=0
):
"""This method clicks on a list of elements in succession.
@Params
selectors_list - The list of selectors to click on.
by - The type of selector to search by (Default: CSS_Selector).
timeout - How long to wait for the selector to be visible.
spacing - The amount of time to wait between clicks (in seconds).
"""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
for selector in selectors_list:
self.click(selector, by=by, timeout=timeout)
if spacing > 0:
time.sleep(spacing)
def update_text(
self, selector, text, by=By.CSS_SELECTOR, timeout=None, retry=False
):
"""This method updates an element's text field with new text.
Has multiple parts:
* Waits for the element to be visible.
* Waits for the element to be interactive.
* Clears the text field.
* Types in the new text.
* Hits Enter/Submit (if the text ends in "\n").
@Params
selector - the selector of the text field
text - the new text to type into the text field
by - the type of selector to search by (Default: CSS Selector)
timeout - how long to wait for the selector to be visible
retry - if True, use JS if the Selenium text update fails
"""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
self.__shadow_type(selector, text, timeout)
return
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode and not self.slow_mode:
self.__scroll_to_element(element, selector, by)
try:
element.clear() # May need https://stackoverflow.com/a/50691625
backspaces = Keys.BACK_SPACE * 42 # Is the answer to everything
element.send_keys(backspaces) # In case autocomplete keeps text
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.16)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
try:
element.clear()
except Exception:
pass # Clearing the text field first might not be necessary
except Exception:
pass # Clearing the text field first might not be necessary
self.__demo_mode_pause_if_active(tiny=True)
pre_action_url = self.driver.current_url
if type(text) is int or type(text) is float:
text = str(text)
try:
if not text.endswith("\n"):
element.send_keys(text)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
else:
element.send_keys(text[:-1])
element.send_keys(Keys.RETURN)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.16)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
element.clear()
if not text.endswith("\n"):
element.send_keys(text)
else:
element.send_keys(text[:-1])
element.send_keys(Keys.RETURN)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
if (
retry
and element.get_attribute("value") != text
and not text.endswith("\n")
):
logging.debug("update_text() is falling back to JavaScript!")
self.set_value(selector, text, by=by)
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def add_text(self, selector, text, by=By.CSS_SELECTOR, timeout=None):
"""The more-reliable version of driver.send_keys()
Similar to update_text(), but won't clear the text field first."""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
self.__shadow_type(selector, text, timeout, clear_first=False)
return
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode and not self.slow_mode:
self.__scroll_to_element(element, selector, by)
pre_action_url = self.driver.current_url
if type(text) is int or type(text) is float:
text = str(text)
try:
if not text.endswith("\n"):
element.send_keys(text)
else:
element.send_keys(text[:-1])
element.send_keys(Keys.RETURN)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.16)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
if not text.endswith("\n"):
element.send_keys(text)
else:
element.send_keys(text[:-1])
element.send_keys(Keys.RETURN)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def type(
self, selector, text, by=By.CSS_SELECTOR, timeout=None, retry=False
):
"""Same as self.update_text()
This method updates an element's text field with new text.
Has multiple parts:
* Waits for the element to be visible.
* Waits for the element to be interactive.
* Clears the text field.
* Types in the new text.
* Hits Enter/Submit (if the text ends in "\n").
@Params
selector - the selector of the text field
text - the new text to type into the text field
by - the type of selector to search by (Default: CSS Selector)
timeout - how long to wait for the selector to be visible
retry - if True, use JS if the Selenium text update fails
DO NOT confuse self.type() with Python type()! They are different!
"""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.update_text(selector, text, by=by, timeout=timeout, retry=retry)
def submit(self, selector, by=By.CSS_SELECTOR):
""" Alternative to self.driver.find_element_by_*(SELECTOR).submit() """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
element = self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT
)
element.submit()
self.__demo_mode_pause_if_active()
def clear(self, selector, by=By.CSS_SELECTOR, timeout=None):
"""This method clears an element's text field.
A clear() is already included with most methods that type text,
such as self.type(), self.update_text(), etc.
Does not use Demo Mode highlights, mainly because we expect
that some users will be calling an unnecessary clear() before
calling a method that already includes clear() as part of it.
In case websites trigger an autofill after clearing a field,
add backspaces to make sure autofill doesn't undo the clear.
@Params
selector - the selector of the text field
by - the type of selector to search by (Default: CSS Selector)
timeout - how long to wait for the selector to be visible
"""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
self.__shadow_clear(selector, timeout)
return
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
self.scroll_to(selector, by=by, timeout=timeout)
try:
element.clear()
backspaces = Keys.BACK_SPACE * 42 # Autofill Defense
element.send_keys(backspaces)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.16)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
element.clear()
try:
backspaces = Keys.BACK_SPACE * 42 # Autofill Defense
element.send_keys(backspaces)
except Exception:
pass
except Exception:
element.clear()
def focus(self, selector, by=By.CSS_SELECTOR, timeout=None):
"""Make the current page focus on an interactable element.
If the element is not interactable, only scrolls to it.
The "tab" key is another way of setting the page focus."""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
self.scroll_to(selector, by=by, timeout=timeout)
try:
element.send_keys(Keys.NULL)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.12)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
try:
element.send_keys(Keys.NULL)
except ENI_Exception:
# Non-interactable element. Skip focus and continue.
pass
self.__demo_mode_pause_if_active()
def refresh_page(self):
self.__check_scope()
self.__last_page_load_url = None
js_utils.clear_out_console_logs(self.driver)
self.driver.refresh()
self.wait_for_ready_state_complete()
def refresh(self):
""" The shorter version of self.refresh_page() """
self.refresh_page()
def get_current_url(self):
self.__check_scope()
current_url = self.driver.current_url
if "%" in current_url and python3:
try:
from urllib.parse import unquote
current_url = unquote(current_url, errors="strict")
except Exception:
pass
return current_url
def get_origin(self):
self.__check_scope()
return self.execute_script("return window.location.origin;")
def get_page_source(self):
self.wait_for_ready_state_complete()
return self.driver.page_source
def get_page_title(self):
self.wait_for_ready_state_complete()
self.wait_for_element_present("title", timeout=settings.SMALL_TIMEOUT)
time.sleep(0.03)
return self.driver.title
def get_title(self):
""" The shorter version of self.get_page_title() """
return self.get_page_title()
def get_user_agent(self):
self.__check_scope()
self.__check_browser()
user_agent = self.driver.execute_script("return navigator.userAgent;")
return user_agent
def get_locale_code(self):
self.__check_scope()
self.__check_browser()
locale_code = self.driver.execute_script(
"return navigator.language || navigator.languages[0];"
)
return locale_code
def go_back(self):
self.__check_scope()
self.__last_page_load_url = None
self.driver.back()
if self.browser == "safari":
self.wait_for_ready_state_complete()
self.driver.refresh()
self.wait_for_ready_state_complete()
self.__demo_mode_pause_if_active()
def go_forward(self):
self.__check_scope()
self.__last_page_load_url = None
self.driver.forward()
self.wait_for_ready_state_complete()
self.__demo_mode_pause_if_active()
def open_start_page(self):
"""Navigates the current browser window to the start_page.
You can set the start_page on the command-line in three ways:
'--start_page=URL', '--start-page=URL', or '--url=URL'.
If the start_page is not set, then "data:," will be used."""
self.__check_scope()
start_page = self.start_page
if type(start_page) is str:
start_page = start_page.strip() # Remove extra whitespace
if start_page and len(start_page) >= 4:
if page_utils.is_valid_url(start_page):
self.open(start_page)
else:
new_start_page = "https://" + start_page
if page_utils.is_valid_url(new_start_page):
self.__dont_record_open = True
self.open(new_start_page)
self.__dont_record_open = False
else:
logging.info('Invalid URL: "%s"!' % start_page)
self.open("data:,")
else:
self.open("data:,")
def open_if_not_url(self, url):
""" Opens the url in the browser if it's not the current url. """
self.__check_scope()
if self.driver.current_url != url:
self.open(url)
def is_element_present(self, selector, by=By.CSS_SELECTOR):
self.wait_for_ready_state_complete()
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
return self.__is_shadow_element_present(selector)
return page_actions.is_element_present(self.driver, selector, by)
def is_element_visible(self, selector, by=By.CSS_SELECTOR):
self.wait_for_ready_state_complete()
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
return self.__is_shadow_element_visible(selector)
return page_actions.is_element_visible(self.driver, selector, by)
def is_element_enabled(self, selector, by=By.CSS_SELECTOR):
self.wait_for_ready_state_complete()
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
return self.__is_shadow_element_enabled(selector)
return page_actions.is_element_enabled(self.driver, selector, by)
def is_text_visible(self, text, selector="html", by=By.CSS_SELECTOR):
self.wait_for_ready_state_complete()
time.sleep(0.01)
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
return self.__is_shadow_text_visible(text, selector)
return page_actions.is_text_visible(self.driver, text, selector, by)
def is_attribute_present(
self, selector, attribute, value=None, by=By.CSS_SELECTOR
):
"""Returns True if the element attribute/value is found.
If the value is not specified, the attribute only needs to exist."""
self.wait_for_ready_state_complete()
time.sleep(0.01)
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
return self.__is_shadow_attribute_present(
selector, attribute, value
)
return page_actions.is_attribute_present(
self.driver, selector, attribute, value, by
)
def is_link_text_visible(self, link_text):
self.wait_for_ready_state_complete()
time.sleep(0.01)
return page_actions.is_element_visible(
self.driver, link_text, by=By.LINK_TEXT
)
def is_partial_link_text_visible(self, partial_link_text):
self.wait_for_ready_state_complete()
time.sleep(0.01)
return page_actions.is_element_visible(
self.driver, partial_link_text, by=By.PARTIAL_LINK_TEXT
)
def is_link_text_present(self, link_text):
"""Returns True if the link text appears in the HTML of the page.
The element doesn't need to be visible,
such as elements hidden inside a dropdown selection."""
self.wait_for_ready_state_complete()
soup = self.get_beautiful_soup()
html_links = soup.find_all("a")
for html_link in html_links:
if html_link.text.strip() == link_text.strip():
return True
return False
def is_partial_link_text_present(self, link_text):
"""Returns True if the partial link appears in the HTML of the page.
The element doesn't need to be visible,
such as elements hidden inside a dropdown selection."""
self.wait_for_ready_state_complete()
soup = self.get_beautiful_soup()
html_links = soup.find_all("a")
for html_link in html_links:
if link_text.strip() in html_link.text.strip():
return True
return False
def get_link_attribute(self, link_text, attribute, hard_fail=True):
"""Finds a link by link text and then returns the attribute's value.
If the link text or attribute cannot be found, an exception will
get raised if hard_fail is True (otherwise None is returned)."""
self.wait_for_ready_state_complete()
soup = self.get_beautiful_soup()
html_links = soup.find_all("a")
for html_link in html_links:
if html_link.text.strip() == link_text.strip():
if html_link.has_attr(attribute):
attribute_value = html_link.get(attribute)
return attribute_value
if hard_fail:
raise Exception(
"Unable to find attribute {%s} from link text {%s}!"
% (attribute, link_text)
)
else:
return None
if hard_fail:
raise Exception("Link text {%s} was not found!" % link_text)
else:
return None
def get_link_text_attribute(self, link_text, attribute, hard_fail=True):
"""Same as self.get_link_attribute()
Finds a link by link text and then returns the attribute's value.
If the link text or attribute cannot be found, an exception will
get raised if hard_fail is True (otherwise None is returned)."""
return self.get_link_attribute(link_text, attribute, hard_fail)
def get_partial_link_text_attribute(
self, link_text, attribute, hard_fail=True
):
"""Finds a link by partial link text and then returns the attribute's
value. If the partial link text or attribute cannot be found, an
exception will get raised if hard_fail is True (otherwise None
is returned)."""
self.wait_for_ready_state_complete()
soup = self.get_beautiful_soup()
html_links = soup.find_all("a")
for html_link in html_links:
if link_text.strip() in html_link.text.strip():
if html_link.has_attr(attribute):
attribute_value = html_link.get(attribute)
return attribute_value
if hard_fail:
raise Exception(
"Unable to find attribute {%s} from "
"partial link text {%s}!" % (attribute, link_text)
)
else:
return None
if hard_fail:
raise Exception(
"Partial Link text {%s} was not found!" % link_text
)
else:
return None
def click_link_text(self, link_text, timeout=None):
""" This method clicks link text on a page """
# If using phantomjs, might need to extract and open the link directly
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
pre_action_url = self.driver.current_url
pre_window_count = len(self.driver.window_handles)
if self.browser == "phantomjs":
if self.is_link_text_visible(link_text):
element = self.wait_for_link_text_visible(
link_text, timeout=timeout
)
element.click()
return
self.open(self.__get_href_from_link_text(link_text))
return
if self.browser == "safari":
if self.demo_mode:
self.wait_for_link_text_present(link_text, timeout=timeout)
try:
self.__jquery_slow_scroll_to(link_text, by=By.LINK_TEXT)
except Exception:
element = self.wait_for_link_text_visible(
link_text, timeout=timeout
)
self.__slow_scroll_to_element(element)
o_bs = "" # original_box_shadow
loops = settings.HIGHLIGHTS
selector = self.convert_to_css_selector(
link_text, by=By.LINK_TEXT
)
selector = self.__make_css_match_first_element_only(selector)
try:
selector = re.escape(selector)
selector = self.__escape_quotes_if_needed(selector)
self.__highlight_with_jquery(selector, loops, o_bs)
except Exception:
pass # JQuery probably couldn't load. Skip highlighting.
self.__jquery_click(link_text, by=By.LINK_TEXT)
return
if not self.is_link_text_present(link_text):
self.wait_for_link_text_present(link_text, timeout=timeout)
pre_action_url = self.get_current_url()
try:
element = self.wait_for_link_text_visible(link_text, timeout=0.2)
self.__demo_mode_highlight_if_active(link_text, by=By.LINK_TEXT)
try:
element.click()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.16)
element = self.wait_for_link_text_visible(
link_text, timeout=timeout
)
element.click()
except Exception:
found_css = False
text_id = self.get_link_attribute(link_text, "id", False)
if text_id:
link_css = '[id="%s"]' % link_text
found_css = True
if not found_css:
href = self.__get_href_from_link_text(link_text, False)
if href:
if href.startswith("/") or page_utils.is_valid_url(href):
link_css = '[href="%s"]' % href
found_css = True
if not found_css:
ngclick = self.get_link_attribute(link_text, "ng-click", False)
if ngclick:
link_css = '[ng-click="%s"]' % ngclick
found_css = True
if not found_css:
onclick = self.get_link_attribute(link_text, "onclick", False)
if onclick:
link_css = '[onclick="%s"]' % onclick
found_css = True
success = False
if found_css:
if self.is_element_visible(link_css):
self.click(link_css)
success = True
else:
# The link text might be hidden under a dropdown menu
success = self.__click_dropdown_link_text(
link_text, link_css
)
if not success:
element = self.wait_for_link_text_visible(
link_text, timeout=settings.MINI_TIMEOUT
)
element.click()
latest_window_count = len(self.driver.window_handles)
if (
latest_window_count > pre_window_count
and (
self.recorder_mode
or (
settings.SWITCH_TO_NEW_TABS_ON_CLICK
and self.driver.current_url == pre_action_url
)
)
):
self.__switch_to_newest_window_if_not_blank()
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def click_partial_link_text(self, partial_link_text, timeout=None):
""" This method clicks the partial link text on a page. """
# If using phantomjs, might need to extract and open the link directly
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if self.browser == "phantomjs":
if self.is_partial_link_text_visible(partial_link_text):
element = self.wait_for_partial_link_text(partial_link_text)
element.click()
return
soup = self.get_beautiful_soup()
html_links = soup.fetch("a")
for html_link in html_links:
if partial_link_text in html_link.text:
for html_attribute in html_link.attrs:
if html_attribute[0] == "href":
href = html_attribute[1]
if href.startswith("//"):
link = "http:" + href
elif href.startswith("/"):
url = self.driver.current_url
domain_url = self.get_domain_url(url)
link = domain_url + href
else:
link = href
self.open(link)
return
raise Exception(
"Could not parse link from partial link_text "
"{%s}" % partial_link_text
)
raise Exception(
"Partial link text {%s} was not found!" % partial_link_text
)
if not self.is_partial_link_text_present(partial_link_text):
self.wait_for_partial_link_text_present(
partial_link_text, timeout=timeout
)
pre_action_url = self.driver.current_url
pre_window_count = len(self.driver.window_handles)
try:
element = self.wait_for_partial_link_text(
partial_link_text, timeout=0.2
)
self.__demo_mode_highlight_if_active(
partial_link_text, by=By.LINK_TEXT
)
try:
element.click()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.16)
element = self.wait_for_partial_link_text(
partial_link_text, timeout=timeout
)
element.click()
except Exception:
found_css = False
text_id = self.get_partial_link_text_attribute(
partial_link_text, "id", False
)
if text_id:
link_css = '[id="%s"]' % partial_link_text
found_css = True
if not found_css:
href = self.__get_href_from_partial_link_text(
partial_link_text, False
)
if href:
if href.startswith("/") or page_utils.is_valid_url(href):
link_css = '[href="%s"]' % href
found_css = True
if not found_css:
ngclick = self.get_partial_link_text_attribute(
partial_link_text, "ng-click", False
)
if ngclick:
link_css = '[ng-click="%s"]' % ngclick
found_css = True
if not found_css:
onclick = self.get_partial_link_text_attribute(
partial_link_text, "onclick", False
)
if onclick:
link_css = '[onclick="%s"]' % onclick
found_css = True
success = False
if found_css:
if self.is_element_visible(link_css):
self.click(link_css)
success = True
else:
# The link text might be hidden under a dropdown menu
success = self.__click_dropdown_partial_link_text(
partial_link_text, link_css
)
if not success:
element = self.wait_for_partial_link_text(
partial_link_text, timeout=settings.MINI_TIMEOUT
)
element.click()
latest_window_count = len(self.driver.window_handles)
if (
latest_window_count > pre_window_count
and (
self.recorder_mode
or (
settings.SWITCH_TO_NEW_TABS_ON_CLICK
and self.driver.current_url == pre_action_url
)
)
):
self.__switch_to_newest_window_if_not_blank()
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def get_text(self, selector, by=By.CSS_SELECTOR, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
return self.__get_shadow_text(selector, timeout)
self.wait_for_ready_state_complete()
time.sleep(0.01)
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout
)
try:
element_text = element.text
if self.browser == "safari":
element_text = element.get_attribute("innerText")
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.14)
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout
)
element_text = element.text
if self.browser == "safari":
element_text = element.get_attribute("innerText")
return element_text
def get_attribute(
self,
selector,
attribute,
by=By.CSS_SELECTOR,
timeout=None,
hard_fail=True,
):
""" This method uses JavaScript to get the value of an attribute. """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
time.sleep(0.01)
if self.__is_shadow_selector(selector):
return self.__get_shadow_attribute(
selector, attribute, timeout=timeout
)
element = page_actions.wait_for_element_present(
self.driver, selector, by, timeout
)
try:
attribute_value = element.get_attribute(attribute)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.14)
element = page_actions.wait_for_element_present(
self.driver, selector, by, timeout
)
attribute_value = element.get_attribute(attribute)
if attribute_value is not None:
return attribute_value
else:
if hard_fail:
raise Exception(
"Element {%s} has no attribute {%s}!"
% (selector, attribute)
)
else:
return None
def set_attribute(
self,
selector,
attribute,
value,
by=By.CSS_SELECTOR,
timeout=None,
scroll=False,
):
"""This method uses JavaScript to set/update an attribute.
Only the first matching selector from querySelector() is used."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if scroll and self.is_element_visible(selector, by=by):
try:
self.scroll_to(selector, by=by, timeout=timeout)
except Exception:
pass
attribute = re.escape(attribute)
attribute = self.__escape_quotes_if_needed(attribute)
value = re.escape(value)
value = self.__escape_quotes_if_needed(value)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector) # Add "\\" to special chars
css_selector = self.__escape_quotes_if_needed(css_selector)
script = (
"""document.querySelector('%s').setAttribute('%s','%s');"""
% (css_selector, attribute, value)
)
self.execute_script(script)
def set_attributes(self, selector, attribute, value, by=By.CSS_SELECTOR):
"""This method uses JavaScript to set/update a common attribute.
All matching selectors from querySelectorAll() are used.
Example => (Make all links on a website redirect to Google):
self.set_attributes("a", "href", "https://google.com")"""
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
attribute = re.escape(attribute)
attribute = self.__escape_quotes_if_needed(attribute)
value = re.escape(value)
value = self.__escape_quotes_if_needed(value)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector) # Add "\\" to special chars
css_selector = self.__escape_quotes_if_needed(css_selector)
script = """var $elements = document.querySelectorAll('%s');
var index = 0, length = $elements.length;
for(; index < length; index++){
$elements[index].setAttribute('%s','%s');}""" % (
css_selector,
attribute,
value,
)
try:
self.execute_script(script)
except Exception:
pass
def set_attribute_all(
self, selector, attribute, value, by=By.CSS_SELECTOR
):
"""Same as set_attributes(), but using querySelectorAll naming scheme.
This method uses JavaScript to set/update a common attribute.
All matching selectors from querySelectorAll() are used.
Example => (Make all links on a website redirect to Google):
self.set_attribute_all("a", "href", "https://google.com")"""
self.set_attributes(selector, attribute, value, by=by)
def remove_attribute(
self, selector, attribute, by=By.CSS_SELECTOR, timeout=None
):
"""This method uses JavaScript to remove an attribute.
Only the first matching selector from querySelector() is used."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.is_element_visible(selector, by=by):
try:
self.scroll_to(selector, by=by, timeout=timeout)
except Exception:
pass
attribute = re.escape(attribute)
attribute = self.__escape_quotes_if_needed(attribute)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector) # Add "\\" to special chars
css_selector = self.__escape_quotes_if_needed(css_selector)
script = """document.querySelector('%s').removeAttribute('%s');""" % (
css_selector,
attribute,
)
self.execute_script(script)
def remove_attributes(self, selector, attribute, by=By.CSS_SELECTOR):
"""This method uses JavaScript to remove a common attribute.
All matching selectors from querySelectorAll() are used."""
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
attribute = re.escape(attribute)
attribute = self.__escape_quotes_if_needed(attribute)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector) # Add "\\" to special chars
css_selector = self.__escape_quotes_if_needed(css_selector)
script = """var $elements = document.querySelectorAll('%s');
var index = 0, length = $elements.length;
for(; index < length; index++){
$elements[index].removeAttribute('%s');}""" % (
css_selector,
attribute,
)
try:
self.execute_script(script)
except Exception:
pass
def get_property(
self, selector, property, by=By.CSS_SELECTOR, timeout=None
):
"""Returns the property value of an element.
This is not the same as self.get_property_value(), which returns
the value of an element's computed style using a different algorithm.
If no result is found, an empty string (instead of None) is returned.
Example:
html_text = self.get_property(SELECTOR, "textContent")
"""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
time.sleep(0.01)
element = page_actions.wait_for_element_present(
self.driver, selector, by, timeout
)
try:
property_value = element.get_property(property)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.14)
element = page_actions.wait_for_element_present(
self.driver, selector, by, timeout
)
property_value = element.get_property(property)
if not property_value:
return ""
return property_value
def get_text_content(self, selector, by=By.CSS_SELECTOR, timeout=None):
"""Returns the text that appears in the HTML for an element.
This is different from "self.get_text(selector, by=By.CSS_SELECTOR)"
because that only returns the visible text on a page for an element,
rather than the HTML text that's being returned from this method."""
self.__check_scope()
return self.get_property(
selector, property="textContent", by=by, timeout=timeout
)
def get_property_value(
self, selector, property, by=By.CSS_SELECTOR, timeout=None
):
"""Returns the property value of a page element's computed style.
Example:
opacity = self.get_property_value("html body a", "opacity")
self.assertTrue(float(opacity) > 0, "Element not visible!")"""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
page_actions.wait_for_element_present(
self.driver, selector, by, timeout
)
try:
selector = self.convert_to_css_selector(selector, by=by)
except Exception:
# Don't run action if can't convert to CSS_Selector for JavaScript
raise Exception(
"Exception: Could not convert {%s}(by=%s) to CSS_SELECTOR!"
% (selector, by)
)
selector = re.escape(selector)
selector = self.__escape_quotes_if_needed(selector)
script = """var $elm = document.querySelector('%s');
$val = window.getComputedStyle($elm).getPropertyValue('%s');
return $val;""" % (
selector,
property,
)
value = self.execute_script(script)
if value is not None:
return value
else:
return "" # Return an empty string if the property doesn't exist
def get_image_url(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Extracts the URL from an image element on the page. """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.get_attribute(
selector, attribute="src", by=by, timeout=timeout
)
def find_elements(self, selector, by=By.CSS_SELECTOR, limit=0):
"""Returns a list of matching WebElements.
Elements could be either hidden or visible on the page.
If "limit" is set and > 0, will only return that many elements."""
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
time.sleep(0.05)
elements = self.driver.find_elements(by=by, value=selector)
if limit and limit > 0 and len(elements) > limit:
elements = elements[:limit]
return elements
def find_visible_elements(self, selector, by=By.CSS_SELECTOR, limit=0):
"""Returns a list of matching WebElements that are visible.
If "limit" is set and > 0, will only return that many elements."""
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
time.sleep(0.05)
v_elems = page_actions.find_visible_elements(self.driver, selector, by)
if limit and limit > 0 and len(v_elems) > limit:
v_elems = v_elems[:limit]
return v_elems
    def click_visible_elements(
        self, selector, by=By.CSS_SELECTOR, limit=0, timeout=None
    ):
        """Finds all matching page elements and clicks visible ones in order.
        If a click reloads or opens a new page, the clicking will stop.
        If no matching elements appear, an Exception will be raised.
        If "limit" is set and > 0, will only click that many elements.
        Also clicks elements that become visible from previous clicks.
        Works best for actions such as clicking all checkboxes on a page.
        Example:  self.click_visible_elements('input[type="checkbox"]')"""
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        selector, by = self.__recalculate_selector(selector, by)
        self.wait_for_element_present(selector, by=by, timeout=timeout)
        elements = self.find_elements(selector, by=by)
        if self.browser == "safari":
            # Safari path: use JavaScript clicks instead of WebDriver clicks.
            if not limit:
                limit = 0
            num_elements = len(elements)
            if num_elements == 0:
                raise Exception(
                    "No matching elements found for selector {%s}!" % selector
                )
            elif num_elements < limit or limit == 0:
                limit = num_elements
            selector, by = self.__recalculate_selector(selector, by)
            css_selector = self.convert_to_css_selector(selector, by=by)
            last_css_chunk = css_selector.split(" ")[-1]
            if ":" in last_css_chunk:
                # The selector already ends with a pseudo-class, so an
                # :nth-of-type() suffix can't be appended: JS-click them all.
                self.__js_click_all(css_selector)
                self.wait_for_ready_state_complete()
                return
            else:
                # JS-click each visible match via an :nth-of-type() selector.
                for i in range(1, limit + 1):
                    new_selector = css_selector + ":nth-of-type(%s)" % str(i)
                    if self.is_element_visible(new_selector):
                        self.__js_click(new_selector)
                        self.wait_for_ready_state_complete()
                return
        # Remember the starting URL / window count to detect navigation
        # and newly-opened tabs caused by the clicks.
        pre_action_url = self.driver.current_url
        pre_window_count = len(self.driver.window_handles)
        click_count = 0
        for element in elements:
            if limit and limit > 0 and click_count >= limit:
                return  # Reached the requested click limit
            try:
                if element.is_displayed():
                    self.__scroll_to_element(element)
                    element.click()
                    click_count += 1
                    self.wait_for_ready_state_complete()
            except ECI_Exception:
                continue  # ElementClickInterceptedException (Overlay likely)
            except (StaleElementReferenceException, ENI_Exception):
                # The element went stale (page may have changed): retry once.
                self.wait_for_ready_state_complete()
                time.sleep(0.12)
                try:
                    if element.is_displayed():
                        self.__scroll_to_element(element)
                        element.click()
                        click_count += 1
                        self.wait_for_ready_state_complete()
                except (StaleElementReferenceException, ENI_Exception):
                    # Still stale after a retry: stop clicking. Switch to a
                    # newly-opened window first, if one appeared.
                    latest_window_count = len(self.driver.window_handles)
                    if (
                        latest_window_count > pre_window_count
                        and (
                            self.recorder_mode
                            or (
                                settings.SWITCH_TO_NEW_TABS_ON_CLICK
                                and self.driver.current_url == pre_action_url
                            )
                        )
                    ):
                        self.__switch_to_newest_window_if_not_blank()
                    return  # Probably on new page / Elements are all stale
        # All clicks done: switch to a newly-opened window, if one appeared.
        latest_window_count = len(self.driver.window_handles)
        if (
            latest_window_count > pre_window_count
            and (
                self.recorder_mode
                or (
                    settings.SWITCH_TO_NEW_TABS_ON_CLICK
                    and self.driver.current_url == pre_action_url
                )
            )
        ):
            self.__switch_to_newest_window_if_not_blank()
def click_nth_visible_element(
self, selector, number, by=By.CSS_SELECTOR, timeout=None
):
"""Finds all matching page elements and clicks the nth visible one.
Example: self.click_nth_visible_element('[type="checkbox"]', 5)
(Clicks the 5th visible checkbox on the page.)"""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
self.wait_for_element_present(selector, by=by, timeout=timeout)
elements = self.find_visible_elements(selector, by=by)
if len(elements) < number:
raise Exception(
"Not enough matching {%s} elements of type {%s} to "
"click number %s!" % (selector, by, number)
)
number = number - 1
if number < 0:
number = 0
element = elements[number]
pre_action_url = self.driver.current_url
pre_window_count = len(self.driver.window_handles)
try:
self.__scroll_to_element(element)
element.click()
except (StaleElementReferenceException, ENI_Exception):
time.sleep(0.12)
self.wait_for_ready_state_complete()
self.wait_for_element_present(selector, by=by, timeout=timeout)
elements = self.find_visible_elements(selector, by=by)
if len(elements) < number:
raise Exception(
"Not enough matching {%s} elements of type {%s} to "
"click number %s!" % (selector, by, number)
)
number = number - 1
if number < 0:
number = 0
element = elements[number]
element.click()
latest_window_count = len(self.driver.window_handles)
if (
latest_window_count > pre_window_count
and (
self.recorder_mode
or (
settings.SWITCH_TO_NEW_TABS_ON_CLICK
and self.driver.current_url == pre_action_url
)
)
):
self.__switch_to_newest_window_if_not_blank()
def click_if_visible(self, selector, by=By.CSS_SELECTOR):
"""If the page selector exists and is visible, clicks on the element.
This method only clicks on the first matching element found.
(Use click_visible_elements() to click all matching elements.)"""
self.wait_for_ready_state_complete()
if self.is_element_visible(selector, by=by):
self.click(selector, by=by)
    def click_active_element(self):
        """Clicks whichever element currently has focus on the page,
        via JavaScript's "document.activeElement.click()".
        May switch to a newly-opened window/tab after the click."""
        self.wait_for_ready_state_complete()
        # Remember the starting URL / window count to detect navigation
        # and newly-opened tabs caused by the click.
        pre_action_url = self.driver.current_url
        pre_window_count = len(self.driver.window_handles)
        self.execute_script("document.activeElement.click();")
        latest_window_count = len(self.driver.window_handles)
        if (
            latest_window_count > pre_window_count
            and (
                self.recorder_mode
                or (
                    settings.SWITCH_TO_NEW_TABS_ON_CLICK
                    and self.driver.current_url == pre_action_url
                )
            )
        ):
            self.__switch_to_newest_window_if_not_blank()
        if settings.WAIT_FOR_RSC_ON_CLICKS:
            self.wait_for_ready_state_complete()
        else:
            # A smaller subset of self.wait_for_ready_state_complete()
            self.wait_for_angularjs(timeout=settings.MINI_TIMEOUT)
            if self.driver.current_url != pre_action_url:
                self.__ad_block_as_needed()
        if self.demo_mode:
            # Shorter pause if the click didn't navigate anywhere.
            if self.driver.current_url != pre_action_url:
                self.__demo_mode_pause_if_active()
            else:
                self.__demo_mode_pause_if_active(tiny=True)
        elif self.slow_mode:
            self.__slow_mode_pause_if_active()
    def click_with_offset(
        self, selector, x, y, by=By.CSS_SELECTOR, mark=None, timeout=None
    ):
        """
        Click an element at an {X,Y}-offset location.
        {0,0} is the top-left corner of the element.
        If mark==True, will draw a dot at location. (Useful for debugging)
        In Demo Mode, mark becomes True unless set to False. (Default: None)
        """
        from selenium.webdriver.common.action_chains import ActionChains
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        selector, by = self.__recalculate_selector(selector, by)
        element = page_actions.wait_for_element_visible(
            self.driver, selector, by, timeout
        )
        if self.demo_mode:
            self.highlight(selector, by=by, loops=1)
        elif self.slow_mode:
            self.__slow_scroll_to_element(element)
        if self.demo_mode and mark is None:
            mark = True
        if mark:
            # Draw a 7x7 yellow square with a 5x5 red center at the offset.
            # NOTE(review): the script calls getContext('2d'), so marking
            # assumes the target element is a <canvas> -- confirm.
            selector = self.convert_to_css_selector(selector, by=by)
            selector = re.escape(selector)
            selector = self.__escape_quotes_if_needed(selector)
            px = x - 3
            py = y - 3
            script = (
                "var canvas = document.querySelector('%s');"
                "var ctx = canvas.getContext('2d');"
                "ctx.fillStyle = '#F8F808';"
                "ctx.fillRect(%s, %s, 7, 7);"
                "ctx.fillStyle = '#F80808';"
                "ctx.fillRect(%s+1, %s+1, 5, 5);"
                % (selector, px, py, px, py)
            )
            self.execute_script(script)
        try:
            # Scroll so the click target isn't hidden near the top of the
            # viewport. (The 130px adjustment appears approximate.)
            element_location = element.location["y"]
            element_location = element_location - 130 + y
            if element_location < 0:
                element_location = 0
            scroll_script = "window.scrollTo(0, %s);" % element_location
            self.driver.execute_script(scroll_script)
            self.sleep(0.1)
        except Exception:
            pass  # Best-effort scroll: the click below is what matters
        try:
            action_chains = ActionChains(self.driver)
            action_chains.move_to_element_with_offset(element, x, y)
            action_chains.click().perform()
        except MoveTargetOutOfBoundsException:
            message = (
                "Target coordinates for click are out-of-bounds!\n"
                "The offset must stay inside the target element!"
            )
            raise Exception(message)
        if self.demo_mode:
            self.__demo_mode_pause_if_active()
        elif self.slow_mode:
            self.__slow_mode_pause_if_active()
def is_checked(self, selector, by=By.CSS_SELECTOR, timeout=None):
"""Determines if a checkbox or a radio button element is checked.
Returns True if the element is checked.
Returns False if the element is not checked.
If the element is not present on the page, raises an exception.
If the element is not a checkbox or radio, raises an exception."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
kind = self.get_attribute(selector, "type", by=by, timeout=timeout)
if kind != "checkbox" and kind != "radio":
raise Exception("Expecting a checkbox or a radio button element!")
is_checked = self.get_attribute(
selector, "checked", by=by, timeout=timeout, hard_fail=False
)
if is_checked:
return True
else: # (NoneType)
return False
def is_selected(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Same as is_checked() """
return self.is_checked(selector, by=by, timeout=timeout)
def check_if_unchecked(self, selector, by=By.CSS_SELECTOR):
""" If a checkbox or radio button is not checked, will check it. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
if not self.is_checked(selector, by=by):
if self.is_element_visible(selector, by=by):
self.click(selector, by=by)
else:
selector = self.convert_to_css_selector(selector, by=by)
self.__dont_record_js_click = True
self.js_click(selector, by=By.CSS_SELECTOR)
self.__dont_record_js_click = False
def select_if_unselected(self, selector, by=By.CSS_SELECTOR):
""" Same as check_if_unchecked() """
self.check_if_unchecked(selector, by=by)
def uncheck_if_checked(self, selector, by=By.CSS_SELECTOR):
""" If a checkbox is checked, will uncheck it. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
if self.is_checked(selector, by=by):
if self.is_element_visible(selector, by=by):
self.click(selector, by=by)
else:
selector = self.convert_to_css_selector(selector, by=by)
self.__dont_record_js_click = True
self.js_click(selector, by=By.CSS_SELECTOR)
self.__dont_record_js_click = False
def unselect_if_selected(self, selector, by=By.CSS_SELECTOR):
""" Same as uncheck_if_checked() """
self.uncheck_if_checked(selector, by=by)
def is_element_in_an_iframe(self, selector, by=By.CSS_SELECTOR):
"""Returns True if the selector's element is located in an iframe.
Otherwise returns False."""
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
if self.is_element_present(selector, by=by):
return False
soup = self.get_beautiful_soup()
iframe_list = soup.select("iframe")
for iframe in iframe_list:
iframe_identifier = None
if iframe.has_attr("name") and len(iframe["name"]) > 0:
iframe_identifier = iframe["name"]
elif iframe.has_attr("id") and len(iframe["id"]) > 0:
iframe_identifier = iframe["id"]
elif iframe.has_attr("class") and len(iframe["class"]) > 0:
iframe_class = " ".join(iframe["class"])
iframe_identifier = '[class="%s"]' % iframe_class
else:
continue
self.switch_to_frame(iframe_identifier)
if self.is_element_present(selector, by=by):
self.switch_to_default_content()
return True
self.switch_to_default_content()
return False
    def switch_to_frame_of_element(self, selector, by=By.CSS_SELECTOR):
        """Set driver control to the iframe containing element (assuming the
        element is in a single-nested iframe) and returns the iframe name.
        If element is not in an iframe, returns None, and nothing happens.
        May not work if multiple iframes are nested within each other."""
        self.__check_scope()
        selector, by = self.__recalculate_selector(selector, by)
        if self.is_element_present(selector, by=by):
            # Element is already reachable from the default content,
            # so no frame switch is needed.
            return None
        soup = self.get_beautiful_soup()
        iframe_list = soup.select("iframe")
        for iframe in iframe_list:
            # Identify each iframe by "name", then "id", then "class".
            iframe_identifier = None
            if iframe.has_attr("name") and len(iframe["name"]) > 0:
                iframe_identifier = iframe["name"]
            elif iframe.has_attr("id") and len(iframe["id"]) > 0:
                iframe_identifier = iframe["id"]
            elif iframe.has_attr("class") and len(iframe["class"]) > 0:
                iframe_class = " ".join(iframe["class"])
                iframe_identifier = '[class="%s"]' % iframe_class
            else:
                continue  # No usable identifier for this iframe
            try:
                self.switch_to_frame(iframe_identifier, timeout=1)
                if self.is_element_present(selector, by=by):
                    # Stay switched into this frame; report its identifier.
                    return iframe_identifier
            except Exception:
                pass
            self.switch_to_default_content()
        # Fallback: maybe the selector itself identifies an iframe.
        try:
            self.switch_to_frame(selector, timeout=1)
            return selector
        except Exception:
            if self.is_element_present(selector, by=by):
                return ""
            raise Exception(
                "Could not switch to iframe containing "
                "element {%s}!" % selector
            )
def hover_on_element(self, selector, by=By.CSS_SELECTOR):
self.__check_scope()
original_selector = selector
original_by = by
selector, by = self.__recalculate_selector(selector, by)
if page_utils.is_xpath_selector(selector):
selector = self.convert_to_css_selector(selector, By.XPATH)
by = By.CSS_SELECTOR
self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT
)
self.__demo_mode_highlight_if_active(original_selector, original_by)
self.scroll_to(selector, by=by)
time.sleep(0.05) # Settle down from scrolling before hovering
if self.browser != "chrome":
return page_actions.hover_on_element(self.driver, selector)
# Using Chrome
# (Pure hover actions won't work on early chromedriver versions)
try:
return page_actions.hover_on_element(self.driver, selector)
except WebDriverException as e:
driver_capabilities = self.driver.capabilities
if "version" in driver_capabilities:
chrome_version = driver_capabilities["version"]
else:
chrome_version = driver_capabilities["browserVersion"]
major_chrome_version = chrome_version.split(".")[0]
chrome_dict = self.driver.capabilities["chrome"]
chromedriver_version = chrome_dict["chromedriverVersion"]
chromedriver_version = chromedriver_version.split(" ")[0]
major_chromedriver_version = chromedriver_version.split(".")[0]
install_sb = (
"seleniumbase install chromedriver %s" % major_chrome_version
)
if major_chromedriver_version < major_chrome_version:
# Upgrading the driver is required for performing hover actions
message = (
"\n"
"You need a newer chromedriver to perform hover actions!\n"
"Your version of chromedriver is: %s\n"
"And your version of Chrome is: %s\n"
"You can fix this issue by running:\n>>> %s\n"
% (chromedriver_version, chrome_version, install_sb)
)
raise Exception(message)
else:
raise Exception(e)
    def hover_and_click(
        self,
        hover_selector,
        click_selector,
        hover_by=By.CSS_SELECTOR,
        click_by=By.CSS_SELECTOR,
        timeout=None,
    ):
        """When you want to hover over an element or dropdown menu,
        and then click an element that appears after that."""
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        original_selector = hover_selector
        original_by = hover_by
        hover_selector, hover_by = self.__recalculate_selector(
            hover_selector, hover_by
        )
        hover_selector = self.convert_to_css_selector(hover_selector, hover_by)
        hover_by = By.CSS_SELECTOR
        click_selector, click_by = self.__recalculate_selector(
            click_selector, click_by
        )
        dropdown_element = self.wait_for_element_visible(
            hover_selector, by=hover_by, timeout=timeout
        )
        self.__demo_mode_highlight_if_active(original_selector, original_by)
        self.scroll_to(hover_selector, by=hover_by)
        # Remember the starting URL / window count to detect navigation
        # and newly-opened tabs caused by the click.
        pre_action_url = self.driver.current_url
        pre_window_count = len(self.driver.window_handles)
        if self.recorder_mode:
            # Record this hover-and-click as a "ho_cl" action for playback.
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        the_selectors = [hover_selector, click_selector]
                        action = ["ho_cl", the_selectors, origin, time_stamp]
                        self.__extra_actions.append(action)
        outdated_driver = False
        element = None
        try:
            if self.mobile_emulator:
                # On mobile, click to hover the element
                dropdown_element.click()
            elif self.browser == "safari":
                # Use the workaround for hover-clicking on Safari
                raise Exception("This Exception will be caught.")
            else:
                page_actions.hover_element(self.driver, dropdown_element)
        except Exception:
            # Fallback path: skip the hover and click the target directly,
            # opening its href when the target is identified by link text.
            outdated_driver = True
            element = self.wait_for_element_present(
                click_selector, click_by, timeout
            )
            if click_by == By.LINK_TEXT:
                self.open(self.__get_href_from_link_text(click_selector))
            elif click_by == By.PARTIAL_LINK_TEXT:
                self.open(
                    self.__get_href_from_partial_link_text(click_selector)
                )
            else:
                self.__dont_record_js_click = True
                self.js_click(click_selector, by=click_by)
                self.__dont_record_js_click = False
        if outdated_driver:
            pass  # Already did the click workaround
        elif self.mobile_emulator:
            self.click(click_selector, by=click_by)
        elif not outdated_driver:
            element = page_actions.hover_and_click(
                self.driver,
                hover_selector,
                click_selector,
                hover_by,
                click_by,
                timeout,
            )
        # Switch to a newly-opened window/tab, if the click created one.
        latest_window_count = len(self.driver.window_handles)
        if (
            latest_window_count > pre_window_count
            and (
                self.recorder_mode
                or (
                    settings.SWITCH_TO_NEW_TABS_ON_CLICK
                    and self.driver.current_url == pre_action_url
                )
            )
        ):
            self.__switch_to_newest_window_if_not_blank()
        if self.demo_mode:
            # Shorter pause if the click didn't navigate anywhere.
            if self.driver.current_url != pre_action_url:
                self.__demo_mode_pause_if_active()
            else:
                self.__demo_mode_pause_if_active(tiny=True)
        elif self.slow_mode:
            self.__slow_mode_pause_if_active()
        return element
    def hover_and_double_click(
        self,
        hover_selector,
        click_selector,
        hover_by=By.CSS_SELECTOR,
        click_by=By.CSS_SELECTOR,
        timeout=None,
    ):
        """When you want to hover over an element or dropdown menu,
        and then double-click an element that appears after that."""
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        original_selector = hover_selector
        original_by = hover_by
        hover_selector, hover_by = self.__recalculate_selector(
            hover_selector, hover_by
        )
        hover_selector = self.convert_to_css_selector(hover_selector, hover_by)
        hover_by = By.CSS_SELECTOR
        click_selector, click_by = self.__recalculate_selector(
            click_selector, click_by
        )
        dropdown_element = self.wait_for_element_visible(
            hover_selector, by=hover_by, timeout=timeout
        )
        self.__demo_mode_highlight_if_active(original_selector, original_by)
        self.scroll_to(hover_selector, by=hover_by)
        # Remember the starting URL / window count to detect navigation
        # and newly-opened tabs caused by the click.
        pre_action_url = self.driver.current_url
        pre_window_count = len(self.driver.window_handles)
        outdated_driver = False
        element = None
        try:
            page_actions.hover_element(self.driver, dropdown_element)
        except Exception:
            # Fallback path: skip the hover and (single-)click the target,
            # opening its href when the target is identified by link text.
            outdated_driver = True
            element = self.wait_for_element_present(
                click_selector, click_by, timeout
            )
            if click_by == By.LINK_TEXT:
                self.open(self.__get_href_from_link_text(click_selector))
            elif click_by == By.PARTIAL_LINK_TEXT:
                self.open(
                    self.__get_href_from_partial_link_text(click_selector)
                )
            else:
                self.__dont_record_js_click = True
                self.js_click(click_selector, click_by)
                self.__dont_record_js_click = False
        if not outdated_driver:
            # NOTE(review): click_by is hard-coded to CSS_SELECTOR here,
            # ignoring the caller's click_by argument -- confirm intended.
            element = page_actions.hover_element_and_double_click(
                self.driver,
                dropdown_element,
                click_selector,
                click_by=By.CSS_SELECTOR,
                timeout=timeout,
            )
        # Switch to a newly-opened window/tab, if the click created one.
        latest_window_count = len(self.driver.window_handles)
        if (
            latest_window_count > pre_window_count
            and (
                self.recorder_mode
                or (
                    settings.SWITCH_TO_NEW_TABS_ON_CLICK
                    and self.driver.current_url == pre_action_url
                )
            )
        ):
            self.__switch_to_newest_window_if_not_blank()
        if self.demo_mode:
            # Shorter pause if the click didn't navigate anywhere.
            if self.driver.current_url != pre_action_url:
                self.__demo_mode_pause_if_active()
            else:
                self.__demo_mode_pause_if_active(tiny=True)
        elif self.slow_mode:
            self.__slow_mode_pause_if_active()
        return element
def drag_and_drop(
self,
drag_selector,
drop_selector,
drag_by=By.CSS_SELECTOR,
drop_by=By.CSS_SELECTOR,
timeout=None,
):
""" Drag and drop an element from one selector to another. """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
drag_selector, drag_by = self.__recalculate_selector(
drag_selector, drag_by
)
drop_selector, drop_by = self.__recalculate_selector(
drop_selector, drop_by
)
drag_element = self.wait_for_element_visible(
drag_selector, by=drag_by, timeout=timeout
)
self.__demo_mode_highlight_if_active(drag_selector, drag_by)
self.wait_for_element_visible(
drop_selector, by=drop_by, timeout=timeout
)
self.__demo_mode_highlight_if_active(drop_selector, drop_by)
self.scroll_to(drag_selector, by=drag_by)
drag_selector = self.convert_to_css_selector(drag_selector, drag_by)
drop_selector = self.convert_to_css_selector(drop_selector, drop_by)
drag_and_drop_script = js_utils.get_drag_and_drop_script()
self.safe_execute_script(
drag_and_drop_script
+ (
"$('%s').simulateDragDrop("
"{dropTarget: "
"'%s'});" % (drag_selector, drop_selector)
)
)
if self.demo_mode:
self.__demo_mode_pause_if_active()
elif self.slow_mode:
self.__slow_mode_pause_if_active()
return drag_element
def drag_and_drop_with_offset(
self, selector, x, y, by=By.CSS_SELECTOR, timeout=None
):
""" Drag and drop an element to an {X,Y}-offset location. """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
css_selector = self.convert_to_css_selector(selector, by=by)
element = self.wait_for_element_visible(css_selector, timeout=timeout)
self.__demo_mode_highlight_if_active(css_selector, By.CSS_SELECTOR)
css_selector = re.escape(css_selector) # Add "\\" to special chars
css_selector = self.__escape_quotes_if_needed(css_selector)
script = js_utils.get_drag_and_drop_with_offset_script(
css_selector, x, y
)
self.safe_execute_script(script)
if self.demo_mode:
self.__demo_mode_pause_if_active()
elif self.slow_mode:
self.__slow_mode_pause_if_active()
return element
    def __select_option(
        self,
        dropdown_selector,
        option,
        dropdown_by=By.CSS_SELECTOR,
        option_by="text",
        timeout=None,
    ):
        """Selects an HTML <select> option by specification.
        Option specifications are by "text", "index", or "value".
        Defaults to "text" if option_by is unspecified or unknown.
        @Params
        dropdown_selector - the selector of the <select> element
        option - the option to select (visible text, index number, or value)
        dropdown_by - the type of the dropdown selector
        option_by - how the option is matched: "text", "index", or "value"
        timeout - the time to wait for the dropdown to be present (seconds)
        """
        from selenium.webdriver.support.ui import Select
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        dropdown_selector, dropdown_by = self.__recalculate_selector(
            dropdown_selector, dropdown_by
        )
        self.wait_for_ready_state_complete()
        element = self.wait_for_element_present(
            dropdown_selector, by=dropdown_by, timeout=timeout
        )
        if self.is_element_visible(dropdown_selector, by=dropdown_by):
            self.__demo_mode_highlight_if_active(
                dropdown_selector, dropdown_by
            )
        # Capture the pre-action state so new windows / URL changes that
        # result from the selection can be detected afterwards.
        pre_action_url = self.driver.current_url
        pre_window_count = len(self.driver.window_handles)
        try:
            if option_by == "index":
                Select(element).select_by_index(option)
            elif option_by == "value":
                Select(element).select_by_value(option)
            else:
                Select(element).select_by_visible_text(option)
        except (StaleElementReferenceException, ENI_Exception):
            # The element went stale (page changed under us): wait for the
            # page to settle, re-find the element, and retry the selection.
            self.wait_for_ready_state_complete()
            time.sleep(0.14)
            element = self.wait_for_element_present(
                dropdown_selector, by=dropdown_by, timeout=timeout
            )
            if option_by == "index":
                Select(element).select_by_index(option)
            elif option_by == "value":
                Select(element).select_by_value(option)
            else:
                Select(element).select_by_visible_text(option)
        latest_window_count = len(self.driver.window_handles)
        # Switch to a newly-opened window if the selection spawned one
        # (only in recorder mode, or when configured and still on-page).
        if (
            latest_window_count > pre_window_count
            and (
                self.recorder_mode
                or (
                    settings.SWITCH_TO_NEW_TABS_ON_CLICK
                    and self.driver.current_url == pre_action_url
                )
            )
        ):
            self.__switch_to_newest_window_if_not_blank()
        if settings.WAIT_FOR_RSC_ON_CLICKS:
            self.wait_for_ready_state_complete()
        else:
            # A smaller subset of self.wait_for_ready_state_complete()
            self.wait_for_angularjs(timeout=settings.MINI_TIMEOUT)
        if self.demo_mode:
            if self.driver.current_url != pre_action_url:
                self.__demo_mode_pause_if_active()
            else:
                self.__demo_mode_pause_if_active(tiny=True)
        elif self.slow_mode:
            self.__slow_mode_pause_if_active()
def select_option_by_text(
self,
dropdown_selector,
option,
dropdown_by=By.CSS_SELECTOR,
timeout=None,
):
"""Selects an HTML <select> option by option text.
@Params
dropdown_selector - the <select> selector.
option - the text of the option.
"""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__select_option(
dropdown_selector,
option,
dropdown_by=dropdown_by,
option_by="text",
timeout=timeout,
)
def select_option_by_index(
self,
dropdown_selector,
option,
dropdown_by=By.CSS_SELECTOR,
timeout=None,
):
"""Selects an HTML <select> option by option index.
@Params
dropdown_selector - the <select> selector.
option - the index number of the option.
"""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__select_option(
dropdown_selector,
option,
dropdown_by=dropdown_by,
option_by="index",
timeout=timeout,
)
def select_option_by_value(
self,
dropdown_selector,
option,
dropdown_by=By.CSS_SELECTOR,
timeout=None,
):
"""Selects an HTML <select> option by option value.
@Params
dropdown_selector - the <select> selector.
option - the value property of the option.
"""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__select_option(
dropdown_selector,
option,
dropdown_by=dropdown_by,
option_by="value",
timeout=timeout,
)
    def load_html_string(self, html_string, new_page=True):
        """Loads an HTML string into the web browser.
        If new_page==True, the page will switch to: "data:text/html,"
        If new_page==False, will load HTML into the current page."""
        self.__check_scope()
        soup = self.get_beautiful_soup(html_string)
        found_base = False
        links = soup.findAll("link")
        href = None
        # Look for a canonical <link> to use as the base URL so that
        # relative URLs in the loaded HTML resolve against that domain.
        for link in links:
            if link.get("rel") == ["canonical"] and link.get("href"):
                found_base = True
                href = link.get("href")
                href = self.get_domain_url(href)
        if (
            found_base
            and html_string.count("<head>") == 1
            and html_string.count("<base") == 0
        ):
            # Inject a <base> tag right after the single <head> tag.
            html_string = html_string.replace(
                "<head>", '<head><base href="%s">' % href
            )
        elif not found_base:
            # No canonical link: fall back to an existing <base> tag.
            bases = soup.findAll("base")
            for base in bases:
                if base.get("href"):
                    href = base.get("href")
        if href:
            html_string = html_string.replace('base: "."', 'base: "%s"' % href)
        soup = self.get_beautiful_soup(html_string)
        scripts = soup.findAll("script")
        # Strip out non-JSON <script> tags here; the saved "scripts" list
        # is used to re-attach their JS at the end of this method.
        for script in scripts:
            if script.get("type") != "application/json":
                html_string = html_string.replace(str(script), "")
        soup = self.get_beautiful_soup(html_string)
        found_head = False
        found_body = False
        html_head = None
        html_body = None
        # A length check of > 12 filters out effectively-empty tags.
        if soup.head and len(str(soup.head)) > 12:
            found_head = True
            html_head = str(soup.head)
            html_head = re.escape(html_head)
            html_head = self.__escape_quotes_if_needed(html_head)
            html_head = html_head.replace("\\ ", " ")
        if soup.body and len(str(soup.body)) > 12:
            found_body = True
            html_body = str(soup.body)
            # Normalize common UTF-8 byte sequences into their symbols.
            html_body = html_body.replace("\xc2\xa0", "&#xA0;")
            html_body = html_body.replace("\xc2\xa1", "¡")
            html_body = html_body.replace("\xc2\xa9", "©")
            html_body = html_body.replace("\xc2\xb7", "·")
            html_body = html_body.replace("\xc2\xbf", "¿")
            html_body = html_body.replace("\xc3\x97", "×")
            html_body = html_body.replace("\xc3\xb7", "÷")
            html_body = re.escape(html_body)
            html_body = self.__escape_quotes_if_needed(html_body)
            html_body = html_body.replace("\\ ", " ")
        html_string = re.escape(html_string)
        html_string = self.__escape_quotes_if_needed(html_string)
        html_string = html_string.replace("\\ ", " ")
        if new_page:
            self.open("data:text/html,")
        inner_head = """document.getElementsByTagName("head")[0].innerHTML"""
        inner_body = """document.getElementsByTagName("body")[0].innerHTML"""
        # Write the prepared head/body HTML into the page via JavaScript.
        if not found_body:
            self.execute_script('''%s = \"%s\"''' % (inner_body, html_string))
        elif found_body and not found_head:
            self.execute_script('''%s = \"%s\"''' % (inner_body, html_body))
        elif found_body and found_head:
            self.execute_script('''%s = \"%s\"''' % (inner_head, html_head))
            self.execute_script('''%s = \"%s\"''' % (inner_body, html_body))
        else:
            raise Exception("Logic Error!")
        # Re-attach the JavaScript that was stripped out earlier.
        for script in scripts:
            js_code = script.string
            js_src = script.get("src")
            if js_code and script.get("type") != "application/json":
                js_code_lines = js_code.split("\n")
                new_lines = []
                for line in js_code_lines:
                    line = line.strip()
                    new_lines.append(line)
                js_code = "\n".join(new_lines)
                js_code = re.escape(js_code)
                js_utils.add_js_code(self.driver, js_code)
            elif js_src:
                js_utils.add_js_link(self.driver, js_src)
            else:
                pass
def set_content(self, html_string, new_page=False):
""" Same as load_html_string(), but "new_page" defaults to False. """
self.load_html_string(html_string, new_page=new_page)
def load_html_file(self, html_file, new_page=True):
"""Loads a local html file into the browser from a relative file path.
If new_page==True, the page will switch to: "data:text/html,"
If new_page==False, will load HTML into the current page.
Local images and other local src content WILL BE IGNORED.
"""
self.__check_scope()
if self.__looks_like_a_page_url(html_file):
self.open(html_file)
return
if len(html_file) < 6 or not html_file.endswith(".html"):
raise Exception('Expecting a ".html" file!')
abs_path = os.path.abspath(".")
file_path = None
if abs_path in html_file:
file_path = html_file
else:
file_path = abs_path + "/%s" % html_file
html_string = None
with open(file_path, "r") as f:
html_string = f.read().strip()
self.load_html_string(html_string, new_page)
def open_html_file(self, html_file):
"""Opens a local html file into the browser from a relative file path.
The URL displayed in the web browser will start with "file://".
"""
self.__check_scope()
if self.__looks_like_a_page_url(html_file):
self.open(html_file)
return
if len(html_file) < 6 or not html_file.endswith(".html"):
raise Exception('Expecting a ".html" file!')
abs_path = os.path.abspath(".")
file_path = None
if abs_path in html_file:
file_path = html_file
else:
file_path = abs_path + "/%s" % html_file
self.open("file://" + file_path)
    def execute_script(self, script, *args, **kwargs):
        """Executes JavaScript code in the active browser window.
        @Params
        script - the JavaScript code to execute
        args / kwargs - passed through to driver.execute_script()
        @Returns
        The value returned by the JavaScript code (if any).
        """
        self.__check_scope()
        self.__check_browser()
        if not python3:
            # Python 2 needs the script decoded into a unicode string.
            script = unicode(script.decode('latin-1'))  # noqa: F821
        return self.driver.execute_script(script, *args, **kwargs)
def execute_async_script(self, script, timeout=None):
self.__check_scope()
self.__check_browser()
if not timeout:
timeout = settings.EXTREME_TIMEOUT
return js_utils.execute_async_script(self.driver, script, timeout)
def safe_execute_script(self, script, *args, **kwargs):
"""When executing a script that contains a jQuery command,
it's important that the jQuery library has been loaded first.
This method will load jQuery if it wasn't already loaded."""
self.__check_scope()
self.__check_browser()
if not js_utils.is_jquery_activated(self.driver):
self.activate_jquery()
return self.driver.execute_script(script, *args, **kwargs)
def set_window_rect(self, x, y, width, height):
self.__check_scope()
self.driver.set_window_rect(x, y, width, height)
self.__demo_mode_pause_if_active()
def set_window_size(self, width, height):
self.__check_scope()
self.driver.set_window_size(width, height)
self.__demo_mode_pause_if_active()
def maximize_window(self):
self.__check_scope()
self.driver.maximize_window()
self.__demo_mode_pause_if_active()
    def switch_to_frame(self, frame, timeout=None):
        """Wait for an iframe to appear, and switch to it. This should be
        usable as a drop-in replacement for driver.switch_to.frame().
        The iframe identifier can be a selector, an index, an id, a name,
        or a web element, but scrolling to the iframe first will only occur
        for visible iframes with a string selector.
        @Params
        frame - the frame element, name, id, index, or selector
        timeout - the time to wait for the alert in seconds
        """
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        if type(frame) is str and self.is_element_visible(frame):
            try:
                # Best-effort scroll; failures here are non-fatal.
                self.scroll_to(frame, timeout=1)
            except Exception:
                pass
        # In recorder mode (with switch-overrides enabled), frame switching
        # is replaced by set_content_to_frame() so actions stay recordable,
        # and the switch is logged as an extra recorded action.
        if self.recorder_mode and self._rec_overrides_switch:
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    r_a = self.get_session_storage_item("recorder_activated")
                    if r_a == "yes":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["sk_op", "", origin, time_stamp]
                        self.__extra_actions.append(action)
                        self.__set_c_from_switch = True
                        self.set_content_to_frame(frame, timeout=timeout)
                        self.__set_c_from_switch = False
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["sw_fr", frame, origin, time_stamp]
                        self.__extra_actions.append(action)
                        return
        page_actions.switch_to_frame(self.driver, frame, timeout)
    def switch_to_default_content(self):
        """Brings driver control outside the current iframe.
        (If the driver control is inside an iframe, the driver control
        will be set to one level above the current frame. If the driver
        control is not currently in an iframe, nothing will happen.)"""
        self.__check_scope()
        # In recorder mode (with switch-overrides enabled), this is
        # replaced by set_content_to_default() and logged as an action.
        if self.recorder_mode and self._rec_overrides_switch:
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    r_a = self.get_session_storage_item("recorder_activated")
                    if r_a == "yes":
                        self.__set_c_from_switch = True
                        self.set_content_to_default()
                        self.__set_c_from_switch = False
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["sw_dc", "", origin, time_stamp]
                        self.__extra_actions.append(action)
                        return
        self.driver.switch_to.default_content()
    def set_content_to_frame(self, frame, timeout=None):
        """Replaces the page html with an iframe's html from that page.
        If the iFrame contains an "src" field that includes a valid URL,
        then instead of replacing the current html, this method will then
        open up the "src" URL of the iFrame in a new browser tab.
        To return to default content, use: self.set_content_to_default().
        This method also sets the state of the browser window so that the
        self.set_content_to_default() method can bring the user back to
        the original content displayed, which is similar to how the methods
        self.switch_to_frame(frame) and self.switch_to_default_content()
        work together to get the user into frames and out of all of them.
        """
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        # Save the current page state so set_content_to_default() can
        # restore it later (URL, page source, and window handle).
        current_url = self.get_current_url()
        c_tab = self.driver.current_window_handle
        current_page_source = self.get_page_source()
        self.execute_script("document.cframe_swap = 0;")
        # Enter the frame just long enough to grab its html.
        page_actions.switch_to_frame(self.driver, frame, timeout)
        iframe_html = self.get_page_source()
        self.driver.switch_to.default_content()
        self.wait_for_ready_state_complete()
        frame_found = False
        o_frame = frame  # Keep the original frame spec for action logging
        if self.is_element_present(frame):
            frame_found = True
        elif " " not in frame:
            # The frame spec may be an iframe's "name" attribute.
            frame = 'iframe[name="%s"]' % frame
            if self.is_element_present(frame):
                frame_found = True
        url = None
        if frame_found:
            url = self.execute_script(
                """return document.querySelector('%s').src;""" % frame
            )
            if not python3:
                url = str(url)
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    pass
                else:
                    url = None  # Only keep src values that are real URLs
        cframe_tab = False
        if url:
            cframe_tab = True
        self.__page_sources.append([current_url, current_page_source, c_tab])
        if self.recorder_mode and not self.__set_c_from_switch:
            time_stamp = self.execute_script("return Date.now();")
            origin = self.get_origin()
            action = ["sk_op", "", origin, time_stamp]
            self.__extra_actions.append(action)
        if cframe_tab:
            # The iframe has a valid src URL: open it in a new tab instead
            # of replacing the current page's html.
            self.execute_script("document.cframe_tab = 1;")
            self.open_new_window(switch_to=True)
            self.open(url)
            self.execute_script("document.cframe_tab = 1;")
        else:
            # Replace the page html with the iframe's html, and count
            # the swap depth for nested set_content_to_frame() calls.
            self.set_content(iframe_html)
            if not self.execute_script("return document.cframe_swap;"):
                self.execute_script("document.cframe_swap = 1;")
            else:
                self.execute_script("document.cframe_swap += 1;")
        if self.recorder_mode and not self.__set_c_from_switch:
            time_stamp = self.execute_script("return Date.now();")
            origin = self.get_origin()
            action = ["s_c_f", o_frame, origin, time_stamp]
            self.__extra_actions.append(action)
    def set_content_to_default(self, nested=True):
        """After using self.set_content_to_frame(), this reverts the page back.
        If self.set_content_to_frame() hasn't been called here, only refreshes.
        If "nested" is set to False when the content was set to nested iFrames,
        then the control will only move above the last iFrame that was entered.
        """
        self.__check_scope()
        # cframe_swap / cframe_tab were set by set_content_to_frame() to
        # track how content was swapped (in-place html vs. new tab).
        swap_cnt = self.execute_script("return document.cframe_swap;")
        tab_sta = self.execute_script("return document.cframe_tab;")
        if self.recorder_mode and not self.__set_c_from_switch:
            time_stamp = self.execute_script("return Date.now();")
            origin = self.get_origin()
            action = ["sk_op", "", origin, time_stamp]
            self.__extra_actions.append(action)
        if nested:
            # Revert all the way back to the first saved page state.
            if (
                len(self.__page_sources) > 0
                and (
                    (swap_cnt and int(swap_cnt) > 0)
                    or (tab_sta and int(tab_sta) > 0)
                )
            ):
                past_content = self.__page_sources[0]
                past_url = past_content[0]
                past_source = past_content[1]
                past_tab = past_content[2]
                current_tab = self.driver.current_window_handle
                if not current_tab == past_tab:
                    if past_tab in self.driver.window_handles:
                        self.switch_to_window(past_tab)
                url_of_past_tab = self.get_current_url()
                if url_of_past_tab == past_url:
                    # Same URL as when saved: restore the saved html.
                    self.set_content(past_source)
                else:
                    self.refresh_page()
            else:
                self.refresh_page()
            self.execute_script("document.cframe_swap = 0;")
            self.__page_sources = []
        else:
            # Only step back one level (the most recent saved state).
            just_refresh = False
            if swap_cnt and int(swap_cnt) > 0 and len(self.__page_sources) > 0:
                self.execute_script("document.cframe_swap -= 1;")
                current_url = self.get_current_url()
                past_content = self.__page_sources.pop()
                past_url = past_content[0]
                past_source = past_content[1]
                if current_url == past_url:
                    self.set_content(past_source)
                else:
                    just_refresh = True
            elif tab_sta and int(tab_sta) > 0 and len(self.__page_sources) > 0:
                # Content was opened in a new tab: switch back to the old one.
                past_content = self.__page_sources.pop()
                past_tab = past_content[2]
                if past_tab in self.driver.window_handles:
                    self.switch_to_window(past_tab)
                else:
                    just_refresh = True
            else:
                just_refresh = True
            if just_refresh:
                # Saved state is unusable: fall back to a page refresh.
                self.refresh_page()
                self.execute_script("document.cframe_swap = 0;")
                self.__page_sources = []
        if self.recorder_mode and not self.__set_c_from_switch:
            time_stamp = self.execute_script("return Date.now();")
            origin = self.get_origin()
            action = ["s_c_d", nested, origin, time_stamp]
            self.__extra_actions.append(action)
def open_new_window(self, switch_to=True):
""" Opens a new browser tab/window and switches to it by default. """
self.__check_scope()
self.__check_browser() # Current window must exist to open a new one
self.driver.execute_script("window.open('');")
time.sleep(0.01)
if switch_to:
self.switch_to_newest_window()
time.sleep(0.01)
if self.browser == "safari":
self.wait_for_ready_state_complete()
def switch_to_window(self, window, timeout=None):
""" Switches control of the browser to the specified window.
The window can be an integer: 0 -> 1st tab, 1 -> 2nd tab, etc...
Or it can be a list item from self.driver.window_handles """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
page_actions.switch_to_window(self.driver, window, timeout)
def switch_to_default_window(self):
self.switch_to_window(0)
def __switch_to_newest_window_if_not_blank(self):
current_window = self.driver.current_window_handle
try:
self.switch_to_window(len(self.driver.window_handles) - 1)
if self.get_current_url() == "about:blank":
self.switch_to_window(current_window)
except Exception:
self.switch_to_window(current_window)
def switch_to_newest_window(self):
self.switch_to_window(len(self.driver.window_handles) - 1)
    def get_new_driver(
        self,
        browser=None,
        headless=None,
        locale_code=None,
        protocol=None,
        servername=None,
        port=None,
        proxy=None,
        proxy_bypass_list=None,
        agent=None,
        switch_to=True,
        cap_file=None,
        cap_string=None,
        recorder_ext=None,
        disable_csp=None,
        enable_ws=None,
        enable_sync=None,
        use_auto_ext=None,
        no_sandbox=None,
        disable_gpu=None,
        incognito=None,
        guest_mode=None,
        devtools=None,
        remote_debug=None,
        swiftshader=None,
        ad_block_on=None,
        block_images=None,
        chromium_arg=None,
        firefox_arg=None,
        firefox_pref=None,
        user_data_dir=None,
        extension_zip=None,
        extension_dir=None,
        external_pdf=None,
        is_mobile=None,
        d_width=None,
        d_height=None,
        d_p_r=None,
    ):
        """This method spins up an extra browser for tests that require
        more than one. The first browser is already provided by tests
        that import base_case.BaseCase from seleniumbase. If parameters
        aren't specified, the method uses the same as the default driver.
        @Params
        browser - the browser to use. (Ex: "chrome", "firefox")
        headless - the option to run webdriver in headless mode
        locale_code - the Language Locale Code for the web browser
        protocol - if using a Selenium Grid, set the host protocol here
        servername - if using a Selenium Grid, set the host address here
        port - if using a Selenium Grid, set the host port here
        proxy - if using a proxy server, specify the "host:port" combo here
        proxy_bypass_list - ";"-separated hosts to bypass (Eg. "*.foo.com")
        switch_to - the option to switch to the new driver (default = True)
        cap_file - the file containing desired capabilities for the browser
        cap_string - the string with desired capabilities for the browser
        recorder_ext - the option to enable the SBase Recorder extension
        disable_csp - an option to disable Chrome's Content Security Policy
        enable_ws - the option to enable the Web Security feature (Chrome)
        enable_sync - the option to enable the Chrome Sync feature (Chrome)
        use_auto_ext - the option to enable Chrome's Automation Extension
        no_sandbox - the option to enable the "No-Sandbox" feature (Chrome)
        disable_gpu - the option to enable Chrome's "Disable GPU" feature
        incognito - the option to enable Chrome's Incognito mode (Chrome)
        guest - the option to enable Chrome's Guest mode (Chrome)
        devtools - the option to open Chrome's DevTools on start (Chrome)
        remote_debug - the option to enable Chrome's Remote Debugger
        swiftshader - the option to use Chrome's swiftshader (Chrome-only)
        ad_block_on - the option to block ads from loading (Chromium-only)
        block_images - the option to block images from loading (Chrome)
        chromium_arg - the option to add a Chromium arg to Chrome/Edge
        firefox_arg - the option to add a Firefox arg to Firefox runs
        firefox_pref - the option to add a Firefox pref:value set (Firefox)
        user_data_dir - Chrome's User Data Directory to use (Chrome-only)
        extension_zip - A Chrome Extension ZIP file to use (Chrome-only)
        extension_dir - A Chrome Extension folder to use (Chrome-only)
        external_pdf - "plugins.always_open_pdf_externally": True. (Chrome)
        is_mobile - the option to use the mobile emulator (Chrome-only)
        d_width - the device width of the mobile emulator (Chrome-only)
        d_height - the device height of the mobile emulator (Chrome-only)
        d_p_r - the device pixel ratio of the mobile emulator (Chrome-only)
        @Returns
        The new driver (also set as self.driver when switch_to is True).
        """
        self.__check_scope()
        # Validate the "remote" browser configuration before launching.
        if self.browser == "remote" and self.servername == "localhost":
            raise Exception(
                'Cannot use "remote" browser driver on localhost!'
                " Did you mean to connect to a remote Grid server"
                " such as BrowserStack or Sauce Labs? In that"
                ' case, you must specify the "server" and "port"'
                " parameters on the command line! "
                "Example: "
                "--server=user:key@hub.browserstack.com --port=80"
            )
        browserstack_ref = "https://browserstack.com/automate/capabilities"
        sauce_labs_ref = (
            "https://wiki.saucelabs.com/display/DOCS/Platform+Configurator#/"
        )
        if self.browser == "remote" and not (self.cap_file or self.cap_string):
            raise Exception(
                "Need to specify a desired capabilities file when "
                'using "--browser=remote". Add "--cap_file=FILE". '
                "File should be in the Python format used by: "
                "%s OR "
                "%s "
                "See SeleniumBase/examples/sample_cap_file_BS.py "
                "and SeleniumBase/examples/sample_cap_file_SL.py"
                % (browserstack_ref, sauce_labs_ref)
            )
        # Any argument left unspecified (None) falls back to the value
        # that the current test / default driver is already using.
        if browser is None:
            browser = self.browser
        browser_name = browser
        if headless is None:
            headless = self.headless
        if locale_code is None:
            locale_code = self.locale_code
        if protocol is None:
            protocol = self.protocol
        if servername is None:
            servername = self.servername
        if port is None:
            port = self.port
        use_grid = False
        if servername != "localhost":
            # Use Selenium Grid (Use "127.0.0.1" for localhost Grid)
            use_grid = True
        proxy_string = proxy
        if proxy_string is None:
            proxy_string = self.proxy_string
        if proxy_bypass_list is None:
            proxy_bypass_list = self.proxy_bypass_list
        user_agent = agent
        if user_agent is None:
            user_agent = self.user_agent
        if recorder_ext is None:
            recorder_ext = self.recorder_ext
        if disable_csp is None:
            disable_csp = self.disable_csp
        if enable_ws is None:
            enable_ws = self.enable_ws
        if enable_sync is None:
            enable_sync = self.enable_sync
        if use_auto_ext is None:
            use_auto_ext = self.use_auto_ext
        if no_sandbox is None:
            no_sandbox = self.no_sandbox
        if disable_gpu is None:
            disable_gpu = self.disable_gpu
        if incognito is None:
            incognito = self.incognito
        if guest_mode is None:
            guest_mode = self.guest_mode
        if devtools is None:
            devtools = self.devtools
        if remote_debug is None:
            remote_debug = self.remote_debug
        if swiftshader is None:
            swiftshader = self.swiftshader
        if ad_block_on is None:
            ad_block_on = self.ad_block_on
        if block_images is None:
            block_images = self.block_images
        if chromium_arg is None:
            chromium_arg = self.chromium_arg
        if firefox_arg is None:
            firefox_arg = self.firefox_arg
        if firefox_pref is None:
            firefox_pref = self.firefox_pref
        if user_data_dir is None:
            user_data_dir = self.user_data_dir
        if extension_zip is None:
            extension_zip = self.extension_zip
        if extension_dir is None:
            extension_dir = self.extension_dir
        if external_pdf is None:
            external_pdf = self.external_pdf
        test_id = self.__get_test_id()
        if cap_file is None:
            cap_file = self.cap_file
        if cap_string is None:
            cap_string = self.cap_string
        if is_mobile is None:
            is_mobile = self.mobile_emulator
        if d_width is None:
            d_width = self.__device_width
        if d_height is None:
            d_height = self.__device_height
        if d_p_r is None:
            d_p_r = self.__device_pixel_ratio
        valid_browsers = constants.ValidBrowsers.valid_browsers
        if browser_name not in valid_browsers:
            raise Exception(
                "Browser: {%s} is not a valid browser option. "
                "Valid options = {%s}" % (browser, valid_browsers)
            )
        # Launch a web browser
        from seleniumbase.core import browser_launcher
        new_driver = browser_launcher.get_driver(
            browser_name=browser_name,
            headless=headless,
            locale_code=locale_code,
            use_grid=use_grid,
            protocol=protocol,
            servername=servername,
            port=port,
            proxy_string=proxy_string,
            proxy_bypass_list=proxy_bypass_list,
            user_agent=user_agent,
            cap_file=cap_file,
            cap_string=cap_string,
            recorder_ext=recorder_ext,
            disable_csp=disable_csp,
            enable_ws=enable_ws,
            enable_sync=enable_sync,
            use_auto_ext=use_auto_ext,
            no_sandbox=no_sandbox,
            disable_gpu=disable_gpu,
            incognito=incognito,
            guest_mode=guest_mode,
            devtools=devtools,
            remote_debug=remote_debug,
            swiftshader=swiftshader,
            ad_block_on=ad_block_on,
            block_images=block_images,
            chromium_arg=chromium_arg,
            firefox_arg=firefox_arg,
            firefox_pref=firefox_pref,
            user_data_dir=user_data_dir,
            extension_zip=extension_zip,
            extension_dir=extension_dir,
            external_pdf=external_pdf,
            test_id=test_id,
            mobile_emulator=is_mobile,
            device_width=d_width,
            device_height=d_height,
            device_pixel_ratio=d_p_r,
        )
        # Track the new driver so it can be closed during tearDown().
        self._drivers_list.append(new_driver)
        self.__driver_browser_map[new_driver] = browser_name
        if switch_to:
            self.driver = new_driver
            self.browser = browser_name
            # Set an initial window size/position appropriate for the
            # browser type, then open the start page (if one was set).
            if self.headless or self.xvfb:
                # Make sure the invisible browser window is big enough
                width = settings.HEADLESS_START_WIDTH
                height = settings.HEADLESS_START_HEIGHT
                try:
                    self.driver.set_window_size(width, height)
                    self.wait_for_ready_state_complete()
                except Exception:
                    # This shouldn't fail, but in case it does,
                    # get safely through setUp() so that
                    # WebDrivers can get closed during tearDown().
                    pass
            else:
                if self.browser == "chrome" or self.browser == "edge":
                    width = settings.CHROME_START_WIDTH
                    height = settings.CHROME_START_HEIGHT
                    try:
                        if self.maximize_option:
                            self.driver.maximize_window()
                        else:
                            self.driver.set_window_size(width, height)
                        self.wait_for_ready_state_complete()
                    except Exception:
                        pass  # Keep existing browser resolution
                elif self.browser == "firefox":
                    width = settings.CHROME_START_WIDTH
                    try:
                        if self.maximize_option:
                            self.driver.maximize_window()
                        else:
                            self.driver.set_window_size(width, 720)
                        self.wait_for_ready_state_complete()
                    except Exception:
                        pass  # Keep existing browser resolution
                elif self.browser == "safari":
                    width = settings.CHROME_START_WIDTH
                    if self.maximize_option:
                        try:
                            self.driver.maximize_window()
                            self.wait_for_ready_state_complete()
                        except Exception:
                            pass  # Keep existing browser resolution
                    else:
                        try:
                            self.driver.set_window_rect(10, 30, width, 630)
                        except Exception:
                            pass
                elif self.browser == "opera":
                    width = settings.CHROME_START_WIDTH
                    if self.maximize_option:
                        try:
                            self.driver.maximize_window()
                            self.wait_for_ready_state_complete()
                        except Exception:
                            pass  # Keep existing browser resolution
                    else:
                        try:
                            self.driver.set_window_rect(10, 30, width, 700)
                        except Exception:
                            pass
            if self.start_page and len(self.start_page) >= 4:
                if page_utils.is_valid_url(self.start_page):
                    self.open(self.start_page)
                else:
                    # Retry the start page with an "https://" prefix.
                    new_start_page = "https://" + self.start_page
                    if page_utils.is_valid_url(new_start_page):
                        self.__dont_record_open = True
                        self.open(new_start_page)
                        self.__dont_record_open = False
        return new_driver
def switch_to_driver(self, driver):
"""Switches control of the browser to the specified driver.
Also sets the self.driver variable to the specified driver.
You may need this if using self.get_new_driver() in your code."""
self.__check_scope()
self.driver = driver
if self.driver in self.__driver_browser_map:
self.browser = self.__driver_browser_map[self.driver]
self.bring_active_window_to_front()
def switch_to_default_driver(self):
""" Sets self.driver to the default/original driver. """
self.__check_scope()
self.driver = self._default_driver
if self.driver in self.__driver_browser_map:
self.browser = self.__driver_browser_map[self.driver]
self.bring_active_window_to_front()
def save_screenshot(
self, name, folder=None, selector=None, by=By.CSS_SELECTOR
):
"""
Saves a screenshot of the current page.
If no folder is specified, uses the folder where pytest was called.
The screenshot will include the entire page unless a selector is given.
If a provided selector is not found, then takes a full-page screenshot.
If the folder provided doesn't exist, it will get created.
The screenshot will be in PNG format: (*.png)
"""
self.wait_for_ready_state_complete()
if selector and by:
selector, by = self.__recalculate_selector(selector, by)
if page_actions.is_element_present(self.driver, selector, by):
return page_actions.save_screenshot(
self.driver, name, folder, selector, by
)
return page_actions.save_screenshot(self.driver, name, folder)
    def save_screenshot_to_logs(
        self, name=None, selector=None, by=By.CSS_SELECTOR
    ):
        """Saves a screenshot of the current page to the "latest_logs" folder.
        Naming is automatic:
        If NO NAME provided: "_1_screenshot.png", "_2_screenshot.png", etc.
        If NAME IS provided, it becomes: "_1_name.png", "_2_name.png", etc.
        The screenshot will include the entire page unless a selector is given.
        If a provided selector is not found, then takes a full-page screenshot.
        (The last_page / failure screenshot is always "screenshot.png")
        The screenshot will be in PNG format."""
        self.wait_for_ready_state_complete()
        test_logpath = os.path.join(self.log_path, self.__get_test_id())
        self.__create_log_path_as_needed(test_logpath)
        if name:
            name = str(name)
        # Build the numbered filename described in the docstring above.
        self.__screenshot_count += 1
        if not name or len(name) == 0:
            name = "_%s_screenshot.png" % self.__screenshot_count
        else:
            pre_name = "_%s_" % self.__screenshot_count
            if len(name) >= 4 and name[-4:].lower() == ".png":
                name = name[:-4]
                if len(name) == 0:
                    name = "screenshot"
            name = "%s%s.png" % (pre_name, name)
        if selector and by:
            selector, by = self.__recalculate_selector(selector, by)
            if page_actions.is_element_present(self.driver, selector, by):
                return page_actions.save_screenshot(
                    self.driver, name, test_logpath, selector, by
                )
        # In recorder mode, log this call as a recorded action (unless
        # the recorder is paused or the page isn't a recordable URL).
        if self.recorder_mode:
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["ss_tl", "", origin, time_stamp]
                        self.__extra_actions.append(action)
        return page_actions.save_screenshot(self.driver, name, test_logpath)
def save_page_source(self, name, folder=None):
"""Saves the page HTML to the current directory (or given subfolder).
If the folder specified doesn't exist, it will get created.
@Params
name - The file name to save the current page's HTML to.
folder - The folder to save the file to. (Default = current folder)
"""
self.wait_for_ready_state_complete()
return page_actions.save_page_source(self.driver, name, folder)
def save_cookies(self, name="cookies.txt"):
""" Saves the page cookies to the "saved_cookies" folder. """
self.wait_for_ready_state_complete()
cookies = self.driver.get_cookies()
json_cookies = json.dumps(cookies)
if name.endswith("/"):
raise Exception("Invalid filename for Cookies!")
if "/" in name:
name = name.split("/")[-1]
if len(name) < 1:
raise Exception("Filename for Cookies is too short!")
if not name.endswith(".txt"):
name = name + ".txt"
folder = constants.SavedCookies.STORAGE_FOLDER
abs_path = os.path.abspath(".")
file_path = abs_path + "/%s" % folder
if not os.path.exists(file_path):
os.makedirs(file_path)
cookies_file_path = "%s/%s" % (file_path, name)
cookies_file = codecs.open(cookies_file_path, "w+", encoding="utf-8")
cookies_file.writelines(json_cookies)
cookies_file.close()
def load_cookies(self, name="cookies.txt"):
""" Loads the page cookies from the "saved_cookies" folder. """
self.wait_for_ready_state_complete()
if name.endswith("/"):
raise Exception("Invalid filename for Cookies!")
if "/" in name:
name = name.split("/")[-1]
if len(name) < 1:
raise Exception("Filename for Cookies is too short!")
if not name.endswith(".txt"):
name = name + ".txt"
folder = constants.SavedCookies.STORAGE_FOLDER
abs_path = os.path.abspath(".")
file_path = abs_path + "/%s" % folder
cookies_file_path = "%s/%s" % (file_path, name)
json_cookies = None
with open(cookies_file_path, "r") as f:
json_cookies = f.read().strip()
cookies = json.loads(json_cookies)
for cookie in cookies:
if "expiry" in cookie:
del cookie["expiry"]
self.driver.add_cookie(cookie)
def delete_all_cookies(self):
"""Deletes all cookies in the web browser.
Does NOT delete the saved cookies file."""
self.wait_for_ready_state_complete()
self.driver.delete_all_cookies()
def delete_saved_cookies(self, name="cookies.txt"):
"""Deletes the cookies file from the "saved_cookies" folder.
Does NOT delete the cookies from the web browser."""
self.wait_for_ready_state_complete()
if name.endswith("/"):
raise Exception("Invalid filename for Cookies!")
if "/" in name:
name = name.split("/")[-1]
if len(name) < 1:
raise Exception("Filename for Cookies is too short!")
if not name.endswith(".txt"):
name = name + ".txt"
folder = constants.SavedCookies.STORAGE_FOLDER
abs_path = os.path.abspath(".")
file_path = abs_path + "/%s" % folder
cookies_file_path = "%s/%s" % (file_path, name)
if os.path.exists(cookies_file_path):
if cookies_file_path.endswith(".txt"):
os.remove(cookies_file_path)
def __ad_block_as_needed(self):
""" This is an internal method for handling ad-blocking.
Use "pytest --ad-block" to enable this during tests.
When not Chromium or in headless mode, use the hack. """
if self.ad_block_on and (self.headless or not self.is_chromium()):
# (Chromium browsers in headed mode use the extension instead)
current_url = self.get_current_url()
if not current_url == self.__last_page_load_url:
if page_actions.is_element_present(
self.driver, "iframe", By.CSS_SELECTOR
):
self.ad_block()
self.__last_page_load_url = current_url
def wait_for_ready_state_complete(self, timeout=None):
""" Waits for the "readyState" of the page to be "complete".
Returns True when the method completes. """
self.__check_scope()
self.__check_browser()
if not timeout:
timeout = settings.EXTREME_TIMEOUT
if self.timeout_multiplier and timeout == settings.EXTREME_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
js_utils.wait_for_ready_state_complete(self.driver, timeout)
self.wait_for_angularjs(timeout=settings.MINI_TIMEOUT)
if self.js_checking_on:
self.assert_no_js_errors()
self.__ad_block_as_needed()
return True
def wait_for_angularjs(self, timeout=None, **kwargs):
""" Waits for Angular components of the page to finish loading.
Returns True when the method completes. """
self.__check_scope()
if not timeout:
timeout = settings.MINI_TIMEOUT
if self.timeout_multiplier and timeout == settings.MINI_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
js_utils.wait_for_angularjs(self.driver, timeout, **kwargs)
return True
def sleep(self, seconds):
self.__check_scope()
if (
not hasattr(sb_config, "time_limit")
or (hasattr(sb_config, "time_limit") and not sb_config.time_limit)
):
time.sleep(seconds)
elif seconds < 0.4:
shared_utils.check_if_time_limit_exceeded()
time.sleep(seconds)
shared_utils.check_if_time_limit_exceeded()
else:
start_ms = time.time() * 1000.0
stop_ms = start_ms + (seconds * 1000.0)
for x in range(int(seconds * 5)):
shared_utils.check_if_time_limit_exceeded()
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.2)
def install_addon(self, xpi_file):
"""Installs a Firefox add-on instantly at run-time.
@Params
xpi_file - A file archive in .xpi format.
"""
self.wait_for_ready_state_complete()
if self.browser != "firefox":
raise Exception(
"install_addon(xpi_file) is for Firefox ONLY!\n"
"To load a Chrome extension, use the comamnd-line:\n"
"--extension_zip=CRX_FILE OR --extension_dir=DIR"
)
xpi_path = os.path.abspath(xpi_file)
self.driver.install_addon(xpi_path, temporary=True)
def activate_demo_mode(self):
self.demo_mode = True
def deactivate_demo_mode(self):
self.demo_mode = False
def activate_design_mode(self):
# Activate Chrome's Design Mode, which lets you edit a site directly.
# See: https://twitter.com/sulco/status/1177559150563344384
self.wait_for_ready_state_complete()
script = """document.designMode = 'on';"""
self.execute_script(script)
def deactivate_design_mode(self):
# Deactivate Chrome's Design Mode.
self.wait_for_ready_state_complete()
script = """document.designMode = 'off';"""
self.execute_script(script)
    def activate_recorder(self):
        """Activates Recorder Mode by injecting the recorder JS into the
        current page. Chromium-only. Prints a message and returns early
        on special browser pages ("data:", "about:", "chrome:", "edge:"),
        and is a no-op when the Recorder extension is already active."""
        from seleniumbase.js_code.recorder_js import recorder_js
        if not self.is_chromium():
            raise Exception(
                "The Recorder is only for Chromium browsers: (Chrome or Edge)")
        url = self.driver.current_url
        if (
            url.startswith("data:") or url.startswith("about:")
            or url.startswith("chrome:") or url.startswith("edge:")
        ):
            message = (
                'The URL in Recorder-Mode cannot start with: '
                '"data:", "about:", "chrome:", or "edge:"!')
            print("\n" + message)
            return
        if self.recorder_ext:
            return  # The Recorder extension is already active
        try:
            # Only inject the recorder JS if it isn't already running
            # (the page's sessionStorage tracks activation state).
            recorder_on = self.get_session_storage_item("recorder_activated")
            if not recorder_on == "yes":
                self.execute_script(recorder_js)
            self.recorder_mode = True
            message = "Recorder Mode ACTIVE. [ESC]: Pause. [~`]: Resume."
            print("\n" + message)
            p_msg = "Recorder Mode ACTIVE.<br>[ESC]: Pause. [~`]: Resume."
            self.post_message(p_msg, pause=False, style="error")
        except Exception:
            # Best-effort: sessionStorage / JS execution may be
            # unavailable on some pages.
            pass
def save_recorded_actions(self):
"""(When using Recorder Mode, use this method if you plan on
navigating to a different domain/origin in the same tab.)
This method saves recorded actions from the active tab so that
a complete recording can be exported as a SeleniumBase file at the
end of the test. This is only needed in special cases because most
actions that result in a new origin, (such as clicking on a link),
should automatically open a new tab while Recorder Mode is enabled."""
url = self.get_current_url()
if url and len(url) > 0:
if ("http:") in url or ("https:") in url or ("file:") in url:
origin = self.get_origin()
self.__origins_to_save.append(origin)
tab_actions = self.__get_recorded_actions_on_active_tab()
self.__actions_to_save.append(tab_actions)
def __get_recorded_actions_on_active_tab(self):
url = self.driver.current_url
if (
url.startswith("data:") or url.startswith("about:")
or url.startswith("chrome:") or url.startswith("edge:")
):
return []
actions = self.get_session_storage_item('recorded_actions')
if actions:
actions = json.loads(actions)
return actions
else:
return []
    def __process_recorded_actions(self):
        """Converts the raw actions captured by the Recorder into
        SeleniumBase method calls, then writes (or appends to) the
        generated test file in the recordings folder.
        Raw action format: [action_name, data1, data2, timestamp],
        where action_name is a short code such as "begin", "_url_",
        "click", "input", etc. The method runs a series of cleanup
        passes over the time-sorted actions (marking entries "_skip"
        or rewriting them) before translating them into code."""
        import colorama
        # --- Gather raw actions from every open tab, plus saved extras ---
        raw_actions = []  # All raw actions from sessionStorage
        srt_actions = []
        cleaned_actions = []
        sb_actions = []
        used_actions = []
        action_dict = {}
        for window in self.driver.window_handles:
            self.switch_to_window(window)
            tab_actions = self.__get_recorded_actions_on_active_tab()
            for action in tab_actions:
                if action not in used_actions:
                    used_actions.append(action)
                    raw_actions.append(action)
        for tab_actions in self.__actions_to_save:
            for action in tab_actions:
                if action not in used_actions:
                    used_actions.append(action)
                    raw_actions.append(action)
        for action in self.__extra_actions:
            if action not in used_actions:
                used_actions.append(action)
                raw_actions.append(action)
        # --- Sort by timestamp; the dict key also de-duplicates ---
        for action in raw_actions:
            if self._reuse_session:
                # Skip actions recorded before this test began.
                if int(action[3]) < int(self.__js_start_time):
                    continue
            # Use key for sorting and preventing duplicates
            key = str(action[3]) + "-" + str(action[0])
            action_dict[key] = action
        for key in sorted(action_dict):
            # print(action_dict[key]) # For debugging purposes
            srt_actions.append(action_dict[key])
        # --- Pass: drop page-loads that directly follow a "sk_op" ---
        for n in range(len(srt_actions)):
            if (
                (srt_actions[n][0] == "begin" or srt_actions[n][0] == "_url_")
                and n > 0
                and srt_actions[n - 1][0] == "sk_op"
            ):
                srt_actions[n][0] = "_skip"
        # --- Pass: drop page-loads two entries after a "sk_op" when the
        # intermediate entry was already skipped for the same origin ---
        for n in range(len(srt_actions)):
            if (
                (srt_actions[n][0] == "begin" or srt_actions[n][0] == "_url_")
                and n > 1
                and srt_actions[n - 1][0] == "_skip"
                and srt_actions[n - 2][0] == "sk_op"
                and srt_actions[n][2] == srt_actions[n - 1][2]
            ):
                srt_actions[n][0] = "_skip"
        # --- Pass: a page-load right after a click to (nearly) the same
        # URL is a follow-up navigation -> mark it "f_url" ---
        for n in range(len(srt_actions)):
            if (
                (srt_actions[n][0] == "begin" or srt_actions[n][0] == "_url_")
                and n > 0
                and (
                    srt_actions[n - 1][0] == "click"
                    or srt_actions[n - 1][0] == "js_cl"
                    or srt_actions[n - 1][0] == "js_ca"
                )
            ):
                url1 = srt_actions[n - 1][2]
                if (
                    srt_actions[n - 1][0] == "js_cl"
                    or srt_actions[n - 1][0] == "js_ca"
                ):
                    url1 = srt_actions[n - 1][2][0]
                if url1.endswith("/#/"):
                    url1 = url1[:-3]
                elif url1.endswith("/"):
                    url1 = url1[:-1]
                url2 = srt_actions[n][2]
                # NOTE(review): assigning from url1 here looks like a bug;
                # the parallel branches suggest url2 = url2[:-3] — confirm.
                if url2.endswith("/#/"):
                    url2 = url1[:-3]
                elif url2.endswith("/"):
                    url2 = url2[:-1]
                if (
                    url1 == url2
                    or url1 == url2.replace("www.", "")
                    or (len(url1) > 0
                        and (url2.startswith(url1) or "?search" in url1)
                        and (int(srt_actions[n][3]) - int(
                            srt_actions[n - 1][3]) < 6500))
                ):
                    srt_actions[n][0] = "f_url"
        # --- Pass: collapse consecutive page-loads of the same URL ---
        for n in range(len(srt_actions)):
            if (
                (srt_actions[n][0] == "begin" or srt_actions[n][0] == "_url_")
                and n > 0
                and (
                    srt_actions[n - 1][0] == "begin"
                    or srt_actions[n - 1][0] == "_url_"
                )
            ):
                url1 = srt_actions[n - 1][2]
                if url1.endswith("/#/"):
                    url1 = url1[:-3]
                elif url1.endswith("/"):
                    url1 = url1[:-1]
                url2 = srt_actions[n][2]
                # NOTE(review): same suspected url1/url2 mix-up as above.
                if url2.endswith("/#/"):
                    url2 = url1[:-3]
                elif url2.endswith("/"):
                    url2 = url2[:-1]
                if url1.replace("www.", "") == url2.replace("www.", ""):
                    srt_actions[n - 1][0] = "_skip"
                elif url2.startswith(url1):
                    srt_actions[n][0] = "f_url"
        # --- Pass: de-duplicate "input" actions (empty predecessor, or a
        # textarea input repeated across a form submit) ---
        for n in range(len(srt_actions)):
            if (
                srt_actions[n][0] == "input"
                and n > 0
                and srt_actions[n - 1][0] == "input"
                and srt_actions[n - 1][2] == ""
            ):
                srt_actions[n - 1][0] = "_skip"
            elif (
                srt_actions[n][0] == "input"
                and n > 1
                and srt_actions[n - 2][0] == "input"
                and srt_actions[n - 1][0] == "submi"
                and srt_actions[n - 2][1].startswith("textarea")
                and srt_actions[n - 2][1] == srt_actions[n][1]
            ):
                srt_actions[n - 2][0] = "_skip"
        # --- Pass: a page-load soon (< 6.5s) after a button/input click
        # or an Enter-terminated input is a follow-up navigation ---
        for n in range(len(srt_actions)):
            if (
                (srt_actions[n][0] == "begin" or srt_actions[n][0] == "_url_")
                and n > 0
                and (
                    srt_actions[n - 1][0] == "click"
                    or srt_actions[n - 1][0] == "js_cl"
                    or srt_actions[n - 1][0] == "js_ca"
                    or srt_actions[n - 1][0] == "input"
                )
                and (
                    int(srt_actions[n][3]) - int(srt_actions[n - 1][3]) < 6500
                )
            ):
                if (
                    srt_actions[n - 1][0] == "click"
                    or srt_actions[n - 1][0] == "js_cl"
                    or srt_actions[n - 1][0] == "js_ca"
                ):
                    if (
                        srt_actions[n - 1][1].startswith("input")
                        or srt_actions[n - 1][1].startswith("button")
                    ):
                        srt_actions[n][0] = "f_url"
                elif srt_actions[n - 1][0] == "input":
                    if srt_actions[n - 1][2].endswith("\n"):
                        srt_actions[n][0] = "f_url"
        # --- Pass: merge "chfil" file-chooser data into "cho_f" ---
        for n in range(len(srt_actions)):
            if (
                srt_actions[n][0] == "cho_f"
                and n > 0
                and srt_actions[n - 1][0] == "chfil"
            ):
                srt_actions[n - 1][0] = "_skip"
                srt_actions[n][2] = srt_actions[n - 1][1][1]
        # --- Pass: drop the raw "input" that follows an MFA entry ---
        for n in range(len(srt_actions)):
            if (
                srt_actions[n][0] == "input"
                and n > 0
                and srt_actions[n - 1][0] == "e_mfa"
            ):
                srt_actions[n][0] = "_skip"
        # --- Pass: page-load after a form submit / MFA entry -> "f_url" ---
        for n in range(len(srt_actions)):
            if (
                (srt_actions[n][0] == "begin" or srt_actions[n][0] == "_url_")
                and n > 0
                and (
                    srt_actions[n - 1][0] == "submi"
                    or srt_actions[n - 1][0] == "e_mfa"
                )
            ):
                srt_actions[n][0] = "f_url"
        # --- Collect the set of known origins (trailing "/" stripped) ---
        origins = []
        for n in range(len(srt_actions)):
            if (
                srt_actions[n][0] == "begin"
                or srt_actions[n][0] == "_url_"
                or srt_actions[n][0] == "f_url"
            ):
                origin = srt_actions[n][1]
                if origin.endswith("/"):
                    origin = origin[0:-1]
                if origin not in origins:
                    origins.append(origin)
        for origin in self.__origins_to_save:
            origins.append(origin)
        # --- Pass: fold "ho_cl" (hover) + "click" into "h_clk" ---
        for n in range(len(srt_actions)):
            if (
                srt_actions[n][0] == "click"
                and n > 0
                and srt_actions[n - 1][0] == "ho_cl"
                and srt_actions[n - 1][2] in origins
            ):
                srt_actions[n - 1][0] = "_skip"
                srt_actions[n][0] = "h_clk"
                srt_actions[n][1] = srt_actions[n - 1][1][0]
                srt_actions[n][2] = srt_actions[n - 1][1][1]
        # --- Pass: promote remaining in-origin "chfil" to "cho_f" ---
        for n in range(len(srt_actions)):
            if srt_actions[n][0] == "chfil" and srt_actions[n][2] in origins:
                srt_actions[n][0] = "cho_f"
                srt_actions[n][2] = srt_actions[n][1][1]
                srt_actions[n][1] = srt_actions[n][1][0]
        # --- Pass: collapse duplicate "sh_fc" (show file choosers) ---
        for n in range(len(srt_actions)):
            if (
                srt_actions[n][0] == "sh_fc"
                and n > 0
                and srt_actions[n - 1][0] == "sh_fc"
            ):
                srt_actions[n - 1][0] = "_skip"
        # Action codes injected by SeleniumBase itself (not the page JS);
        # these carry their origin in action[2] and get origin-filtered.
        ext_actions = []
        ext_actions.append("_url_")
        ext_actions.append("js_cl")
        ext_actions.append("js_ca")
        ext_actions.append("js_ty")
        ext_actions.append("as_el")
        ext_actions.append("as_ep")
        ext_actions.append("asenv")
        ext_actions.append("hi_li")
        ext_actions.append("as_lt")
        ext_actions.append("as_ti")
        ext_actions.append("as_df")
        ext_actions.append("do_fi")
        ext_actions.append("as_at")
        ext_actions.append("as_te")
        ext_actions.append("as_et")
        ext_actions.append("sw_fr")
        ext_actions.append("sw_dc")
        ext_actions.append("s_c_f")
        ext_actions.append("s_c_d")
        ext_actions.append("sh_fc")
        ext_actions.append("c_l_s")
        ext_actions.append("e_mfa")
        ext_actions.append("ss_tl")
        for n in range(len(srt_actions)):
            if srt_actions[n][0] in ext_actions:
                origin = srt_actions[n][2]
                if (
                    srt_actions[n][0] == "js_cl"
                    or srt_actions[n][0] == "js_ca"
                ):
                    origin = srt_actions[n][2][1]
                if origin.endswith("/"):
                    origin = origin[0:-1]
                # "js_ty" / "e_mfa" pack [selector, text] into action[1];
                # unpack them into the (selector, text) slots.
                if srt_actions[n][0] == "js_ty":
                    srt_actions[n][2] = srt_actions[n][1][1]
                    srt_actions[n][1] = srt_actions[n][1][0]
                if srt_actions[n][0] == "e_mfa":
                    srt_actions[n][2] = srt_actions[n][1][1]
                    srt_actions[n][1] = srt_actions[n][1][0]
                if srt_actions[n][0] == "_url_" and origin not in origins:
                    origins.append(origin)
                if origin not in origins:
                    srt_actions[n][0] = "_skip"
        # --- Pass: raw "input" echoing a "js_ty" is redundant ---
        for n in range(len(srt_actions)):
            if (
                srt_actions[n][0] == "input"
                and n > 0
                and srt_actions[n - 1][0] == "js_ty"
                and srt_actions[n][2] == srt_actions[n - 1][2]
            ):
                srt_actions[n][0] = "_skip"
        # NOTE(review): cleaned_actions is filled but not used again in
        # this method — possibly dead code or used elsewhere; confirm.
        for n in range(len(srt_actions)):
            cleaned_actions.append(srt_actions[n])
        # --- Translate the cleaned actions into SeleniumBase calls.
        # Quote-juggling below picks the outer quote style so that the
        # generated line stays valid when the data contains '"'. ---
        for action in srt_actions:
            if action[0] == "begin" or action[0] == "_url_":
                if "%" in action[2] and python3:
                    try:
                        from urllib.parse import unquote
                        action[2] = unquote(action[2], errors="strict")
                    except Exception:
                        pass
                sb_actions.append('self.open("%s")' % action[2])
            elif action[0] == "f_url":
                if "%" in action[2] and python3:
                    try:
                        from urllib.parse import unquote
                        action[2] = unquote(action[2], errors="strict")
                    except Exception:
                        pass
                sb_actions.append('self.open_if_not_url("%s")' % action[2])
            elif action[0] == "click":
                method = "click"
                if '"' not in action[1]:
                    sb_actions.append('self.%s("%s")' % (method, action[1]))
                else:
                    sb_actions.append("self.%s('%s')" % (method, action[1]))
            elif action[0] == "js_cl":
                method = "js_click"
                if '"' not in action[1]:
                    sb_actions.append('self.%s("%s")' % (method, action[1]))
                else:
                    sb_actions.append("self.%s('%s')" % (method, action[1]))
            elif action[0] == "js_ca":
                method = "js_click_all"
                if '"' not in action[1]:
                    sb_actions.append('self.%s("%s")' % (method, action[1]))
                else:
                    sb_actions.append("self.%s('%s')" % (method, action[1]))
            elif action[0] == "canva":
                method = "click_with_offset"
                selector = action[1][0]
                p_x = action[1][1]
                p_y = action[1][2]
                if '"' not in selector:
                    sb_actions.append(
                        'self.%s("%s", %s, %s)' % (method, selector, p_x, p_y)
                    )
                else:
                    sb_actions.append(
                        "self.%s('%s', %s, %s)" % (method, selector, p_x, p_y)
                    )
            elif action[0] == "input" or action[0] == "js_ty":
                method = "type"
                if action[0] == "js_ty":
                    method = "js_type"
                text = action[2].replace("\n", "\\n")
                if '"' not in action[1] and '"' not in text:
                    sb_actions.append('self.%s("%s", "%s")' % (
                        method, action[1], text))
                elif '"' not in action[1] and '"' in text:
                    sb_actions.append('self.%s("%s", \'%s\')' % (
                        method, action[1], text))
                elif '"' in action[1] and '"' not in text:
                    sb_actions.append('self.%s(\'%s\', "%s")' % (
                        method, action[1], text))
                elif '"' in action[1] and '"' in text:
                    sb_actions.append("self.%s('%s', '%s')" % (
                        method, action[1], text))
            elif action[0] == "e_mfa":
                method = "enter_mfa_code"
                text = action[2].replace("\n", "\\n")
                if '"' not in action[1] and '"' not in text:
                    sb_actions.append('self.%s("%s", "%s")' % (
                        method, action[1], text))
                elif '"' not in action[1] and '"' in text:
                    sb_actions.append('self.%s("%s", \'%s\')' % (
                        method, action[1], text))
                elif '"' in action[1] and '"' not in text:
                    sb_actions.append('self.%s(\'%s\', "%s")' % (
                        method, action[1], text))
                elif '"' in action[1] and '"' in text:
                    sb_actions.append("self.%s('%s', '%s')" % (
                        method, action[1], text))
            elif action[0] == "h_clk":
                method = "hover_and_click"
                if '"' not in action[1] and '"' not in action[2]:
                    sb_actions.append('self.%s("%s", "%s")' % (
                        method, action[1], action[2]))
                elif '"' not in action[1] and '"' in action[2]:
                    sb_actions.append('self.%s("%s", \'%s\')' % (
                        method, action[1], action[2]))
                elif '"' in action[1] and '"' not in action[2]:
                    sb_actions.append('self.%s(\'%s\', "%s")' % (
                        method, action[1], action[2]))
                elif '"' in action[1] and '"' in action[2]:
                    sb_actions.append("self.%s('%s', '%s')" % (
                        method, action[1], action[2]))
            elif action[0] == "ddrop":
                method = "drag_and_drop"
                if '"' not in action[1] and '"' not in action[2]:
                    sb_actions.append('self.%s("%s", "%s")' % (
                        method, action[1], action[2]))
                elif '"' not in action[1] and '"' in action[2]:
                    sb_actions.append('self.%s("%s", \'%s\')' % (
                        method, action[1], action[2]))
                elif '"' in action[1] and '"' not in action[2]:
                    sb_actions.append('self.%s(\'%s\', "%s")' % (
                        method, action[1], action[2]))
                elif '"' in action[1] and '"' in action[2]:
                    sb_actions.append("self.%s('%s', '%s')" % (
                        method, action[1], action[2]))
            elif action[0] == "s_opt":
                method = "select_option_by_text"
                if '"' not in action[1] and '"' not in action[2]:
                    sb_actions.append('self.%s("%s", "%s")' % (
                        method, action[1], action[2]))
                elif '"' not in action[1] and '"' in action[2]:
                    sb_actions.append('self.%s("%s", \'%s\')' % (
                        method, action[1], action[2]))
                elif '"' in action[1] and '"' not in action[2]:
                    sb_actions.append('self.%s(\'%s\', "%s")' % (
                        method, action[1], action[2]))
                elif '"' in action[1] and '"' in action[2]:
                    sb_actions.append("self.%s('%s', '%s')" % (
                        method, action[1], action[2]))
            elif action[0] == "set_v":
                method = "set_value"
                if '"' not in action[1] and '"' not in action[2]:
                    sb_actions.append('self.%s("%s", "%s")' % (
                        method, action[1], action[2]))
                elif '"' not in action[1] and '"' in action[2]:
                    sb_actions.append('self.%s("%s", \'%s\')' % (
                        method, action[1], action[2]))
                elif '"' in action[1] and '"' not in action[2]:
                    sb_actions.append('self.%s(\'%s\', "%s")' % (
                        method, action[1], action[2]))
                elif '"' in action[1] and '"' in action[2]:
                    sb_actions.append("self.%s('%s', '%s')" % (
                        method, action[1], action[2]))
            elif action[0] == "cho_f":
                method = "choose_file"
                action[2] = action[2].replace("\\", "\\\\")
                if '"' not in action[1] and '"' not in action[2]:
                    sb_actions.append('self.%s("%s", "%s")' % (
                        method, action[1], action[2]))
                elif '"' not in action[1] and '"' in action[2]:
                    sb_actions.append('self.%s("%s", \'%s\')' % (
                        method, action[1], action[2]))
                elif '"' in action[1] and '"' not in action[2]:
                    sb_actions.append('self.%s(\'%s\', "%s")' % (
                        method, action[1], action[2]))
                elif '"' in action[1] and '"' in action[2]:
                    sb_actions.append("self.%s('%s', '%s')" % (
                        method, action[1], action[2]))
            elif action[0] == "sw_fr":
                method = "switch_to_frame"
                if '"' not in action[1]:
                    sb_actions.append('self.%s("%s")' % (method, action[1]))
                else:
                    sb_actions.append("self.%s('%s')" % (method, action[1]))
            elif action[0] == "sw_dc":
                sb_actions.append("self.switch_to_default_content()")
            elif action[0] == "s_c_f":
                method = "set_content_to_frame"
                if '"' not in action[1]:
                    sb_actions.append('self.%s("%s")' % (method, action[1]))
                else:
                    sb_actions.append("self.%s('%s')" % (method, action[1]))
            elif action[0] == "s_c_d":
                method = "set_content_to_default"
                nested = action[1]
                if nested:
                    sb_actions.append("self.%s()" % method)
                else:
                    sb_actions.append("self.%s(nested=False)" % method)
            elif action[0] == "as_el":
                method = "assert_element"
                if '"' not in action[1]:
                    sb_actions.append('self.%s("%s")' % (method, action[1]))
                else:
                    sb_actions.append("self.%s('%s')" % (method, action[1]))
            elif action[0] == "as_ep":
                method = "assert_element_present"
                if '"' not in action[1]:
                    sb_actions.append('self.%s("%s")' % (method, action[1]))
                else:
                    sb_actions.append("self.%s('%s')" % (method, action[1]))
            elif action[0] == "asenv":
                method = "assert_element_not_visible"
                if '"' not in action[1]:
                    sb_actions.append('self.%s("%s")' % (method, action[1]))
                else:
                    sb_actions.append("self.%s('%s')" % (method, action[1]))
            elif action[0] == "hi_li":
                method = "highlight"
                if '"' not in action[1]:
                    sb_actions.append('self.%s("%s")' % (method, action[1]))
                else:
                    sb_actions.append("self.%s('%s')" % (method, action[1]))
            elif action[0] == "as_lt":
                method = "assert_link_text"
                if '"' not in action[1]:
                    sb_actions.append('self.%s("%s")' % (method, action[1]))
                else:
                    sb_actions.append("self.%s('%s')" % (method, action[1]))
            elif action[0] == "as_ti":
                method = "assert_title"
                if '"' not in action[1]:
                    sb_actions.append('self.%s("%s")' % (method, action[1]))
                else:
                    sb_actions.append("self.%s('%s')" % (method, action[1]))
            elif action[0] == "as_df":
                method = "assert_downloaded_file"
                if '"' not in action[1]:
                    sb_actions.append('self.%s("%s")' % (method, action[1]))
                else:
                    sb_actions.append("self.%s('%s')" % (method, action[1]))
            elif action[0] == "do_fi":
                method = "download_file"
                file_url = action[1][0]
                dest = action[1][1]
                if not dest:
                    sb_actions.append('self.%s("%s")' % (
                        method, file_url))
                else:
                    sb_actions.append('self.%s("%s", "%s")' % (
                        method, file_url, dest))
            elif action[0] == "as_at":
                method = "assert_attribute"
                if ('"' not in action[1][0]) and action[1][2]:
                    sb_actions.append('self.%s("%s", "%s", "%s")' % (
                        method, action[1][0], action[1][1], action[1][2]))
                elif ('"' not in action[1][0]) and not action[1][2]:
                    sb_actions.append('self.%s("%s", "%s")' % (
                        method, action[1][0], action[1][1]))
                elif ('"' in action[1][0]) and action[1][2]:
                    sb_actions.append('self.%s(\'%s\', "%s", "%s")' % (
                        method, action[1][0], action[1][1], action[1][2]))
                else:
                    sb_actions.append('self.%s(\'%s\', "%s")' % (
                        method, action[1][0], action[1][1]))
            elif action[0] == "as_te" or action[0] == "as_et":
                import unicodedata
                action[1][0] = unicodedata.normalize("NFKC", action[1][0])
                method = "assert_text"
                if action[0] == "as_et":
                    method = "assert_exact_text"
                if action[1][1] != "html":
                    if '"' not in action[1][0] and '"' not in action[1][1]:
                        sb_actions.append('self.%s("%s", "%s")' % (
                            method, action[1][0], action[1][1]))
                    elif '"' not in action[1][0] and '"' in action[1][1]:
                        sb_actions.append('self.%s("%s", \'%s\')' % (
                            method, action[1][0], action[1][1]))
                    # NOTE(review): action[1] is a list here, so
                    # '"' in action[1] is a list-membership test that is
                    # almost always False — the two branches below look
                    # like they intended '"' in action[1][0]; confirm.
                    elif '"' in action[1] and '"' not in action[1][1]:
                        sb_actions.append('self.%s(\'%s\', "%s")' % (
                            method, action[1][0], action[1][1]))
                    elif '"' in action[1] and '"' in action[1][1]:
                        sb_actions.append("self.%s('%s', '%s')" % (
                            method, action[1][0], action[1][1]))
                else:
                    if '"' not in action[1][0]:
                        sb_actions.append('self.%s("%s")' % (
                            method, action[1][0]))
                    else:
                        sb_actions.append("self.%s('%s')" % (
                            method, action[1][0]))
            elif action[0] == "ss_tl":
                method = "save_screenshot_to_logs"
                sb_actions.append('self.%s()' % method)
            elif action[0] == "sh_fc":
                method = "show_file_choosers"
                sb_actions.append('self.%s()' % method)
            elif action[0] == "c_l_s":
                sb_actions.append("self.clear_local_storage()")
            elif action[0] == "c_box":
                method = "check_if_unchecked"
                if action[2] == "no":
                    method = "uncheck_if_checked"
                if '"' not in action[1]:
                    sb_actions.append('self.%s("%s")' % (method, action[1]))
                else:
                    sb_actions.append("self.%s('%s')" % (method, action[1]))
        # --- Assemble the generated test file (new file gets the class
        # header; an existing recording gets a new test method) ---
        filename = self.__get_filename()
        new_file = False
        data = []
        if filename not in sb_config._recorded_actions:
            new_file = True
            sb_config._recorded_actions[filename] = []
            data.append("from seleniumbase import BaseCase")
            data.append("")
            data.append("")
            data.append("class %s(BaseCase):" % self.__class__.__name__)
        else:
            data = sb_config._recorded_actions[filename]
        data.append("    def %s(self):" % self._testMethodName)
        if len(sb_actions) > 0:
            for action in sb_actions:
                data.append("        " + action)
        else:
            data.append("        pass")
        data.append("")
        sb_config._recorded_actions[filename] = data
        recordings_folder = constants.Recordings.SAVED_FOLDER
        if recordings_folder.endswith("/"):
            recordings_folder = recordings_folder[:-1]
        if not os.path.exists(recordings_folder):
            try:
                os.makedirs(recordings_folder)
            except Exception:
                pass
        file_name = self.__class__.__module__.split(".")[-1] + "_rec.py"
        file_path = "%s/%s" % (recordings_folder, file_name)
        out_file = codecs.open(file_path, "w+", "utf-8")
        # (writelines() with a single joined string writes it whole.)
        out_file.writelines("\r\n".join(data))
        out_file.close()
        # --- Print a highlighted console message with the saved path ---
        rec_message = ">>> RECORDING SAVED as: "
        if not new_file:
            rec_message = ">>> RECORDING ADDED to: "
        star_len = len(rec_message) + len(file_path)
        try:
            terminal_size = os.get_terminal_size().columns
            if terminal_size > 30 and star_len > terminal_size:
                star_len = terminal_size
        except Exception:
            pass
        stars = "*" * star_len
        c1 = ""
        c2 = ""
        cr = ""
        if "linux" not in sys.platform:
            # Colorized output on non-Linux platforms only.
            colorama.init(autoreset=True)
            c1 = colorama.Fore.RED + colorama.Back.LIGHTYELLOW_EX
            c2 = colorama.Fore.LIGHTRED_EX + colorama.Back.LIGHTYELLOW_EX
            cr = colorama.Style.RESET_ALL
            rec_message = rec_message.replace(">>>", c2 + ">>>" + cr)
        print("\n\n%s%s%s%s\n%s" % (rec_message, c1, file_path, cr, stars))
def activate_jquery(self):
"""If "jQuery is not defined", use this method to activate it for use.
This happens because jQuery is not always defined on web sites."""
self.wait_for_ready_state_complete()
js_utils.activate_jquery(self.driver)
self.wait_for_ready_state_complete()
def __are_quotes_escaped(self, string):
return js_utils.are_quotes_escaped(string)
def __escape_quotes_if_needed(self, string):
return js_utils.escape_quotes_if_needed(string)
def __is_in_frame(self):
return js_utils.is_in_frame(self.driver)
def bring_active_window_to_front(self):
"""Brings the active browser window to the front.
This is useful when multiple drivers are being used."""
self.__check_scope()
try:
if not self.__is_in_frame():
# Only bring the window to the front if not in a frame
# because the driver resets itself to default content.
self.switch_to_window(self.driver.current_window_handle)
except Exception:
pass
def bring_to_front(self, selector, by=By.CSS_SELECTOR):
"""Updates the Z-index of a page element to bring it into view.
Useful when getting a WebDriverException, such as the one below:
{ Element is not clickable at point (#, #).
Other element would receive the click: ... }"""
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT
)
try:
selector = self.convert_to_css_selector(selector, by=by)
except Exception:
# Don't run action if can't convert to CSS_Selector for JavaScript
return
selector = re.escape(selector)
selector = self.__escape_quotes_if_needed(selector)
script = (
"""document.querySelector('%s').style.zIndex = '999999';"""
% selector
)
self.execute_script(script)
def highlight_click(
self, selector, by=By.CSS_SELECTOR, loops=3, scroll=True
):
self.__check_scope()
if not self.demo_mode:
self.highlight(selector, by=by, loops=loops, scroll=scroll)
self.click(selector, by=by)
def highlight_update_text(
self, selector, text, by=By.CSS_SELECTOR, loops=3, scroll=True
):
"""Highlights the element and then types text into the field."""
self.__check_scope()
if not self.demo_mode:
self.highlight(selector, by=by, loops=loops, scroll=scroll)
self.update_text(selector, text, by=by)
def highlight_type(
self, selector, text, by=By.CSS_SELECTOR, loops=3, scroll=True
):
"""Same as self.highlight_update_text()
As above, highlights the element and then types text into the field."""
self.__check_scope()
if not self.demo_mode:
self.highlight(selector, by=by, loops=loops, scroll=scroll)
self.update_text(selector, text, by=by)
    def highlight(self, selector, by=By.CSS_SELECTOR, loops=None, scroll=True):
        """This method uses fancy JavaScript to highlight an element.
        Used during demo_mode.
        @Params
        selector - the selector of the element to find
        by - the type of selector to search by (Default: CSS)
        loops - # of times to repeat the highlight animation
                (Default: settings.HIGHLIGHTS. Each loop lasts ~0.18s)
        scroll - the option to scroll to the element first (Default: True)
        """
        self.__check_scope()
        selector, by = self.__recalculate_selector(selector, by, xp_ok=False)
        element = self.wait_for_element_visible(
            selector, by=by, timeout=settings.SMALL_TIMEOUT
        )
        if not loops:
            loops = settings.HIGHLIGHTS
        if scroll:
            try:
                if self.browser != "safari":
                    # Pick smooth vs jQuery scrolling based on distance.
                    scroll_distance = js_utils.get_scroll_distance_to_element(
                        self.driver, element
                    )
                    if abs(scroll_distance) > constants.Values.SSMD:
                        self.__jquery_slow_scroll_to(selector, by)
                    else:
                        self.__slow_scroll_to_element(element)
                else:
                    self.__jquery_slow_scroll_to(selector, by)
            except Exception:
                # Scroll failed (e.g. stale element): re-acquire and retry.
                self.wait_for_ready_state_complete()
                time.sleep(0.12)
                element = self.wait_for_element_visible(
                    selector, by=by, timeout=settings.SMALL_TIMEOUT
                )
                self.__slow_scroll_to_element(element)
        try:
            selector = self.convert_to_css_selector(selector, by=by)
        except Exception:
            # Don't highlight if can't convert to CSS_SELECTOR
            return
        if self.highlights:
            loops = self.highlights
        if self.browser == "ie":
            loops = 1  # Override previous setting because IE is slow
        loops = int(loops)
        o_bs = ""  # original_box_shadow
        try:
            style = element.get_attribute("style")
        except Exception:
            # Element may have gone stale: re-acquire before reading style.
            self.wait_for_ready_state_complete()
            time.sleep(0.12)
            element = self.wait_for_element_visible(
                selector, by=By.CSS_SELECTOR, timeout=settings.SMALL_TIMEOUT
            )
            style = element.get_attribute("style")
        if style:
            # Remember the original box-shadow so it can be restored
            # after the highlight animation completes.
            if "box-shadow: " in style:
                box_start = style.find("box-shadow: ")
                box_end = style.find(";", box_start) + 1
                original_box_shadow = style[box_start:box_end]
                o_bs = original_box_shadow
        orig_selector = selector
        if ":contains" not in selector and ":first" not in selector:
            # Pure CSS selector: highlight with plain JavaScript.
            selector = re.escape(selector)
            selector = self.__escape_quotes_if_needed(selector)
            self.__highlight_with_js(selector, loops, o_bs)
        else:
            # jQuery-only selector syntax: highlight with jQuery.
            selector = self.__make_css_match_first_element_only(selector)
            selector = re.escape(selector)
            selector = self.__escape_quotes_if_needed(selector)
            try:
                self.__highlight_with_jquery(selector, loops, o_bs)
            except Exception:
                pass  # JQuery probably couldn't load. Skip highlighting.
        if self.recorder_mode:
            # Recorder Mode: log this highlight (unless recording paused).
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["hi_li", orig_selector, origin, time_stamp]
                        self.__extra_actions.append(action)
        time.sleep(0.065)
def __highlight_with_js(self, selector, loops, o_bs):
self.wait_for_ready_state_complete()
js_utils.highlight_with_js(self.driver, selector, loops, o_bs)
    def __highlight_with_jquery(self, selector, loops, o_bs):
        # Flash-highlight the element via jQuery (selector may use
        # jQuery-only syntax such as ":contains()").
        # "loops" is the number of highlight cycles; "o_bs" is the
        # original box-shadow style to restore afterwards.
        self.wait_for_ready_state_complete()
        js_utils.highlight_with_jquery(self.driver, selector, loops, o_bs)
def press_up_arrow(self, selector="html", times=1, by=By.CSS_SELECTOR):
"""Simulates pressing the UP Arrow on the keyboard.
By default, "html" will be used as the CSS Selector target.
You can specify how many times in-a-row the action happens."""
self.__check_scope()
if times < 1:
return
element = self.wait_for_element_present(selector)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode and not self.slow_mode:
self.__scroll_to_element(element, selector, by)
for i in range(int(times)):
try:
element.send_keys(Keys.ARROW_UP)
except Exception:
self.wait_for_ready_state_complete()
element = self.wait_for_element_visible(selector)
element.send_keys(Keys.ARROW_UP)
time.sleep(0.01)
if self.slow_mode:
time.sleep(0.1)
def press_down_arrow(self, selector="html", times=1, by=By.CSS_SELECTOR):
"""Simulates pressing the DOWN Arrow on the keyboard.
By default, "html" will be used as the CSS Selector target.
You can specify how many times in-a-row the action happens."""
self.__check_scope()
if times < 1:
return
element = self.wait_for_element_present(selector)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode and not self.slow_mode:
self.__scroll_to_element(element, selector, by)
for i in range(int(times)):
try:
element.send_keys(Keys.ARROW_DOWN)
except Exception:
self.wait_for_ready_state_complete()
element = self.wait_for_element_visible(selector)
element.send_keys(Keys.ARROW_DOWN)
time.sleep(0.01)
if self.slow_mode:
time.sleep(0.1)
def press_left_arrow(self, selector="html", times=1, by=By.CSS_SELECTOR):
"""Simulates pressing the LEFT Arrow on the keyboard.
By default, "html" will be used as the CSS Selector target.
You can specify how many times in-a-row the action happens."""
self.__check_scope()
if times < 1:
return
element = self.wait_for_element_present(selector)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode and not self.slow_mode:
self.__scroll_to_element(element, selector, by)
for i in range(int(times)):
try:
element.send_keys(Keys.ARROW_LEFT)
except Exception:
self.wait_for_ready_state_complete()
element = self.wait_for_element_visible(selector)
element.send_keys(Keys.ARROW_LEFT)
time.sleep(0.01)
if self.slow_mode:
time.sleep(0.1)
def press_right_arrow(self, selector="html", times=1, by=By.CSS_SELECTOR):
"""Simulates pressing the RIGHT Arrow on the keyboard.
By default, "html" will be used as the CSS Selector target.
You can specify how many times in-a-row the action happens."""
self.__check_scope()
if times < 1:
return
element = self.wait_for_element_present(selector)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode and not self.slow_mode:
self.__scroll_to_element(element, selector, by)
for i in range(int(times)):
try:
element.send_keys(Keys.ARROW_RIGHT)
except Exception:
self.wait_for_ready_state_complete()
element = self.wait_for_element_visible(selector)
element.send_keys(Keys.ARROW_RIGHT)
time.sleep(0.01)
if self.slow_mode:
time.sleep(0.1)
def scroll_to(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Fast scroll to destination """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if self.demo_mode or self.slow_mode:
self.slow_scroll_to(selector, by=by, timeout=timeout)
return
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
try:
self.__scroll_to_element(element, selector, by)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.12)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
self.__scroll_to_element(element, selector, by)
    def scroll_to_element(self, selector, by=By.CSS_SELECTOR, timeout=None):
        # Alias of self.scroll_to() (kept for API-name discoverability).
        self.scroll_to(selector, by=by, timeout=timeout)
def slow_scroll_to(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Slow motion scroll to destination """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
try:
scroll_distance = js_utils.get_scroll_distance_to_element(
self.driver, element
)
if abs(scroll_distance) > constants.Values.SSMD:
self.__jquery_slow_scroll_to(selector, by)
else:
self.__slow_scroll_to_element(element)
except Exception:
self.wait_for_ready_state_complete()
time.sleep(0.12)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
self.__slow_scroll_to_element(element)
    def slow_scroll_to_element(
        self, selector, by=By.CSS_SELECTOR, timeout=None
    ):
        # Alias of self.slow_scroll_to() (kept for API-name discoverability).
        self.slow_scroll_to(selector, by=by, timeout=timeout)
def scroll_to_top(self):
""" Scroll to the top of the page. """
self.__check_scope()
scroll_script = "window.scrollTo(0, 0);"
try:
self.execute_script(scroll_script)
time.sleep(0.012)
return True
except Exception:
return False
def scroll_to_bottom(self):
""" Scroll to the bottom of the page. """
self.__check_scope()
scroll_script = "window.scrollTo(0, 10000);"
try:
self.execute_script(scroll_script)
time.sleep(0.012)
return True
except Exception:
return False
    def click_xpath(self, xpath):
        """Click an element located via an XPath selector."""
        # Technically self.click() will automatically detect an xpath selector,
        # so self.click_xpath() is just a longer name for the same action.
        self.click(xpath, by=By.XPATH)
    def js_click(
        self, selector, by=By.CSS_SELECTOR, all_matches=False, scroll=True
    ):
        """Clicks an element using JavaScript.
        Can be used to click hidden / invisible elements.
        If "all_matches" is False, only the first match is clicked.
        If "scroll" is False, won't scroll unless running in Demo Mode."""
        self.wait_for_ready_state_complete()
        selector, by = self.__recalculate_selector(selector, by, xp_ok=False)
        if by == By.LINK_TEXT:
            # Pure JS cannot locate by link text; fall back to WebDriver.
            message = (
                "Pure JavaScript doesn't support clicking by Link Text. "
                "You may want to use self.jquery_click() instead, which "
                "allows this with :contains(), assuming jQuery isn't blocked. "
                "For now, self.js_click() will use a regular WebDriver click."
            )
            logging.debug(message)
            self.click(selector, by=by)
            return
        element = self.wait_for_element_present(
            selector, by=by, timeout=settings.SMALL_TIMEOUT
        )
        if self.is_element_visible(selector, by=by):
            self.__demo_mode_highlight_if_active(selector, by)
            if scroll and not self.demo_mode and not self.slow_mode:
                # If JS scrolling fails, re-find the element after the
                # page has finished loading.
                success = js_utils.scroll_to_element(self.driver, element)
                if not success:
                    self.wait_for_ready_state_complete()
                    timeout = settings.SMALL_TIMEOUT
                    element = page_actions.wait_for_element_present(
                        self.driver, selector, by, timeout=timeout
                    )
        css_selector = self.convert_to_css_selector(selector, by=by)
        css_selector = re.escape(css_selector)  # Add "\\" to special chars
        css_selector = self.__escape_quotes_if_needed(css_selector)
        action = None
        # Snapshot URL / window count to detect navigation and new tabs.
        pre_action_url = self.driver.current_url
        pre_window_count = len(self.driver.window_handles)
        if self.recorder_mode and not self.__dont_record_js_click:
            # Build the recorder action ("js_cl" / "js_ca") with the
            # href of anchor targets when one can be determined.
            time_stamp = self.execute_script("return Date.now();")
            tag_name = None
            href = ""
            if ":contains\\(" not in css_selector:
                tag_name = self.execute_script(
                    "return document.querySelector('%s').tagName.toLowerCase()"
                    % css_selector
                )
            if tag_name == "a":
                href = self.execute_script(
                    "return document.querySelector('%s').href" % css_selector
                )
            origin = self.get_origin()
            href_origin = [href, origin]
            action = ["js_cl", selector, href_origin, time_stamp]
            if all_matches:
                action[0] = "js_ca"
        if not all_matches:
            # ":contains()" selectors require jQuery; otherwise pure JS.
            if ":contains\\(" not in css_selector:
                self.__js_click(selector, by=by)
            else:
                click_script = """jQuery('%s')[0].click();""" % css_selector
                self.safe_execute_script(click_script)
        else:
            if ":contains\\(" not in css_selector:
                self.__js_click_all(selector, by=by)
            else:
                click_script = """jQuery('%s').click();""" % css_selector
                self.safe_execute_script(click_script)
        if self.recorder_mode and action:
            self.__extra_actions.append(action)
        # If the click opened a new window/tab, optionally switch to it.
        latest_window_count = len(self.driver.window_handles)
        if (
            latest_window_count > pre_window_count
            and (
                self.recorder_mode
                or (
                    settings.SWITCH_TO_NEW_TABS_ON_CLICK
                    and self.driver.current_url == pre_action_url
                )
            )
        ):
            self.__switch_to_newest_window_if_not_blank()
        self.wait_for_ready_state_complete()
        self.__demo_mode_pause_if_active()
def js_click_all(self, selector, by=By.CSS_SELECTOR):
""" Clicks all matching elements using pure JS. (No jQuery) """
self.js_click(selector, by=By.CSS_SELECTOR, all_matches=True)
def jquery_click(self, selector, by=By.CSS_SELECTOR):
"""Clicks an element using jQuery. (Different from using pure JS.)
Can be used to click hidden / invisible elements."""
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by, xp_ok=False)
self.wait_for_element_present(
selector, by=by, timeout=settings.SMALL_TIMEOUT
)
if self.is_element_visible(selector, by=by):
self.__demo_mode_highlight_if_active(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
click_script = """jQuery('%s')[0].click();""" % selector
self.safe_execute_script(click_script)
self.__demo_mode_pause_if_active()
def jquery_click_all(self, selector, by=By.CSS_SELECTOR):
""" Clicks all matching elements using jQuery. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by, xp_ok=False)
self.wait_for_element_present(
selector, by=by, timeout=settings.SMALL_TIMEOUT
)
if self.is_element_visible(selector, by=by):
self.__demo_mode_highlight_if_active(selector, by)
css_selector = self.convert_to_css_selector(selector, by=by)
click_script = """jQuery('%s').click();""" % css_selector
self.safe_execute_script(click_script)
self.__demo_mode_pause_if_active()
def hide_element(self, selector, by=By.CSS_SELECTOR):
""" Hide the first element on the page that matches the selector. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
hide_script = """jQuery('%s').hide();""" % selector
self.safe_execute_script(hide_script)
def hide_elements(self, selector, by=By.CSS_SELECTOR):
""" Hide all elements on the page that match the selector. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
hide_script = """jQuery('%s').hide();""" % selector
self.safe_execute_script(hide_script)
def show_element(self, selector, by=By.CSS_SELECTOR):
""" Show the first element on the page that matches the selector. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
show_script = """jQuery('%s').show(0);""" % selector
self.safe_execute_script(show_script)
def show_elements(self, selector, by=By.CSS_SELECTOR):
""" Show all elements on the page that match the selector. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
show_script = """jQuery('%s').show(0);""" % selector
self.safe_execute_script(show_script)
def remove_element(self, selector, by=By.CSS_SELECTOR):
""" Remove the first element on the page that matches the selector. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
remove_script = """jQuery('%s').remove();""" % selector
self.safe_execute_script(remove_script)
def remove_elements(self, selector, by=By.CSS_SELECTOR):
""" Remove all elements on the page that match the selector. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
remove_script = """jQuery('%s').remove();""" % selector
self.safe_execute_script(remove_script)
    def ad_block(self):
        """ Block ads that appear on the current web page. """
        from seleniumbase.config import ad_block_list
        self.__check_scope()  # Using wait_for_RSC would cause an infinite loop
        # Remove every element matching a known ad selector, one
        # selector at a time, via a raw querySelectorAll() loop.
        for css_selector in ad_block_list.AD_BLOCK_LIST:
            css_selector = re.escape(css_selector)  # Add "\\" to special chars
            css_selector = self.__escape_quotes_if_needed(css_selector)
            script = (
                """var $elements = document.querySelectorAll('%s');
               var index = 0, length = $elements.length;
               for(; index < length; index++){
               $elements[index].remove();}"""
                % css_selector
            )
            try:
                self.execute_script(script)
            except Exception:
                pass  # Don't fail test if ad_blocking fails
def show_file_choosers(self):
"""Display hidden file-chooser input fields on sites if present."""
css_selector = 'input[type="file"]'
try:
self.show_elements(css_selector)
except Exception:
pass
css_selector = re.escape(css_selector) # Add "\\" to special chars
css_selector = self.__escape_quotes_if_needed(css_selector)
script = (
"""var $elements = document.querySelectorAll('%s');
var index = 0, length = $elements.length;
for(; index < length; index++){
the_class = $elements[index].getAttribute('class');
new_class = the_class.replaceAll('hidden', 'visible');
$elements[index].setAttribute('class', new_class);}"""
% css_selector
)
try:
self.execute_script(script)
except Exception:
pass
if self.recorder_mode:
url = self.get_current_url()
if url and len(url) > 0:
if ("http:") in url or ("https:") in url or ("file:") in url:
if self.get_session_storage_item("pause_recorder") == "no":
time_stamp = self.execute_script("return Date.now();")
origin = self.get_origin()
action = ["sh_fc", "", origin, time_stamp]
self.__extra_actions.append(action)
    def get_domain_url(self, url):
        """Return the domain URL for the given URL, as computed by
        page_utils.get_domain_url(). (Presumably scheme + host —
        verify exact behavior in page_utils.)"""
        self.__check_scope()
        return page_utils.get_domain_url(url)
def get_beautiful_soup(self, source=None):
"""BeautifulSoup is a toolkit for dissecting an HTML document
and extracting what you need. It's great for screen-scraping!
See: https://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
from bs4 import BeautifulSoup
if not source:
source = self.get_page_source()
soup = BeautifulSoup(source, "html.parser")
return soup
def get_unique_links(self):
"""Get all unique links in the html of the page source.
Page links include those obtained from:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src".
"""
page_url = self.get_current_url()
soup = self.get_beautiful_soup(self.get_page_source())
links = page_utils._get_unique_links(page_url, soup)
return links
def get_link_status_code(
self,
link,
allow_redirects=False,
timeout=5,
verify=False,
):
"""Get the status code of a link.
If the timeout is set to less than 1, it becomes 1.
If the timeout is exceeded by requests.get(), it will return a 404.
If "verify" is False, will ignore certificate errors.
For a list of available status codes, see:
https://en.wikipedia.org/wiki/List_of_HTTP_status_codes
"""
if self.__requests_timeout:
timeout = self.__requests_timeout
if timeout < 1:
timeout = 1
status_code = page_utils._get_link_status_code(
link,
allow_redirects=allow_redirects,
timeout=timeout,
verify=verify,
)
return status_code
def assert_link_status_code_is_not_404(self, link):
status_code = str(self.get_link_status_code(link))
bad_link_str = 'Error: "%s" returned a 404!' % link
self.assertNotEqual(status_code, "404", bad_link_str)
def __get_link_if_404_error(self, link):
status_code = str(self.get_link_status_code(link))
if status_code == "404":
# Verify again to be sure. (In case of multi-threading overload.)
status_code = str(self.get_link_status_code(link))
if status_code == "404":
return link
else:
return None
else:
return None
    def assert_no_404_errors(self, multithreaded=True, timeout=None):
        """Assert no 404 errors from page links obtained from:
        "a"->"href", "img"->"src", "link"->"href", and "script"->"src".
        Timeout is on a per-link basis using the "requests" library.
        (A 404 error represents a broken link on a web page.)
        """
        all_links = self.get_unique_links()
        links = []
        # Filter out inline-data / mail / JS links and known-good CDNs.
        for link in all_links:
            if (
                "data:" not in link
                and "mailto:" not in link
                and "javascript:" not in link
                and "://fonts.gstatic.com" not in link
                and "://fonts.googleapis.com" not in link
                and "://googleads.g.doubleclick.net" not in link
            ):
                links.append(link)
        if timeout:
            if not type(timeout) is int and not type(timeout) is float:
                raise Exception('Expecting a numeric value for "timeout"!')
            if timeout < 0:
                raise Exception('The "timeout" cannot be a negative number!')
            # Stored on the instance so get_link_status_code() picks it up.
            self.__requests_timeout = timeout
        broken_links = []
        if multithreaded:
            # Check up to 10 links concurrently via a thread pool.
            from multiprocessing.dummy import Pool as ThreadPool
            pool = ThreadPool(10)
            results = pool.map(self.__get_link_if_404_error, links)
            pool.close()
            pool.join()
            for result in results:
                if result:
                    broken_links.append(result)
        else:
            broken_links = []
            for link in links:
                if self.__get_link_if_404_error(link):
                    broken_links.append(link)
        self.__requests_timeout = None  # Reset the requests.get() timeout
        if len(broken_links) > 0:
            broken_links = sorted(broken_links)
            bad_links_str = "\n".join(broken_links)
            if len(broken_links) == 1:
                self.fail("Broken link detected:\n%s" % bad_links_str)
            elif len(broken_links) > 1:
                self.fail("Broken links detected:\n%s" % bad_links_str)
        if self.demo_mode:
            # Show a translated success message in Demo Mode.
            a_t = "ASSERT NO 404 ERRORS"
            if self._language != "English":
                from seleniumbase.fixtures.words import SD
                a_t = SD.translate_assert_no_404_errors(self._language)
            messenger_post = "%s" % a_t
            self.__highlight_with_assert_success(messenger_post, "html")
def print_unique_links_with_status_codes(self):
"""Finds all unique links in the html of the page source
and then prints out those links with their status codes.
Format: ["link" -> "status_code"] (per line)
Page links include those obtained from:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src".
"""
page_url = self.get_current_url()
soup = self.get_beautiful_soup(self.get_page_source())
page_utils._print_unique_links_with_status_codes(page_url, soup)
def __fix_unicode_conversion(self, text):
""" Fixing Chinese characters when converting from PDF to HTML. """
text = text.replace("\u2f8f", "\u884c")
text = text.replace("\u2f45", "\u65b9")
text = text.replace("\u2f08", "\u4eba")
text = text.replace("\u2f70", "\u793a")
text = text.replace("\xe2\xbe\x8f", "\xe8\xa1\x8c")
text = text.replace("\xe2\xbd\xb0", "\xe7\xa4\xba")
text = text.replace("\xe2\xbe\x8f", "\xe8\xa1\x8c")
text = text.replace("\xe2\xbd\x85", "\xe6\x96\xb9")
return text
def get_pdf_text(
self,
pdf,
page=None,
maxpages=None,
password=None,
codec="utf-8",
wrap=False,
nav=False,
override=False,
caching=True,
):
"""Gets text from a PDF file.
PDF can be either a URL or a file path on the local file system.
@Params
pdf - The URL or file path of the PDF file.
page - The page number (or a list of page numbers) of the PDF.
If a page number is provided, looks only at that page.
(1 is the first page, 2 is the second page, etc.)
If no page number is provided, returns all PDF text.
maxpages - Instead of providing a page number, you can provide
the number of pages to use from the beginning.
password - If the PDF is password-protected, enter it here.
codec - The compression format for character encoding.
(The default codec used by this method is 'utf-8'.)
wrap - Replaces ' \n' with ' ' so that individual sentences
from a PDF don't get broken up into separate lines when
getting converted into text format.
nav - If PDF is a URL, navigates to the URL in the browser first.
(Not needed because the PDF will be downloaded anyway.)
override - If the PDF file to be downloaded already exists in the
downloaded_files/ folder, that PDF will be used
instead of downloading it again.
caching - If resources should be cached via pdfminer."""
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
from pdfminer.high_level import extract_text
if not password:
password = ""
if not maxpages:
maxpages = 0
if not pdf.lower().endswith(".pdf"):
raise Exception("%s is not a PDF file! (Expecting a .pdf)" % pdf)
file_path = None
if page_utils.is_valid_url(pdf):
downloads_folder = download_helper.get_downloads_folder()
if nav:
if self.get_current_url() != pdf:
self.open(pdf)
file_name = pdf.split("/")[-1]
file_path = downloads_folder + "/" + file_name
if not os.path.exists(file_path):
self.download_file(pdf)
elif override:
self.download_file(pdf)
else:
if not os.path.exists(pdf):
raise Exception("%s is not a valid URL or file path!" % pdf)
file_path = os.path.abspath(pdf)
page_search = None # (Pages are delimited by '\x0c')
if type(page) is list:
pages = page
page_search = []
for page in pages:
page_search.append(page - 1)
elif type(page) is int:
page = page - 1
if page < 0:
page = 0
page_search = [page]
else:
page_search = None
pdf_text = extract_text(
file_path,
password="",
page_numbers=page_search,
maxpages=maxpages,
caching=caching,
codec=codec,
)
pdf_text = self.__fix_unicode_conversion(pdf_text)
if wrap:
pdf_text = pdf_text.replace(" \n", " ")
pdf_text = pdf_text.strip() # Remove leading and trailing whitespace
return pdf_text
    def assert_pdf_text(
        self,
        pdf,
        text,
        page=None,
        maxpages=None,
        password=None,
        codec="utf-8",
        wrap=True,
        nav=False,
        override=False,
        caching=True,
    ):
        """Asserts text in a PDF file.
        PDF can be either a URL or a file path on the local file system.
        @Params
        pdf - The URL or file path of the PDF file.
        text - The expected text to verify in the PDF.
        page - The page number of the PDF to use (optional).
                If a page number is provided, looks only at that page.
                    (1 is the first page, 2 is the second page, etc.)
                If no page number is provided, looks at all the pages.
        maxpages - Instead of providing a page number, you can provide
                   the number of pages to use from the beginning.
        password - If the PDF is password-protected, enter it here.
        codec - The compression format for character encoding.
                (The default codec used by this method is 'utf-8'.)
        wrap - Replaces ' \n' with ' ' so that individual sentences
               from a PDF don't get broken up into separate lines when
               getting converted into text format.
        nav - If PDF is a URL, navigates to the URL in the browser first.
              (Not needed because the PDF will be downloaded anyway.)
        override - If the PDF file to be downloaded already exists in the
                   downloaded_files/ folder, that PDF will be used
                   instead of downloading it again.
        caching - If resources should be cached via pdfminer."""
        # Normalize the expected text the same way the PDF text is
        # normalized, so comparisons match.
        text = self.__fix_unicode_conversion(text)
        if not codec:
            codec = "utf-8"
        pdf_text = self.get_pdf_text(
            pdf,
            page=page,
            maxpages=maxpages,
            password=password,
            codec=codec,
            wrap=wrap,
            nav=nav,
            override=override,
            caching=caching,
        )
        # Raise with a page-specific message when a single page was given.
        if type(page) is int:
            if text not in pdf_text:
                raise Exception(
                    "PDF [%s] is missing expected text [%s] on "
                    "page [%s]!" % (pdf, text, page)
                )
        else:
            if text not in pdf_text:
                raise Exception(
                    "PDF [%s] is missing expected text [%s]!" % (pdf, text)
                )
        return True
def create_folder(self, folder):
""" Creates a folder of the given name if it doesn't already exist. """
if folder.endswith("/"):
folder = folder[:-1]
if len(folder) < 1:
raise Exception("Minimum folder name length = 1.")
if not os.path.exists(folder):
try:
os.makedirs(folder)
except Exception:
pass
    def choose_file(
        self, selector, file_path, by=By.CSS_SELECTOR, timeout=None
    ):
        """This method is used to choose a file to upload to a website.
        It works by populating a file-chooser "input" field of type="file".
        A relative file_path will get converted into an absolute file_path.
        Example usage:
            self.choose_file('input[type="file"]', "my_dir/my_file.txt")
        """
        self.__check_scope()
        if not timeout:
            timeout = settings.LARGE_TIMEOUT
        if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        selector, by = self.__recalculate_selector(selector, by)
        abs_path = os.path.abspath(file_path)
        element = self.wait_for_element_present(
            selector, by=by, timeout=timeout
        )
        if self.is_element_visible(selector, by=by):
            self.__demo_mode_highlight_if_active(selector, by)
            if not self.demo_mode and not self.slow_mode:
                self.__scroll_to_element(element, selector, by)
        else:
            # Hidden file inputs: try to make them visible first.
            choose_file_selector = 'input[type="file"]'
            if self.is_element_present(choose_file_selector):
                if not self.is_element_visible(choose_file_selector):
                    self.show_file_choosers()
                    if self.is_element_visible(selector, by=by):
                        self.__demo_mode_highlight_if_active(selector, by)
                        if not self.demo_mode and not self.slow_mode:
                            self.__scroll_to_element(element, selector, by)
        pre_action_url = self.driver.current_url
        if self.recorder_mode:
            # Record a "chfil" action when the recorder is active.
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        sele_file_path = [selector, file_path]
                        action = ["chfil", sele_file_path, origin, time_stamp]
                        self.__extra_actions.append(action)
        if type(abs_path) is int or type(abs_path) is float:
            abs_path = str(abs_path)
        try:
            if self.browser == "safari":
                try:
                    element.send_keys(abs_path)
                except NoSuchElementException:
                    pass  # May get this error on Safari even if upload works.
            else:
                element.send_keys(abs_path)
        except (StaleElementReferenceException, ENI_Exception):
            # Element went stale: wait, re-find, and retry the send_keys.
            self.wait_for_ready_state_complete()
            time.sleep(0.16)
            element = self.wait_for_element_present(
                selector, by=by, timeout=timeout
            )
            if self.browser == "safari":
                try:
                    element.send_keys(abs_path)
                except NoSuchElementException:
                    pass  # May get this error on Safari even if upload works.
            else:
                element.send_keys(abs_path)
        if self.demo_mode:
            if self.driver.current_url != pre_action_url:
                self.__demo_mode_pause_if_active()
            else:
                self.__demo_mode_pause_if_active(tiny=True)
        elif self.slow_mode:
            self.__slow_mode_pause_if_active()
    def save_element_as_image_file(
        self, selector, file_name, folder=None, overlay_text=""
    ):
        """Take a screenshot of an element and save it as an image file.
        If no folder is specified, will save it to the current folder.
        If overlay_text is provided, will add that to the saved image."""
        element = self.wait_for_element_visible(selector)
        element_png = element.screenshot_as_png
        if len(file_name.split(".")[0]) < 1:
            raise Exception("Error: file_name length must be > 0.")
        if not file_name.endswith(".png"):
            file_name = file_name + ".png"
        image_file_path = None
        if folder:
            # Trim a single trailing slash and create the folder if needed.
            if folder.endswith("/"):
                folder = folder[:-1]
            if len(folder) > 0:
                self.create_folder(folder)
                image_file_path = "%s/%s" % (folder, file_name)
        if not image_file_path:
            image_file_path = file_name
        with open(image_file_path, "wb") as file:
            file.write(element_png)
        # Add a text overlay if given
        if type(overlay_text) is str and len(overlay_text) > 0:
            from PIL import Image, ImageDraw
            # Size the yellow background box to the overlay text.
            text_rows = overlay_text.split("\n")
            len_text_rows = len(text_rows)
            max_width = 0
            for text_row in text_rows:
                if len(text_row) > max_width:
                    max_width = len(text_row)
            image = Image.open(image_file_path)
            draw = ImageDraw.Draw(image)
            draw.rectangle(
                (0, 0, (max_width * 6) + 6, 16 * len_text_rows),
                fill=(236, 236, 28),
            )
            draw.text(
                (4, 2),  # Coordinates
                overlay_text,  # Text
                (8, 38, 176),  # Color
            )
            image.save(image_file_path, "PNG", quality=100, optimize=True)
    def download_file(self, file_url, destination_folder=None):
        """Downloads the file from the url to the destination folder.
        If no destination folder is specified, the default one is used.
        (The default [Downloads Folder] = "./downloaded_files")"""
        if not destination_folder:
            destination_folder = constants.Files.DOWNLOADS_FOLDER
        if not os.path.exists(destination_folder):
            os.makedirs(destination_folder)
        page_utils._download_file_to(file_url, destination_folder)
        if self.recorder_mode:
            # Record a "do_fi" action when the recorder is active.
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        url_dest = [file_url, destination_folder]
                        action = ["do_fi", url_dest, origin, time_stamp]
                        self.__extra_actions.append(action)
def save_file_as(self, file_url, new_file_name, destination_folder=None):
"""Similar to self.download_file(), except that you get to rename the
file being downloaded to whatever you want."""
if not destination_folder:
destination_folder = constants.Files.DOWNLOADS_FOLDER
page_utils._download_file_to(
file_url, destination_folder, new_file_name
)
def save_data_as(self, data, file_name, destination_folder=None):
"""Saves the data specified to a file of the name specified.
If no destination folder is specified, the default one is used.
(The default [Downloads Folder] = "./downloaded_files")"""
if not destination_folder:
destination_folder = constants.Files.DOWNLOADS_FOLDER
page_utils._save_data_as(data, destination_folder, file_name)
    def get_downloads_folder(self):
        """Returns the path of the SeleniumBase "downloaded_files/" folder.
        Calling self.download_file(file_url) will put that file in here.
        With the exception of Safari, IE, and Chromium Guest Mode,
        any clicks that download files will also use this folder
        rather than using the browser's default "downloads/" path."""
        self.__check_scope()
        return download_helper.get_downloads_folder()
def get_browser_downloads_folder(self):
"""Returns the path that is used when a click initiates a download.
SeleniumBase overrides the system path to be "downloaded_files/"
The path can't be changed on Safari, IE, or Chromium Guest Mode.
The same problem occurs when using an out-of-date chromedriver.
"""
self.__check_scope()
if self.is_chromium() and self.guest_mode and not self.headless:
# Guest Mode (non-headless) can force the default downloads path
return os.path.join(os.path.expanduser("~"), "downloads")
elif self.browser == "safari" or self.browser == "ie":
# Can't change the system [Downloads Folder] on Safari or IE
return os.path.join(os.path.expanduser("~"), "downloads")
elif (
self.driver.capabilities["browserName"].lower() == "chrome"
and int(self.get_chromedriver_version().split(".")[0]) < 73
and self.headless
):
return os.path.join(os.path.expanduser("~"), "downloads")
else:
return download_helper.get_downloads_folder()
return os.path.join(os.path.expanduser("~"), "downloads")
def get_path_of_downloaded_file(self, file, browser=False):
""" Returns the OS path of the downloaded file. """
if browser:
return os.path.join(self.get_browser_downloads_folder(), file)
else:
return os.path.join(self.get_downloads_folder(), file)
def is_downloaded_file_present(self, file, browser=False):
"""Returns True if the file exists in the pre-set [Downloads Folder].
For browser click-initiated downloads, SeleniumBase will override
the system [Downloads Folder] to be "./downloaded_files/",
but that path can't be overridden when using Safari, IE,
or Chromium Guest Mode, which keeps the default system path.
self.download_file(file_url) will always use "./downloaded_files/".
@Params
file - The filename of the downloaded file.
browser - If True, uses the path set by click-initiated downloads.
If False, uses the self.download_file(file_url) path.
Those paths are often the same. (browser-dependent)
(Default: False).
"""
return os.path.exists(
self.get_path_of_downloaded_file(file, browser=browser)
)
def delete_downloaded_file_if_present(self, file, browser=False):
"""Deletes the file from the [Downloads Folder] if the file exists.
For browser click-initiated downloads, SeleniumBase will override
the system [Downloads Folder] to be "./downloaded_files/",
but that path can't be overridden when using Safari, IE,
or Chromium Guest Mode, which keeps the default system path.
self.download_file(file_url) will always use "./downloaded_files/".
@Params
file - The filename to be deleted from the [Downloads Folder].
browser - If True, uses the path set by click-initiated downloads.
If False, uses the self.download_file(file_url) path.
Those paths are usually the same. (browser-dependent)
(Default: False).
"""
if self.is_downloaded_file_present(file, browser=browser):
file_path = self.get_path_of_downloaded_file(file, browser=browser)
try:
os.remove(file_path)
except Exception:
pass
def delete_downloaded_file(self, file, browser=False):
"""Same as self.delete_downloaded_file_if_present()
Deletes the file from the [Downloads Folder] if the file exists.
For browser click-initiated downloads, SeleniumBase will override
the system [Downloads Folder] to be "./downloaded_files/",
but that path can't be overridden when using Safari, IE,
or Chromium Guest Mode, which keeps the default system path.
self.download_file(file_url) will always use "./downloaded_files/".
@Params
file - The filename to be deleted from the [Downloads Folder].
browser - If True, uses the path set by click-initiated downloads.
If False, uses the self.download_file(file_url) path.
Those paths are usually the same. (browser-dependent)
(Default: False).
"""
if self.is_downloaded_file_present(file, browser=browser):
file_path = self.get_path_of_downloaded_file(file, browser=browser)
try:
os.remove(file_path)
except Exception:
pass
    def assert_downloaded_file(self, file, timeout=None, browser=False):
        """Asserts that the file exists in SeleniumBase's [Downloads Folder].
        For browser click-initiated downloads, SeleniumBase will override
        the system [Downloads Folder] to be "./downloaded_files/",
        but that path can't be overridden when using Safari, IE,
        or Chromium Guest Mode, which keeps the default system path.
        self.download_file(file_url) will always use "./downloaded_files/".
        @Params
        file - The filename of the downloaded file.
        timeout - The time (seconds) to wait for the download to complete.
        browser - If True, uses the path set by click-initiated downloads.
                  If False, uses the self.download_file(file_url) path.
                  Those paths are often the same. (browser-dependent)
                  (Default: False).
        """
        self.__check_scope()
        if not timeout:
            timeout = settings.LARGE_TIMEOUT
        if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        start_ms = time.time() * 1000.0
        stop_ms = start_ms + (timeout * 1000.0)
        downloaded_file_path = self.get_path_of_downloaded_file(file, browser)
        found = False
        # Poll roughly once per second until the file appears or time is up.
        for x in range(int(timeout)):
            shared_utils.check_if_time_limit_exceeded()
            try:
                self.assertTrue(
                    os.path.exists(downloaded_file_path),
                    "File [%s] was not found in the downloads folder [%s]!"
                    % (file, self.get_downloads_folder()),
                )
                found = True
                break
            except Exception:
                # Not there yet: stop if past the deadline, else wait 1s.
                now_ms = time.time() * 1000.0
                if now_ms >= stop_ms:
                    break
                time.sleep(1)
        # One last existence check (the file may have just landed).
        if not found and not os.path.exists(downloaded_file_path):
            message = (
                "File {%s} was not found in the downloads folder {%s} "
                "after %s seconds! (Or the download didn't complete!)"
                % (file, self.get_downloads_folder(), timeout)
            )
            page_actions.timeout_exception("NoSuchFileException", message)
        if self.recorder_mode:
            # Record this assertion ("as_df") so it replays in exported tests.
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["as_df", file, origin, time_stamp]
                        self.__extra_actions.append(action)
        if self.demo_mode:
            # Demo Mode: show an on-page success banner (non-essential).
            messenger_post = "ASSERT DOWNLOADED FILE: [%s]" % file
            try:
                js_utils.activate_jquery(self.driver)
                js_utils.post_messenger_success_message(
                    self.driver, messenger_post, self.message_duration
                )
            except Exception:
                pass
def assert_true(self, expr, msg=None):
"""Asserts that the expression is True.
Will raise an exception if the statement if False."""
self.assertTrue(expr, msg=msg)
def assert_false(self, expr, msg=None):
"""Asserts that the expression is False.
Will raise an exception if the statement if True."""
self.assertFalse(expr, msg=msg)
def assert_equal(self, first, second, msg=None):
"""Asserts that the two values are equal.
Will raise an exception if the values are not equal."""
self.assertEqual(first, second, msg=msg)
def assert_not_equal(self, first, second, msg=None):
"""Asserts that the two values are not equal.
Will raise an exception if the values are equal."""
self.assertNotEqual(first, second, msg=msg)
def assert_in(self, first, second, msg=None):
"""Asserts that the first string is in the second string.
Will raise an exception if the first string is not in the second."""
self.assertIn(first, second, msg=msg)
def assert_not_in(self, first, second, msg=None):
"""Asserts that the first string is not in the second string.
Will raise an exception if the first string is in the second string."""
self.assertNotIn(first, second, msg=msg)
def assert_raises(self, *args, **kwargs):
"""Asserts that the following block of code raises an exception.
Will raise an exception if the block of code has no exception.
Usage Example =>
# Verify that the expected exception is raised.
with self.assert_raises(Exception):
raise Exception("Expected Exception!")
"""
return self.assertRaises(*args, **kwargs)
def wait_for_attribute(
self, selector, attribute, value=None, by=By.CSS_SELECTOR, timeout=None
):
"""Raises an exception if the element attribute/value is not found.
If the value is not specified, the attribute only needs to exist.
Returns the element that contains the attribute if successful.
Default timeout = LARGE_TIMEOUT."""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
return self.__wait_for_shadow_attribute_present(
selector, attribute, value=value, timeout=timeout
)
return page_actions.wait_for_attribute(
self.driver,
selector,
attribute,
value=value,
by=by,
timeout=timeout,
)
def assert_attribute(
self, selector, attribute, value=None, by=By.CSS_SELECTOR, timeout=None
):
"""Raises an exception if the element attribute/value is not found.
If the value is not specified, the attribute only needs to exist.
Returns True if successful. Default timeout = SMALL_TIMEOUT."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_attribute(
selector, attribute, value=value, by=by, timeout=timeout
)
if (
self.demo_mode
and not self.__is_shadow_selector(selector)
and self.is_element_visible(selector, by=by)
):
a_a = "ASSERT ATTRIBUTE"
i_n = "in"
if self._language != "English":
from seleniumbase.fixtures.words import SD
a_a = SD.translate_assert_attribute(self._language)
i_n = SD.translate_in(self._language)
if not value:
messenger_post = "%s: {%s} %s %s: %s" % (
a_a,
attribute,
i_n,
by.upper(),
selector,
)
else:
messenger_post = '%s: {%s == "%s"} %s %s: %s' % (
a_a,
attribute,
value,
i_n,
by.upper(),
selector,
)
self.__highlight_with_assert_success(messenger_post, selector, by)
if self.recorder_mode:
url = self.get_current_url()
if url and len(url) > 0:
if ("http:") in url or ("https:") in url or ("file:") in url:
if self.get_session_storage_item("pause_recorder") == "no":
time_stamp = self.execute_script("return Date.now();")
origin = self.get_origin()
value = value.replace("\\", "\\\\")
sel_att_val = [selector, attribute, value]
action = ["as_at", sel_att_val, origin, time_stamp]
self.__extra_actions.append(action)
return True
    def assert_title(self, title):
        """Asserts that the web page title matches the expected title.
        When a web page initially loads, the title starts as the URL,
        but then the title switches over to the actual page title.
        In Recorder Mode, this assertion is skipped because the Recorder
        changes the page title to the selector of the hovered element.
        """
        self.wait_for_ready_state_complete()
        expected = title.strip()
        actual = self.get_page_title().strip()
        error = (
            "Expected page title [%s] does not match the actual title [%s]!"
        )
        # Up to three attempts: the title may still be transitioning from
        # the URL to the real page title, so retry after short waits.
        try:
            if not self.recorder_mode:
                self.assertEqual(expected, actual, error % (expected, actual))
        except Exception:
            # First retry after waiting for the page to settle.
            self.wait_for_ready_state_complete()
            self.sleep(settings.MINI_TIMEOUT)
            actual = self.get_page_title().strip()
            try:
                self.assertEqual(expected, actual, error % (expected, actual))
            except Exception:
                # Final attempt: let any failure here propagate.
                self.wait_for_ready_state_complete()
                self.sleep(settings.MINI_TIMEOUT)
                actual = self.get_page_title().strip()
                self.assertEqual(expected, actual, error % (expected, actual))
        if self.demo_mode and not self.recorder_mode:
            # Demo Mode: highlight <html> with a (possibly translated) banner.
            a_t = "ASSERT TITLE"
            if self._language != "English":
                from seleniumbase.fixtures.words import SD
                a_t = SD.translate_assert_title(self._language)
            messenger_post = "%s: {%s}" % (a_t, title)
            self.__highlight_with_assert_success(messenger_post, "html")
        if self.recorder_mode:
            # Record this assertion ("as_ti") so it replays in exported tests.
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["as_ti", title, origin, time_stamp]
                        self.__extra_actions.append(action)
        return True
    def assert_no_js_errors(self):
        """Asserts that there are no JavaScript "SEVERE"-level page errors.
        Works ONLY on Chromium browsers (Chrome or Edge).
        Does NOT work on Firefox, IE, Safari, or some other browsers:
        * See https://github.com/SeleniumHQ/selenium/issues/1161
        Based on the following Stack Overflow solution:
        * https://stackoverflow.com/a/41150512/7058266
        """
        self.__check_scope()
        time.sleep(0.1)  # May take a moment for errors to appear after loads.
        try:
            browser_logs = self.driver.get_log("browser")
        except (ValueError, WebDriverException):
            # If unable to get browser logs, skip the assert and return.
            return
        # Errors caused by SeleniumBase's own injected JS libraries
        # (Messenger / Underscore) are filtered out below.
        messenger_library = "//cdnjs.cloudflare.com/ajax/libs/messenger"
        underscore_library = "//cdnjs.cloudflare.com/ajax/libs/underscore"
        errors = []
        for entry in browser_logs:
            if entry["level"] == "SEVERE":
                if (
                    messenger_library not in entry["message"]
                    and underscore_library not in entry["message"]
                ):
                    # Add errors if not caused by SeleniumBase dependencies
                    errors.append(entry)
        if len(errors) > 0:
            # Rewrite common error shapes into compact {reason: url} dicts
            # before raising, so the failure message is easier to read.
            for n in range(len(errors)):
                f_t_l_r = " - Failed to load resource"
                u_c_t_e = " Uncaught TypeError: "
                if f_t_l_r in errors[n]["message"]:
                    url = errors[n]["message"].split(f_t_l_r)[0]
                    errors[n] = {"Error 404 (broken link)": url}
                elif u_c_t_e in errors[n]["message"]:
                    url = errors[n]["message"].split(u_c_t_e)[0]
                    error = errors[n]["message"].split(u_c_t_e)[1]
                    errors[n] = {"Uncaught TypeError (%s)" % error: url}
            er_str = str(errors)
            # One error per line for readability.
            er_str = er_str.replace("[{", "[\n{").replace("}, {", "},\n{")
            current_url = self.get_current_url()
            raise Exception(
                "JavaScript errors found on %s => %s" % (current_url, er_str)
            )
        if self.demo_mode:
            # Demo Mode: show a success banner (Chromium-only feature).
            if self.browser == "chrome" or self.browser == "edge":
                a_t = "ASSERT NO JS ERRORS"
                if self._language != "English":
                    from seleniumbase.fixtures.words import SD
                    a_t = SD.translate_assert_no_js_errors(self._language)
                messenger_post = "%s" % a_t
                self.__highlight_with_assert_success(messenger_post, "html")
    def __activate_html_inspector(self):
        # Inject the HTML-Inspector JS library into the current page.
        # (Used by self.inspect_html() before running the inspection.)
        self.wait_for_ready_state_complete()
        time.sleep(0.05)  # Brief pause to let the page settle first.
        js_utils.activate_html_inspector(self.driver)
    def inspect_html(self):
        """Inspects the Page HTML with HTML-Inspector.
        (https://github.com/philipwalton/html-inspector)
        (https://cdnjs.com/libraries/html-inspector)
        Prints the results and also returns them."""
        self.__activate_html_inspector()
        self.wait_for_ready_state_complete()
        script = """HTMLInspector.inspect();"""
        try:
            self.execute_script(script)
        except Exception:
            # If unable to load the JavaScript, skip inspection and return.
            msg = "(Unable to load HTML-Inspector JS! Inspection Skipped!)"
            print("\n" + msg)
            return msg
        time.sleep(0.1)  # Give the inspector a moment to log its findings.
        browser_logs = []
        try:
            browser_logs = self.driver.get_log("browser")
        except (ValueError, WebDriverException):
            # If unable to get browser logs, skip the assert and return.
            msg = "(Unable to Inspect HTML! -> Only works on Chromium!)"
            print("\n" + msg)
            return msg
        messenger_library = "//cdnjs.cloudflare.com/ajax/libs/messenger"
        url = self.get_current_url()
        header = "\n* HTML Inspection Results: %s" % url
        results = [header]
        row_count = 0
        # Parse inspector findings out of the browser console log entries.
        for entry in browser_logs:
            message = entry["message"]
            if "0:6053 " in message:
                message = message.split("0:6053")[1]
            message = message.replace("\\u003C", "<")
            # Unwrap single-quoted console strings into plain text rows.
            if message.startswith(' "') and message.count('"') == 2:
                message = message.split('"')[1]
            message = "X - " + message
            if messenger_library not in message:
                # Skip SeleniumBase's own injected library; dedupe rows.
                if message not in results:
                    results.append(message)
                    row_count += 1
        if row_count > 0:
            results.append("* (See the Console output for details!)")
        else:
            results.append("* (No issues detected!)")
        results = "\n".join(results)
        print(results)
        return results
def is_valid_url(self, url):
""" Return True if the url is a valid url. """
return page_utils.is_valid_url(url)
def is_chromium(self):
""" Return True if the browser is Chrome, Edge, or Opera. """
self.__check_scope()
chromium = False
browser_name = self.driver.capabilities["browserName"]
if browser_name.lower() in ("chrome", "edge", "msedge", "opera"):
chromium = True
return chromium
def __fail_if_not_using_chrome(self, method):
chrome = False
browser_name = self.driver.capabilities["browserName"]
if browser_name.lower() == "chrome":
chrome = True
if not chrome:
from seleniumbase.common.exceptions import NotUsingChromeException
message = (
'Error: "%s" should only be called '
'by tests running with self.browser == "chrome"! '
'You should add an "if" statement to your code before calling '
"this method if using browsers that are Not Chrome! "
'The browser detected was: "%s".' % (method, browser_name)
)
raise NotUsingChromeException(message)
def get_chrome_version(self):
self.__check_scope()
self.__fail_if_not_using_chrome("get_chrome_version()")
driver_capabilities = self.driver.capabilities
if "version" in driver_capabilities:
chrome_version = driver_capabilities["version"]
else:
chrome_version = driver_capabilities["browserVersion"]
return chrome_version
def get_chromedriver_version(self):
self.__check_scope()
self.__fail_if_not_using_chrome("get_chromedriver_version()")
chrome_dict = self.driver.capabilities["chrome"]
chromedriver_version = chrome_dict["chromedriverVersion"]
chromedriver_version = chromedriver_version.split(" ")[0]
return chromedriver_version
def is_chromedriver_too_old(self):
"""There are known issues with chromedriver versions below 73.
This can impact tests that need to hover over an element, or ones
that require a custom downloads folder ("./downloaded_files").
Due to the situation that newer versions of chromedriver require
an exact match to the version of Chrome, an "old" version of
chromedriver is installed by default. It is then up to the user
to upgrade to the correct version of chromedriver from there.
This method can be used to change test behavior when trying
to perform an action that is impacted by having an old version
of chromedriver installed."""
self.__check_scope()
self.__fail_if_not_using_chrome("is_chromedriver_too_old()")
if int(self.get_chromedriver_version().split(".")[0]) < 73:
return True # chromedriver is too old! Please upgrade!
return False
def get_mfa_code(self, totp_key=None):
"""Same as get_totp_code() and get_google_auth_password().
Returns a time-based one-time password based on the
Google Authenticator algorithm for multi-factor authentication.
If the "totp_key" is not specified, this method defaults
to using the one provided in [seleniumbase/config/settings.py].
Google Authenticator codes expire & change at 30-sec intervals.
If the fetched password expires in the next 1.5 seconds, waits
for a new one before returning it (may take up to 1.5 seconds).
See https://pyotp.readthedocs.io/en/latest/ for details."""
import pyotp
if not totp_key:
totp_key = settings.TOTP_KEY
epoch_interval = time.time() / 30.0
cycle_lifespan = float(epoch_interval) - int(epoch_interval)
if float(cycle_lifespan) > 0.95:
# Password expires in the next 1.5 seconds. Wait for a new one.
for i in range(30):
time.sleep(0.05)
epoch_interval = time.time() / 30.0
cycle_lifespan = float(epoch_interval) - int(epoch_interval)
if not float(cycle_lifespan) > 0.95:
# The new password cycle has begun
break
totp = pyotp.TOTP(totp_key)
return str(totp.now())
    def enter_mfa_code(
        self, selector, totp_key=None, by=By.CSS_SELECTOR, timeout=None
    ):
        """Enters into the field a Multi-Factor Authentication TOTP Code.
        If the "totp_key" is not specified, this method defaults
        to using the one provided in [seleniumbase/config/settings.py].
        The TOTP code is generated by the Google Authenticator Algorithm.
        This method will automatically press ENTER after typing the code."""
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        self.wait_for_element_visible(selector, by=by, timeout=timeout)
        if self.recorder_mode:
            # Record this step ("e_mfa") so it replays in exported tests.
            css_selector = self.convert_to_css_selector(selector, by=by)
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    origin = self.get_origin()
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        sel_key = [css_selector, totp_key]
                        action = ["e_mfa", sel_key, origin, time_stamp]
                        self.__extra_actions.append(action)
                    # Sometimes Sign-In leaves the origin... Save work first.
                    self.__origins_to_save.append(origin)
                    tab_actions = self.__get_recorded_actions_on_active_tab()
                    self.__actions_to_save.append(tab_actions)
        mfa_code = self.get_mfa_code(totp_key)
        # Trailing "\n" presses ENTER to submit the code after typing it.
        self.update_text(selector, mfa_code + "\n", by=by, timeout=timeout)
def convert_css_to_xpath(self, css):
return css_to_xpath.convert_css_to_xpath(css)
def convert_xpath_to_css(self, xpath):
return xpath_to_css.convert_xpath_to_css(xpath)
def convert_to_css_selector(self, selector, by):
"""This method converts a selector to a CSS_SELECTOR.
jQuery commands require a CSS_SELECTOR for finding elements.
This method should only be used for jQuery/JavaScript actions.
Pure JavaScript doesn't support using a:contains("LINK_TEXT")."""
if by == By.CSS_SELECTOR:
return selector
elif by == By.ID:
return "#%s" % selector
elif by == By.CLASS_NAME:
return ".%s" % selector
elif by == By.NAME:
return '[name="%s"]' % selector
elif by == By.TAG_NAME:
return selector
elif by == By.XPATH:
return self.convert_xpath_to_css(selector)
elif by == By.LINK_TEXT:
return 'a:contains("%s")' % selector
elif by == By.PARTIAL_LINK_TEXT:
return 'a:contains("%s")' % selector
else:
raise Exception(
"Exception: Could not convert {%s}(by=%s) to CSS_SELECTOR!"
% (selector, by)
)
    def set_value(
        self, selector, text, by=By.CSS_SELECTOR, timeout=None, scroll=True
    ):
        """ This method uses JavaScript to update a text field. """
        self.__check_scope()
        if not timeout:
            timeout = settings.LARGE_TIMEOUT
        if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        selector, by = self.__recalculate_selector(selector, by, xp_ok=False)
        self.wait_for_ready_state_complete()
        self.wait_for_element_present(selector, by=by, timeout=timeout)
        orginal_selector = selector
        css_selector = self.convert_to_css_selector(selector, by=by)
        self.__demo_mode_highlight_if_active(orginal_selector, by)
        if scroll and not self.demo_mode and not self.slow_mode:
            self.scroll_to(orginal_selector, by=by, timeout=timeout)
        if type(text) is int or type(text) is float:
            text = str(text)
        # Escape the text and selector so they can be embedded safely
        # inside a single-quoted JavaScript string literal.
        value = re.escape(text)
        value = self.__escape_quotes_if_needed(value)
        pre_escape_css_selector = css_selector
        css_selector = re.escape(css_selector)  # Add "\\" to special chars
        css_selector = self.__escape_quotes_if_needed(css_selector)
        the_type = None
        if ":contains\\(" not in css_selector:
            # Plain CSS: use document.querySelector directly.
            get_type_script = (
                """return document.querySelector('%s').getAttribute('type');"""
                % css_selector
            )
            the_type = self.execute_script(get_type_script)  # Used later
            script = """document.querySelector('%s').value='%s';""" % (
                css_selector,
                value,
            )
            self.execute_script(script)
            if self.recorder_mode:
                # Record this step ("js_ty") so it replays in exported tests.
                time_stamp = self.execute_script("return Date.now();")
                origin = self.get_origin()
                sel_tex = [pre_escape_css_selector, text]
                action = ["js_ty", sel_tex, origin, time_stamp]
                self.__extra_actions.append(action)
        else:
            # ":contains(" selectors need jQuery (not valid in pure JS).
            script = """jQuery('%s')[0].value='%s';""" % (css_selector, value)
            self.safe_execute_script(script)
        if text.endswith("\n"):
            # A trailing newline simulates pressing ENTER after typing.
            element = self.wait_for_element_present(
                orginal_selector, by=by, timeout=timeout
            )
            element.send_keys(Keys.RETURN)
            if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
                self.wait_for_ready_state_complete()
        else:
            if the_type == "range" and ":contains\\(" not in css_selector:
                # Some input sliders need a mouse event to trigger listeners.
                try:
                    mouse_move_script = (
                        """m_elm = document.querySelector('%s');"""
                        """m_evt = new Event('mousemove');"""
                        """m_elm.dispatchEvent(m_evt);"""
                        % css_selector
                    )
                    self.execute_script(mouse_move_script)
                except Exception:
                    pass
        self.__demo_mode_pause_if_active()
def js_update_text(self, selector, text, by=By.CSS_SELECTOR, timeout=None):
"""JavaScript + send_keys are used to update a text field.
Performs self.set_value() and triggers event listeners.
If text ends in "\n", set_value() presses RETURN after.
Works faster than send_keys() alone due to the JS call.
"""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if type(text) is int or type(text) is float:
text = str(text)
self.set_value(selector, text, by=by, timeout=timeout)
if not text.endswith("\n"):
try:
element = page_actions.wait_for_element_present(
self.driver, selector, by, timeout=0.2
)
element.send_keys(" " + Keys.BACK_SPACE)
except Exception:
pass
def js_type(self, selector, text, by=By.CSS_SELECTOR, timeout=None):
"""Same as self.js_update_text()
JavaScript + send_keys are used to update a text field.
Performs self.set_value() and triggers event listeners.
If text ends in "\n", set_value() presses RETURN after.
Works faster than send_keys() alone due to the JS call.
"""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.js_update_text(selector, text, by=by, timeout=timeout)
def set_text(self, selector, text, by=By.CSS_SELECTOR, timeout=None):
"""Same as self.js_update_text()
JavaScript + send_keys are used to update a text field.
Performs self.set_value() and triggers event listeners.
If text ends in "\n", set_value() presses RETURN after.
Works faster than send_keys() alone due to the JS call.
If not an input or textarea, sets textContent instead."""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
element = page_actions.wait_for_element_present(
self.driver, selector, by, timeout
)
if element.tag_name == "input" or element.tag_name == "textarea":
self.js_update_text(selector, text, by=by, timeout=timeout)
else:
self.set_text_content(selector, text, by=by, timeout=timeout)
    def set_text_content(
        self, selector, text, by=By.CSS_SELECTOR, timeout=None, scroll=False
    ):
        """This method uses JavaScript to set an element's textContent.
        If the element is an input or textarea, sets the value instead."""
        self.__check_scope()
        if not timeout:
            timeout = settings.LARGE_TIMEOUT
        if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        selector, by = self.__recalculate_selector(selector, by)
        self.wait_for_ready_state_complete()
        element = page_actions.wait_for_element_present(
            self.driver, selector, by, timeout
        )
        # Form fields have a .value, not textContent: delegate and return.
        if element.tag_name == "input" or element.tag_name == "textarea":
            self.js_update_text(selector, text, by=by, timeout=timeout)
            return
        orginal_selector = selector
        css_selector = self.convert_to_css_selector(selector, by=by)
        if scroll:
            self.__demo_mode_highlight_if_active(orginal_selector, by)
            if not self.demo_mode and not self.slow_mode:
                self.scroll_to(orginal_selector, by=by, timeout=timeout)
        if type(text) is int or type(text) is float:
            text = str(text)
        # Escape the text and selector so they can be embedded safely
        # inside a single-quoted JavaScript string literal.
        value = re.escape(text)
        value = self.__escape_quotes_if_needed(value)
        css_selector = re.escape(css_selector)  # Add "\\" to special chars
        css_selector = self.__escape_quotes_if_needed(css_selector)
        if ":contains\\(" not in css_selector:
            # Plain CSS: use document.querySelector directly.
            script = """document.querySelector('%s').textContent='%s';""" % (
                css_selector,
                value,
            )
            self.execute_script(script)
        else:
            # ":contains(" selectors need jQuery (not valid in pure JS).
            script = """jQuery('%s')[0].textContent='%s';""" % (
                css_selector,
                value,
            )
            self.safe_execute_script(script)
        self.__demo_mode_pause_if_active()
def jquery_update_text(
self, selector, text, by=By.CSS_SELECTOR, timeout=None
):
"""This method uses jQuery to update a text field.
If the text string ends with the newline character,
Selenium finishes the call, which simulates pressing
{Enter/Return} after the text is entered."""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by, xp_ok=False)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
self.__demo_mode_highlight_if_active(selector, by)
self.scroll_to(selector, by=by)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
selector = self.__escape_quotes_if_needed(selector)
text = re.escape(text)
text = self.__escape_quotes_if_needed(text)
update_text_script = """jQuery('%s').val('%s');""" % (selector, text)
self.safe_execute_script(update_text_script)
if text.endswith("\n"):
element.send_keys("\n")
self.__demo_mode_pause_if_active()
    def get_value(
        self, selector, by=By.CSS_SELECTOR, timeout=None
    ):
        """This method uses JavaScript to get the value of an input field.
        (Works on both input fields and textarea fields.)"""
        self.__check_scope()
        if not timeout:
            timeout = settings.LARGE_TIMEOUT
        if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        selector, by = self.__recalculate_selector(selector, by)
        self.wait_for_ready_state_complete()
        self.wait_for_element_present(selector, by=by, timeout=timeout)
        orginal_selector = selector
        css_selector = self.convert_to_css_selector(selector, by=by)
        self.__demo_mode_highlight_if_active(orginal_selector, by)
        if not self.demo_mode and not self.slow_mode:
            self.scroll_to(orginal_selector, by=by, timeout=timeout)
        # Escape the selector so it can sit inside a single-quoted JS string.
        css_selector = re.escape(css_selector)  # Add "\\" to special chars
        css_selector = self.__escape_quotes_if_needed(css_selector)
        if ":contains\\(" not in css_selector:
            # Plain CSS: use document.querySelector directly.
            script = """return document.querySelector('%s').value;""" % (
                css_selector
            )
            value = self.execute_script(script)
        else:
            # ":contains(" selectors need jQuery (not valid in pure JS).
            script = """return jQuery('%s')[0].value;""" % css_selector
            value = self.safe_execute_script(script)
        return value
def set_time_limit(self, time_limit):
self.__check_scope()
if time_limit:
try:
sb_config.time_limit = float(time_limit)
except Exception:
sb_config.time_limit = None
else:
sb_config.time_limit = None
if sb_config.time_limit and sb_config.time_limit > 0:
sb_config.time_limit_ms = int(sb_config.time_limit * 1000.0)
self.time_limit = sb_config.time_limit
else:
self.time_limit = None
sb_config.time_limit = None
sb_config.time_limit_ms = None
def set_default_timeout(self, timeout):
"""This method changes the default timeout values of test methods
for the duration of the current test.
Effected timeouts: (used by methods that wait for elements)
* settings.SMALL_TIMEOUT - (default value: 6 seconds)
* settings.LARGE_TIMEOUT - (default value: 10 seconds)
The minimum allowable default timeout is: 0.5 seconds.
The maximum allowable default timeout is: 60.0 seconds.
(Test methods can still override timeouts outside that range.)
"""
self.__check_scope()
if not type(timeout) is int and not type(timeout) is float:
raise Exception('Expecting a numeric value for "timeout"!')
if timeout < 0:
raise Exception('The "timeout" cannot be a negative number!')
timeout = float(timeout)
# Min default timeout: 0.5 seconds. Max default timeout: 60.0 seconds.
min_timeout = 0.5
max_timeout = 60.0
if timeout < min_timeout:
logging.info("Minimum default timeout = %s" % min_timeout)
timeout = min_timeout
elif timeout > max_timeout:
logging.info("Maximum default timeout = %s" % max_timeout)
timeout = max_timeout
self.__overrided_default_timeouts = True
sb_config._is_timeout_changed = True
settings.SMALL_TIMEOUT = timeout
settings.LARGE_TIMEOUT = timeout
def reset_default_timeout(self):
"""Reset default timeout values to the original from settings.py
This method reverts the changes made by set_default_timeout()"""
if self.__overrided_default_timeouts:
if sb_config._SMALL_TIMEOUT and sb_config._LARGE_TIMEOUT:
settings.SMALL_TIMEOUT = sb_config._SMALL_TIMEOUT
settings.LARGE_TIMEOUT = sb_config._LARGE_TIMEOUT
sb_config._is_timeout_changed = False
self.__overrided_default_timeouts = False
    def skip(self, reason=""):
        """ Mark the test as Skipped. """
        self.__check_scope()
        if self.dashboard:
            # Update the dashboard result for this test to "Skipped".
            test_id = self.__get_test_id_2()
            if hasattr(self, "_using_sb_fixture"):
                test_id = sb_config._test_id
            if (
                test_id in sb_config._results.keys()
                and sb_config._results[test_id] == "Passed"
            ):
                # Duplicate tearDown() called where test already passed
                self.__passed_then_skipped = True
            self.__will_be_skipped = True
            sb_config._results[test_id] = "Skipped"
        if hasattr(self, "with_db_reporting") and self.with_db_reporting:
            # Store the skip reason for the DB reporting plugin.
            if self.is_pytest:
                self.__skip_reason = reason
            else:
                self._nose_skip_reason = reason
        # Add skip reason to the logs
        if not hasattr(self, "_using_sb_fixture"):
            test_id = self.__get_test_id()  # Recalculate the test id
        test_logpath = os.path.join(self.log_path, test_id)
        self.__create_log_path_as_needed(test_logpath)
        browser = self.browser
        if not reason:
            reason = "No skip reason given"
        log_helper.log_skipped_test_data(
            self, test_logpath, self.driver, browser, reason
        )
        # Finally skip the test for real
        self.skipTest(reason)
############
# Shadow DOM / Shadow-root methods
    def __get_shadow_element(
        self, selector, timeout=None, must_be_visible=False
    ):
        """Resolve a "::shadow "-delimited selector chain to a WebElement.
        For each "::shadow " segment: find the outer element, enter its
        shadow root, then search inside it for the next CSS part.
        @Params
        selector - CSS parts joined by "::shadow " (at least one required).
        timeout - Seconds to wait. None => settings.SMALL_TIMEOUT.
            The special value 0 becomes 0.1 for the fast is_* checks.
        must_be_visible - If True, the final element must be displayed.
        Raises via page_actions.timeout_exception() if not found in time."""
        self.wait_for_ready_state_complete()
        if timeout is None:
            timeout = settings.SMALL_TIMEOUT
        elif timeout == 0:
            timeout = 0.1  # Use for: is_shadow_element_* (* = present/visible)
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        self.__fail_if_invalid_shadow_selector_usage(selector)
        if "::shadow " not in selector:
            raise Exception(
                'A Shadow DOM selector must contain at least one "::shadow "!'
            )
        selectors = selector.split("::shadow ")
        element = self.get_element(selectors[0])
        selector_chain = selectors[0]
        is_present = False
        # Walk the chain: each part is searched inside the previous part's
        # shadow root. selector_chain accumulates the path for messages.
        for selector_part in selectors[1:]:
            shadow_root = None
            if (
                selenium4
                and self.is_chromium()
                and int(self.__get_major_browser_version()) >= 96
            ):
                # Chromium 96+ with Selenium 4 exposes shadow_root natively
                try:
                    shadow_root = element.shadow_root
                except Exception:
                    if self.browser == "chrome":
                        # An old chromedriver may be the cause of the failure
                        chrome_dict = self.driver.capabilities["chrome"]
                        chrome_dr_version = chrome_dict["chromedriverVersion"]
                        chromedriver_version = chrome_dr_version.split(" ")[0]
                        major_c_dr_version = chromedriver_version.split(".")[0]
                        if int(major_c_dr_version) < 96:
                            upgrade_to = "latest"
                            major_browser_version = (
                                self.__get_major_browser_version()
                            )
                            if int(major_browser_version) >= 96:
                                upgrade_to = str(major_browser_version)
                            message = (
                                "You need to upgrade to a newer\n"
                                "version of chromedriver to interact\n"
                                "with Shadow root elements!\n"
                                "(Current driver version is: %s)"
                                "\n(Minimum driver version is: 96.*)"
                                "\nTo upgrade, run this:"
                                '\n"seleniumbase install chromedriver %s"'
                                % (chromedriver_version, upgrade_to)
                            )
                            raise Exception(message)
                    if timeout != 0.1:  # Skip wait for special 0.1 (See above)
                        time.sleep(2)
                    try:
                        shadow_root = element.shadow_root
                    except Exception:
                        raise Exception(
                            "Element {%s} has no shadow root!" % selector_chain
                        )
            else:  # This part won't work on Chrome 96 or newer.
                # If using Chrome 96 or newer (and on an old Python version),
                # you'll need to upgrade in order to access Shadow roots.
                # Firefox users will likely hit:
                # https://github.com/mozilla/geckodriver/issues/1711
                # When Firefox adds support, switch to element.shadow_root
                try:
                    shadow_root = self.execute_script(
                        "return arguments[0].shadowRoot", element
                    )
                except Exception:
                    time.sleep(2)
                    shadow_root = self.execute_script(
                        "return arguments[0].shadowRoot", element
                    )
            if timeout == 0.1 and not shadow_root:
                raise Exception(
                    "Element {%s} has no shadow root!" % selector_chain
                )
            elif not shadow_root:
                time.sleep(2)  # Wait two seconds for the shadow root to appear
                shadow_root = self.execute_script(
                    "return arguments[0].shadowRoot", element
                )
                if not shadow_root:
                    raise Exception(
                        "Element {%s} has no shadow root!" % selector_chain
                    )
            selector_chain += "::shadow "
            selector_chain += selector_part
            try:
                if (
                    selenium4
                    and self.is_chromium()
                    and int(self.__get_major_browser_version()) >= 96
                ):
                    if timeout == 0.1:
                        element = shadow_root.find_element(
                            By.CSS_SELECTOR, value=selector_part)
                    else:
                        # Retry loop: ~4 attempts per second of timeout
                        found = False
                        for i in range(int(timeout) * 4):
                            try:
                                element = shadow_root.find_element(
                                    By.CSS_SELECTOR, value=selector_part)
                                is_present = True
                                if must_be_visible:
                                    if not element.is_displayed():
                                        raise Exception(
                                            "Shadow Root element not visible!")
                                found = True
                                break
                            except Exception:
                                time.sleep(0.2)
                                continue
                        if not found:
                            # One last attempt (lets the real error surface)
                            element = shadow_root.find_element(
                                By.CSS_SELECTOR, value=selector_part)
                            is_present = True
                            if must_be_visible and not element.is_displayed():
                                raise Exception(
                                    "Shadow Root element not visible!")
                else:
                    element = page_actions.wait_for_element_present(
                        shadow_root,
                        selector_part,
                        by=By.CSS_SELECTOR,
                        timeout=timeout,
                    )
            except Exception:
                error = "not present"
                the_exception = "NoSuchElementException"
                if must_be_visible and is_present:
                    error = "not visible"
                    the_exception = "ElementNotVisibleException"
                msg = (
                    "Shadow DOM Element {%s} was %s after %s seconds!"
                    % (selector_chain, error, timeout)
                )
                page_actions.timeout_exception(the_exception, msg)
        return element
def __fail_if_invalid_shadow_selector_usage(self, selector):
if selector.strip().endswith("::shadow"):
msg = (
"A Shadow DOM selector cannot end on a shadow root element!"
" End the selector with an element inside the shadow root!"
)
raise Exception(msg)
def __is_shadow_selector(self, selector):
self.__fail_if_invalid_shadow_selector_usage(selector)
if "::shadow " in selector:
return True
return False
def __shadow_click(self, selector, timeout):
element = self.__get_shadow_element(
selector, timeout=timeout, must_be_visible=True
)
element.click()
def __shadow_type(self, selector, text, timeout, clear_first=True):
element = self.__get_shadow_element(
selector, timeout=timeout, must_be_visible=True
)
if clear_first:
try:
element.clear()
backspaces = Keys.BACK_SPACE * 42 # Autofill Defense
element.send_keys(backspaces)
except Exception:
pass
if type(text) is int or type(text) is float:
text = str(text)
if not text.endswith("\n"):
element.send_keys(text)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
else:
element.send_keys(text[:-1])
element.send_keys(Keys.RETURN)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
def __shadow_clear(self, selector, timeout):
element = self.__get_shadow_element(
selector, timeout=timeout, must_be_visible=True
)
try:
element.clear()
backspaces = Keys.BACK_SPACE * 42 # Autofill Defense
element.send_keys(backspaces)
except Exception:
pass
def __get_shadow_text(self, selector, timeout):
element = self.__get_shadow_element(
selector, timeout=timeout, must_be_visible=True
)
element_text = element.text
if self.browser == "safari":
element_text = element.get_attribute("innerText")
return element_text
def __get_shadow_attribute(self, selector, attribute, timeout):
element = self.__get_shadow_element(selector, timeout=timeout)
return element.get_attribute(attribute)
def __wait_for_shadow_text_visible(self, text, selector, timeout):
start_ms = time.time() * 1000.0
stop_ms = start_ms + (settings.SMALL_TIMEOUT * 1000.0)
for x in range(int(settings.SMALL_TIMEOUT * 10)):
try:
actual_text = self.__get_shadow_text(
selector, timeout=1
).strip()
text = text.strip()
if text not in actual_text:
msg = (
"Expected text {%s} in element {%s} was not visible!"
% (text, selector)
)
page_actions.timeout_exception(
"ElementNotVisibleException", msg
)
return True
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
actual_text = self.__get_shadow_text(selector, timeout=1).strip()
text = text.strip()
if text not in actual_text:
msg = "Expected text {%s} in element {%s} was not visible!" % (
text,
selector,
)
page_actions.timeout_exception("ElementNotVisibleException", msg)
return True
def __wait_for_exact_shadow_text_visible(self, text, selector, timeout):
start_ms = time.time() * 1000.0
stop_ms = start_ms + (settings.SMALL_TIMEOUT * 1000.0)
for x in range(int(settings.SMALL_TIMEOUT * 10)):
try:
actual_text = self.__get_shadow_text(
selector, timeout=1
).strip()
text = text.strip()
if text != actual_text:
msg = (
"Expected exact text {%s} in element {%s} not visible!"
"" % (text, selector)
)
page_actions.timeout_exception(
"ElementNotVisibleException", msg
)
return True
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
actual_text = self.__get_shadow_text(selector, timeout=1).strip()
text = text.strip()
if text != actual_text:
msg = (
"Expected exact text {%s} in element {%s} was not visible!"
% (text, selector)
)
page_actions.timeout_exception("ElementNotVisibleException", msg)
return True
def __assert_shadow_text_visible(self, text, selector, timeout):
self.__wait_for_shadow_text_visible(text, selector, timeout)
if self.demo_mode:
a_t = "ASSERT TEXT"
i_n = "in"
by = By.CSS_SELECTOR
if self._language != "English":
from seleniumbase.fixtures.words import SD
a_t = SD.translate_assert_text(self._language)
i_n = SD.translate_in(self._language)
messenger_post = "%s: {%s} %s %s: %s" % (
a_t,
text,
i_n,
by.upper(),
selector,
)
try:
js_utils.activate_jquery(self.driver)
js_utils.post_messenger_success_message(
self.driver, messenger_post, self.message_duration
)
except Exception:
pass
def __assert_exact_shadow_text_visible(self, text, selector, timeout):
self.__wait_for_exact_shadow_text_visible(text, selector, timeout)
if self.demo_mode:
a_t = "ASSERT EXACT TEXT"
i_n = "in"
by = By.CSS_SELECTOR
if self._language != "English":
from seleniumbase.fixtures.words import SD
a_t = SD.translate_assert_exact_text(self._language)
i_n = SD.translate_in(self._language)
messenger_post = "%s: {%s} %s %s: %s" % (
a_t,
text,
i_n,
by.upper(),
selector,
)
try:
js_utils.activate_jquery(self.driver)
js_utils.post_messenger_success_message(
self.driver, messenger_post, self.message_duration
)
except Exception:
pass
def __is_shadow_element_present(self, selector):
try:
element = self.__get_shadow_element(selector, timeout=0.1)
return element is not None
except Exception:
return False
def __is_shadow_element_visible(self, selector):
try:
element = self.__get_shadow_element(selector, timeout=0.1)
return element.is_displayed()
except Exception:
return False
def __is_shadow_element_enabled(self, selector):
try:
element = self.__get_shadow_element(selector, timeout=0.1)
return element.is_enabled()
except Exception:
return False
def __is_shadow_text_visible(self, text, selector):
try:
element = self.__get_shadow_element(selector, timeout=0.1)
if self.browser == "safari":
return (
element.is_displayed()
and text in element.get_attribute("innerText")
)
return element.is_displayed() and text in element.text
except Exception:
return False
def __is_shadow_attribute_present(self, selector, attribute, value=None):
try:
element = self.__get_shadow_element(selector, timeout=0.1)
found_value = element.get_attribute(attribute)
if found_value is None:
return False
if value is not None:
if found_value == value:
return True
else:
return False
else:
return True
except Exception:
return False
def __wait_for_shadow_element_present(self, selector, timeout):
element = self.__get_shadow_element(selector, timeout=timeout)
return element
def __wait_for_shadow_element_visible(self, selector, timeout):
element = self.__get_shadow_element(
selector, timeout=timeout, must_be_visible=True
)
return element
def __wait_for_shadow_attribute_present(
self, selector, attribute, value=None, timeout=None
):
element = self.__get_shadow_element(selector, timeout=timeout)
actual_value = element.get_attribute(attribute)
plural = "s"
if timeout == 1:
plural = ""
if value is None:
# The element attribute only needs to exist
if actual_value is not None:
return element
else:
# The element does not have the attribute
message = (
"Expected attribute {%s} of element {%s} "
"was not present after %s second%s!"
% (attribute, selector, timeout, plural)
)
page_actions.timeout_exception(
"NoSuchAttributeException", message
)
else:
if actual_value == value:
return element
else:
message = (
"Expected value {%s} for attribute {%s} of element "
"{%s} was not present after %s second%s! "
"(The actual value was {%s})"
% (
value,
attribute,
selector,
timeout,
plural,
actual_value,
)
)
page_actions.timeout_exception(
"NoSuchAttributeException", message
)
def __assert_shadow_element_present(self, selector):
self.__get_shadow_element(selector)
if self.demo_mode:
a_t = "ASSERT"
by = By.CSS_SELECTOR
if self._language != "English":
from seleniumbase.fixtures.words import SD
a_t = SD.translate_assert(self._language)
messenger_post = "%s %s: %s" % (a_t, by.upper(), selector)
try:
js_utils.activate_jquery(self.driver)
js_utils.post_messenger_success_message(
self.driver, messenger_post, self.message_duration
)
except Exception:
pass
def __assert_shadow_element_visible(self, selector):
element = self.__get_shadow_element(selector)
if not element.is_displayed():
msg = "Shadow DOM Element {%s} was not visible!" % selector
page_actions.timeout_exception("NoSuchElementException", msg)
if self.demo_mode:
a_t = "ASSERT"
by = By.CSS_SELECTOR
if self._language != "English":
from seleniumbase.fixtures.words import SD
a_t = SD.translate_assert(self._language)
messenger_post = "%s %s: %s" % (a_t, by.upper(), selector)
try:
js_utils.activate_jquery(self.driver)
js_utils.post_messenger_success_message(
self.driver, messenger_post, self.message_duration
)
except Exception:
pass
############
# Application "Local Storage" controls
def set_local_storage_item(self, key, value):
self.__check_scope()
self.execute_script(
"window.localStorage.setItem('{}', '{}');".format(key, value)
)
def get_local_storage_item(self, key):
self.__check_scope()
return self.execute_script(
"return window.localStorage.getItem('{}');".format(key)
)
def remove_local_storage_item(self, key):
self.__check_scope()
self.execute_script(
"window.localStorage.removeItem('{}');".format(key)
)
def clear_local_storage(self):
self.__check_scope()
self.execute_script("window.localStorage.clear();")
if self.recorder_mode:
time_stamp = self.execute_script("return Date.now();")
origin = self.get_origin()
action = ["c_l_s", "", origin, time_stamp]
self.__extra_actions.append(action)
def get_local_storage_keys(self):
self.__check_scope()
return self.execute_script(
"var ls = window.localStorage, keys = []; "
"for (var i = 0; i < ls.length; ++i) "
" keys[i] = ls.key(i); "
"return keys;"
)
def get_local_storage_items(self):
self.__check_scope()
return self.execute_script(
r"var ls = window.localStorage, items = {}; "
"for (var i = 0, k; i < ls.length; ++i) "
" items[k = ls.key(i)] = ls.getItem(k); "
"return items;"
)
# Application "Session Storage" controls
def set_session_storage_item(self, key, value):
self.__check_scope()
self.execute_script(
"window.sessionStorage.setItem('{}', '{}');".format(key, value)
)
def get_session_storage_item(self, key):
self.__check_scope()
return self.execute_script(
"return window.sessionStorage.getItem('{}');".format(key)
)
def remove_session_storage_item(self, key):
self.__check_scope()
self.execute_script(
"window.sessionStorage.removeItem('{}');".format(key)
)
def clear_session_storage(self):
self.__check_scope()
self.execute_script("window.sessionStorage.clear();")
def get_session_storage_keys(self):
self.__check_scope()
return self.execute_script(
"var ls = window.sessionStorage, keys = []; "
"for (var i = 0; i < ls.length; ++i) "
" keys[i] = ls.key(i); "
"return keys;"
)
def get_session_storage_items(self):
self.__check_scope()
return self.execute_script(
r"var ls = window.sessionStorage, items = {}; "
"for (var i = 0, k; i < ls.length; ++i) "
" items[k = ls.key(i)] = ls.getItem(k); "
"return items;"
)
############
# Duplicates (Avoids name confusion when migrating from other frameworks.)
def open_url(self, url):
""" Same as self.open() """
self.open(url)
def visit(self, url):
""" Same as self.open() """
self.open(url)
def visit_url(self, url):
""" Same as self.open() """
self.open(url)
def goto(self, url):
""" Same as self.open() """
self.open(url)
def go_to(self, url):
""" Same as self.open() """
self.open(url)
def reload(self):
""" Same as self.refresh_page() """
self.refresh_page()
def reload_page(self):
""" Same as self.refresh_page() """
self.refresh_page()
def open_new_tab(self, switch_to=True):
""" Same as self.open_new_window() """
self.open_new_window(switch_to=switch_to)
def switch_to_tab(self, tab, timeout=None):
""" Same as self.switch_to_window()
Switches control of the browser to the specified window.
The window can be an integer: 0 -> 1st tab, 1 -> 2nd tab, etc...
Or it can be a list item from self.driver.window_handles """
self.switch_to_window(window=tab, timeout=timeout)
def switch_to_default_tab(self):
""" Same as self.switch_to_default_window() """
self.switch_to_default_window()
def switch_to_newest_tab(self):
""" Same as self.switch_to_newest_window() """
self.switch_to_newest_window()
def input(
self, selector, text, by=By.CSS_SELECTOR, timeout=None, retry=False
):
""" Same as self.update_text() """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.update_text(selector, text, by=by, timeout=timeout, retry=retry)
def fill(
self, selector, text, by=By.CSS_SELECTOR, timeout=None, retry=False
):
""" Same as self.update_text() """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.update_text(selector, text, by=by, timeout=timeout, retry=retry)
def write(
self, selector, text, by=By.CSS_SELECTOR, timeout=None, retry=False
):
""" Same as self.update_text() """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.update_text(selector, text, by=by, timeout=timeout, retry=retry)
def send_keys(self, selector, text, by=By.CSS_SELECTOR, timeout=None):
""" Same as self.add_text() """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.add_text(selector, text, by=by, timeout=timeout)
def click_link(self, link_text, timeout=None):
""" Same as self.click_link_text() """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.click_link_text(link_text, timeout=timeout)
def click_partial_link(self, partial_link_text, timeout=None):
""" Same as self.click_partial_link_text() """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.click_partial_link_text(partial_link_text, timeout=timeout)
def wait_for_element_visible(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
""" Same as self.wait_for_element() """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
return self.__wait_for_shadow_element_visible(
selector, timeout
)
return page_actions.wait_for_element_visible(
self.driver, selector, by, timeout
)
def wait_for_element_not_present(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""Same as self.wait_for_element_absent()
Waits for an element to no longer appear in the HTML of a page.
A hidden element still counts as appearing in the page HTML.
If waiting for elements to be hidden instead of nonexistent,
use wait_for_element_not_visible() instead.
"""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_element_absent(
self.driver, selector, by, timeout
)
def assert_element_not_present(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""Same as self.assert_element_absent()
Will raise an exception if the element stays present.
A hidden element counts as a present element, which fails this assert.
If you want to assert that elements are hidden instead of nonexistent,
use assert_element_not_visible() instead.
(Note that hidden elements are still present in the HTML of the page.)
Returns True if successful. Default timeout = SMALL_TIMEOUT."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.wait_for_element_absent(selector, by=by, timeout=timeout)
return True
def get_google_auth_password(self, totp_key=None):
""" Same as self.get_mfa_code() """
return self.get_mfa_code(totp_key=totp_key)
def get_google_auth_code(self, totp_key=None):
""" Same as self.get_mfa_code() """
return self.get_mfa_code(totp_key=totp_key)
def get_totp_code(self, totp_key=None):
""" Same as self.get_mfa_code() """
return self.get_mfa_code(totp_key=totp_key)
def enter_totp_code(
self, selector, totp_key=None, by=By.CSS_SELECTOR, timeout=None
):
""" Same as self.enter_mfa_code() """
return self.enter_mfa_code(
selector=selector, totp_key=totp_key, by=by, timeout=timeout
)
def assert_no_broken_links(self, multithreaded=True):
""" Same as self.assert_no_404_errors() """
self.assert_no_404_errors(multithreaded=multithreaded)
def wait(self, seconds):
""" Same as self.sleep() - Some JS frameworks use this method name. """
self.sleep(seconds)
def block_ads(self):
""" Same as self.ad_block() """
self.ad_block()
def _print(self, msg):
"""Same as Python's print(), but won't print during multithreaded runs
because overlapping print() commands may lead to unexpected output.
In most cases, the print() command won't print for multithreaded tests,
but there are some exceptions, and this will take care of those.
Here's an example of running tests multithreaded: "pytest -n=4".
To force a print during multithreaded tests, use: "sys.stderr.write()".
To print without the new-line character end, use: "sys.stdout.write()".
"""
if not sb_config._multithreaded:
print(msg)
def start_tour(self, name=None, interval=0):
self.play_tour(name=name, interval=interval)
############
def add_css_link(self, css_link):
self.__check_scope()
self.__check_browser()
js_utils.add_css_link(self.driver, css_link)
def add_js_link(self, js_link):
self.__check_scope()
self.__check_browser()
js_utils.add_js_link(self.driver, js_link)
def add_css_style(self, css_style):
self.__check_scope()
self.__check_browser()
js_utils.add_css_style(self.driver, css_style)
def add_js_code_from_link(self, js_link):
self.__check_scope()
self.__check_browser()
js_utils.add_js_code_from_link(self.driver, js_link)
def add_js_code(self, js_code):
self.__check_scope()
self.__check_browser()
js_utils.add_js_code(self.driver, js_code)
def add_meta_tag(self, http_equiv=None, content=None):
self.__check_scope()
self.__check_browser()
js_utils.add_meta_tag(
self.driver, http_equiv=http_equiv, content=content
)
############
def create_presentation(
self, name=None, theme="default", transition="default"
):
"""Creates a Reveal-JS presentation that you can add slides to.
@Params
name - If creating multiple presentations at the same time,
use this to specify the name of the current presentation.
theme - Set a theme with a unique style for the presentation.
Valid themes: "serif" (default), "sky", "white", "black",
"simple", "league", "moon", "night",
"beige", "blood", and "solarized".
transition - Set a transition between slides.
Valid transitions: "none" (default), "slide", "fade",
"zoom", "convex", and "concave".
"""
if not name:
name = "default"
if not theme or theme == "default":
theme = "serif"
valid_themes = [
"serif",
"white",
"black",
"beige",
"simple",
"sky",
"league",
"moon",
"night",
"blood",
"solarized",
]
theme = theme.lower()
if theme not in valid_themes:
raise Exception(
"Theme {%s} not found! Valid themes: %s"
% (theme, valid_themes)
)
if not transition or transition == "default":
transition = "none"
valid_transitions = [
"none",
"slide",
"fade",
"zoom",
"convex",
"concave",
]
transition = transition.lower()
if transition not in valid_transitions:
raise Exception(
"Transition {%s} not found! Valid transitions: %s"
% (transition, valid_transitions)
)
reveal_theme_css = None
if theme == "serif":
reveal_theme_css = constants.Reveal.SERIF_MIN_CSS
elif theme == "sky":
reveal_theme_css = constants.Reveal.SKY_MIN_CSS
elif theme == "white":
reveal_theme_css = constants.Reveal.WHITE_MIN_CSS
elif theme == "black":
reveal_theme_css = constants.Reveal.BLACK_MIN_CSS
elif theme == "simple":
reveal_theme_css = constants.Reveal.SIMPLE_MIN_CSS
elif theme == "league":
reveal_theme_css = constants.Reveal.LEAGUE_MIN_CSS
elif theme == "moon":
reveal_theme_css = constants.Reveal.MOON_MIN_CSS
elif theme == "night":
reveal_theme_css = constants.Reveal.NIGHT_MIN_CSS
elif theme == "beige":
reveal_theme_css = constants.Reveal.BEIGE_MIN_CSS
elif theme == "blood":
reveal_theme_css = constants.Reveal.BLOOD_MIN_CSS
elif theme == "solarized":
reveal_theme_css = constants.Reveal.SOLARIZED_MIN_CSS
else:
# Use the default if unable to determine the theme
reveal_theme_css = constants.Reveal.SERIF_MIN_CSS
new_presentation = (
"<html>\n"
"<head>\n"
'<meta charset="utf-8">\n'
'<meta http-equiv="Content-Type" content="text/html">\n'
'<meta name="viewport" content="shrink-to-fit=no">\n'
'<link rel="stylesheet" href="%s">\n'
'<link rel="stylesheet" href="%s">\n'
"<style>\n"
"pre{background-color:#fbe8d4;border-radius:8px;}\n"
"div[flex_div]{height:68vh;margin:0;align-items:center;"
"justify-content:center;}\n"
"img[rounded]{border-radius:16px;max-width:64%%;}\n"
"</style>\n"
"</head>\n\n"
"<body>\n"
"<!-- Generated by SeleniumBase - https://seleniumbase.io -->\n"
'<div class="reveal">\n'
'<div class="slides">\n'
% (constants.Reveal.MIN_CSS, reveal_theme_css)
)
self._presentation_slides[name] = []
self._presentation_slides[name].append(new_presentation)
self._presentation_transition[name] = transition
    def add_slide(
        self,
        content=None,
        image=None,
        code=None,
        iframe=None,
        content2=None,
        notes=None,
        transition=None,
        name=None,
    ):
        """Allows the user to add slides to a presentation.
        @Params
        content - The HTML content to display on the presentation slide.
        image - Attach an image (from a URL link) to the slide.
        code - Attach code of any programming language to the slide.
               Language-detection will be used to add syntax formatting.
        iframe - Attach an iFrame (from a URL link) to the slide.
        content2 - HTML content to display after adding an image or code.
        notes - Additional notes to include with the slide.
                ONLY SEEN if show_notes is set for the presentation.
        transition - Set a transition between slides. (overrides previous)
                     Valid transitions: "none" (default), "slide", "fade",
                     "zoom", "convex", and "concave".
        name - If creating multiple presentations at the same time,
               use this to select the presentation to add slides to.
        """
        if not name:
            name = "default"
        if name not in self._presentation_slides:
            # Create a presentation if it doesn't already exist
            self.create_presentation(name=name)
        if not content:
            content = ""
        if not content2:
            content2 = ""
        if not notes:
            notes = ""
        if not transition:
            # Inherit the transition that the presentation was created with
            transition = self._presentation_transition[name]
        elif transition == "default":
            transition = "none"
        valid_transitions = [
            "none",
            "slide",
            "fade",
            "zoom",
            "convex",
            "concave",
        ]
        transition = transition.lower()
        if transition not in valid_transitions:
            raise Exception(
                "Transition {%s} not found! Valid transitions: %s"
                "" % (transition, valid_transitions)
            )
        # HTML content gets its own line for nicer generated markup
        add_line = ""
        if content.startswith("<"):
            add_line = "\n"
        html = '\n<section data-transition="%s">%s%s' % (
            transition,
            add_line,
            content,
        )
        if image:
            html += '\n<div flex_div><img rounded src="%s" /></div>' % image
        if code:
            html += "\n<div></div>"
            html += '\n<pre class="prettyprint">\n%s</pre>' % code
        if iframe:
            html += (
                "\n<div></div>"
                '\n<iframe src="%s" style="width:92%%;height:550px;" '
                'title="iframe content"></iframe>' % iframe
            )
        add_line = ""
        if content2.startswith("<"):
            add_line = "\n"
        if content2:
            html += "%s%s" % (add_line, content2)
        # Notes are only visible when show_notes is set on the presentation
        html += '\n<aside class="notes">%s</aside>' % notes
        html += "\n</section>\n"
        self._presentation_slides[name].append(html)
    def save_presentation(
        self, name=None, filename=None, show_notes=False, interval=0
    ):
        """Saves a Reveal-JS Presentation to a file for later use.
        @Params
        name - If creating multiple presentations at the same time,
               use this to select the one you wish to use.
        filename - The name of the HTML file that you wish to
                   save the presentation to. (filename must end in ".html")
        show_notes - When set to True, the Notes feature becomes enabled,
                     which allows presenters to see notes next to slides.
        interval - The delay time between autoplaying slides. (in seconds)
                   If set to 0 (default), autoplay is disabled.
        Returns the path of the saved file."""
        if not name:
            name = "default"
        if not filename:
            filename = "my_presentation.html"
        if name not in self._presentation_slides:
            raise Exception("Presentation {%s} does not exist!" % name)
        if not filename.endswith(".html"):
            raise Exception('Presentation file must end in ".html"!')
        if not interval:
            interval = 0
        if interval == 0 and self.interval:
            # Use the command-line interval as the fallback
            interval = float(self.interval)
        if not type(interval) is int and not type(interval) is float:
            raise Exception('Expecting a numeric value for "interval"!')
        if interval < 0:
            raise Exception('The "interval" cannot be a negative number!')
        # Reveal-JS expects autoSlide in milliseconds
        interval_ms = float(interval) * 1000.0
        show_notes_str = "false"
        if show_notes:
            show_notes_str = "true"
        # Concatenate all slides, then append the closing HTML + JS init
        the_html = ""
        for slide in self._presentation_slides[name]:
            the_html += slide
        the_html += (
            "\n</div>\n"
            "</div>\n"
            '<script src="%s"></script>\n'
            '<script src="%s"></script>\n'
            "<script>Reveal.initialize("
            "{showNotes: %s, slideNumber: true, progress: true, hash: false, "
            "autoSlide: %s,});"
            "</script>\n"
            "</body>\n"
            "</html>\n"
            % (
                constants.Reveal.MIN_JS,
                constants.PrettifyJS.RUN_PRETTIFY_JS,
                show_notes_str,
                interval_ms,
            )
        )
        # Remove duplicate ChartMaker library declarations
        chart_libs = """
            <script src="%s"></script>
            <script src="%s"></script>
            <script src="%s"></script>
            <script src="%s"></script>
            """ % (
            constants.HighCharts.HC_JS,
            constants.HighCharts.EXPORTING_JS,
            constants.HighCharts.EXPORT_DATA_JS,
            constants.HighCharts.ACCESSIBILITY_JS,
        )
        if the_html.count(chart_libs) > 1:
            chart_libs_comment = "<!-- HighCharts Libraries Imported -->"
            the_html = the_html.replace(chart_libs, chart_libs_comment)
            # Only need to import the HighCharts libraries once
            the_html = the_html.replace(chart_libs_comment, chart_libs, 1)
        saved_presentations_folder = constants.Presentations.SAVED_FOLDER
        if saved_presentations_folder.endswith("/"):
            saved_presentations_folder = saved_presentations_folder[:-1]
        if not os.path.exists(saved_presentations_folder):
            try:
                os.makedirs(saved_presentations_folder)
            except Exception:
                # Folder may have been created by a parallel test run
                pass
        file_path = saved_presentations_folder + "/" + filename
        out_file = codecs.open(file_path, "w+", encoding="utf-8")
        out_file.writelines(the_html)
        out_file.close()
        print("\n>>> [%s] was saved!\n" % file_path)
        return file_path
    def begin_presentation(
        self, name=None, filename=None, show_notes=False, interval=0
    ):
        """Begin a Reveal-JS Presentation in the web browser.
        Saves the presentation (with a sentinel end-slide appended), opens
        it in the browser, and blocks until the final slide is reached or
        the browser leaves the presentation.
        @Params
        name - If creating multiple presentations at the same time,
               use this to select the one you wish to use.
        filename - The name of the HTML file that you wish to
                   save the presentation to. (filename must end in ".html")
        show_notes - When set to True, the Notes feature becomes enabled,
                     which allows presenters to see notes next to slides.
        interval - The delay time between autoplaying slides. (in seconds)
                   If set to 0 (default), autoplay is disabled.
        """
        if self.headless or self.xvfb:
            return  # Presentations should not run in headless mode.
        if not name:
            name = "default"
        if not filename:
            filename = "my_presentation.html"
        if name not in self._presentation_slides:
            raise Exception("Presentation {%s} does not exist!" % name)
        if not filename.endswith(".html"):
            raise Exception('Presentation file must end in ".html"!')
        if not interval:
            interval = 0
        if interval == 0 and self.interval:
            # Use the command-line interval as the fallback
            interval = float(self.interval)
        if not type(interval) is int and not type(interval) is float:
            raise Exception('Expecting a numeric value for "interval"!')
        if interval < 0:
            raise Exception('The "interval" cannot be a negative number!')
        # Sentinel slide that marks the end of the presentation
        end_slide = (
            '\n<section data-transition="none">\n'
            '<p class="End_Presentation_Now"> </p>\n</section>\n'
        )
        self._presentation_slides[name].append(end_slide)
        file_path = self.save_presentation(
            name=name,
            filename=filename,
            show_notes=show_notes,
            interval=interval,
        )
        # Remove the sentinel so the stored slides stay unchanged
        self._presentation_slides[name].pop()
        self.open_html_file(file_path)
        presentation_folder = constants.Presentations.SAVED_FOLDER
        try:
            # Poll until the sentinel slide shows or the user navigates away
            while (
                len(self.driver.window_handles) > 0
                and presentation_folder in self.get_current_url()
            ):
                time.sleep(0.05)
                if self.is_element_visible(
                    "section.present p.End_Presentation_Now"
                ):
                    break
                time.sleep(0.05)
        except Exception:
            # The browser window may have been closed mid-presentation
            pass
############
def create_pie_chart(
self,
chart_name=None,
title=None,
subtitle=None,
data_name=None,
unit=None,
libs=True,
labels=True,
legend=True,
):
"""Creates a JavaScript pie chart using "HighCharts".
@Params
chart_name - If creating multiple charts,
use this to select which one.
title - The title displayed for the chart.
subtitle - The subtitle displayed for the chart.
data_name - The series name. Useful for multi-series charts.
If no data_name, will default to using "Series 1".
unit - The description label given to the chart's y-axis values.
libs - The option to include Chart libraries (JS and CSS files).
Should be set to True (default) for the first time creating
a chart on a web page. If creating multiple charts on the
same web page, you won't need to re-import the libraries
when creating additional charts.
labels - If True, displays labels on the chart for data points.
legend - If True, displays the data point legend on the chart.
"""
if not chart_name:
chart_name = "default"
if not data_name:
data_name = ""
style = "pie"
self.__create_highchart(
chart_name=chart_name,
title=title,
subtitle=subtitle,
style=style,
data_name=data_name,
unit=unit,
libs=libs,
labels=labels,
legend=legend,
)
def create_bar_chart(
self,
chart_name=None,
title=None,
subtitle=None,
data_name=None,
unit=None,
libs=True,
labels=True,
legend=True,
):
"""Creates a JavaScript bar chart using "HighCharts".
@Params
chart_name - If creating multiple charts,
use this to select which one.
title - The title displayed for the chart.
subtitle - The subtitle displayed for the chart.
data_name - The series name. Useful for multi-series charts.
If no data_name, will default to using "Series 1".
unit - The description label given to the chart's y-axis values.
libs - The option to include Chart libraries (JS and CSS files).
Should be set to True (default) for the first time creating
a chart on a web page. If creating multiple charts on the
same web page, you won't need to re-import the libraries
when creating additional charts.
labels - If True, displays labels on the chart for data points.
legend - If True, displays the data point legend on the chart.
"""
if not chart_name:
chart_name = "default"
if not data_name:
data_name = ""
style = "bar"
self.__create_highchart(
chart_name=chart_name,
title=title,
subtitle=subtitle,
style=style,
data_name=data_name,
unit=unit,
libs=libs,
labels=labels,
legend=legend,
)
def create_column_chart(
self,
chart_name=None,
title=None,
subtitle=None,
data_name=None,
unit=None,
libs=True,
labels=True,
legend=True,
):
"""Creates a JavaScript column chart using "HighCharts".
@Params
chart_name - If creating multiple charts,
use this to select which one.
title - The title displayed for the chart.
subtitle - The subtitle displayed for the chart.
data_name - The series name. Useful for multi-series charts.
If no data_name, will default to using "Series 1".
unit - The description label given to the chart's y-axis values.
libs - The option to include Chart libraries (JS and CSS files).
Should be set to True (default) for the first time creating
a chart on a web page. If creating multiple charts on the
same web page, you won't need to re-import the libraries
when creating additional charts.
labels - If True, displays labels on the chart for data points.
legend - If True, displays the data point legend on the chart.
"""
if not chart_name:
chart_name = "default"
if not data_name:
data_name = ""
style = "column"
self.__create_highchart(
chart_name=chart_name,
title=title,
subtitle=subtitle,
style=style,
data_name=data_name,
unit=unit,
libs=libs,
labels=labels,
legend=legend,
)
def create_line_chart(
self,
chart_name=None,
title=None,
subtitle=None,
data_name=None,
unit=None,
zero=False,
libs=True,
labels=True,
legend=True,
):
"""Creates a JavaScript line chart using "HighCharts".
@Params
chart_name - If creating multiple charts,
use this to select which one.
title - The title displayed for the chart.
subtitle - The subtitle displayed for the chart.
data_name - The series name. Useful for multi-series charts.
If no data_name, will default to using "Series 1".
unit - The description label given to the chart's y-axis values.
zero - If True, the y-axis always starts at 0. (Default: False).
libs - The option to include Chart libraries (JS and CSS files).
Should be set to True (default) for the first time creating
a chart on a web page. If creating multiple charts on the
same web page, you won't need to re-import the libraries
when creating additional charts.
labels - If True, displays labels on the chart for data points.
legend - If True, displays the data point legend on the chart.
"""
if not chart_name:
chart_name = "default"
if not data_name:
data_name = ""
style = "line"
self.__create_highchart(
chart_name=chart_name,
title=title,
subtitle=subtitle,
style=style,
data_name=data_name,
unit=unit,
zero=zero,
libs=libs,
labels=labels,
legend=legend,
)
def create_area_chart(
self,
chart_name=None,
title=None,
subtitle=None,
data_name=None,
unit=None,
zero=False,
libs=True,
labels=True,
legend=True,
):
"""Creates a JavaScript area chart using "HighCharts".
@Params
chart_name - If creating multiple charts,
use this to select which one.
title - The title displayed for the chart.
subtitle - The subtitle displayed for the chart.
data_name - The series name. Useful for multi-series charts.
If no data_name, will default to using "Series 1".
unit - The description label given to the chart's y-axis values.
zero - If True, the y-axis always starts at 0. (Default: False).
libs - The option to include Chart libraries (JS and CSS files).
Should be set to True (default) for the first time creating
a chart on a web page. If creating multiple charts on the
same web page, you won't need to re-import the libraries
when creating additional charts.
labels - If True, displays labels on the chart for data points.
legend - If True, displays the data point legend on the chart.
"""
if not chart_name:
chart_name = "default"
if not data_name:
data_name = ""
style = "area"
self.__create_highchart(
chart_name=chart_name,
title=title,
subtitle=subtitle,
style=style,
data_name=data_name,
unit=unit,
zero=zero,
libs=libs,
labels=labels,
legend=legend,
)
    def __create_highchart(
        self,
        chart_name=None,
        title=None,
        subtitle=None,
        style=None,
        data_name=None,
        unit=None,
        zero=False,
        libs=True,
        labels=True,
        legend=True,
    ):
        """Creates a JavaScript chart using the "HighCharts" library.
        Shared builder behind create_pie_chart(), create_bar_chart(),
        create_column_chart(), create_line_chart(), and create_area_chart().
        Assembles HTML/JS fragments for the chart and stores them in
        self._chart_data[chart_name]. Data points are added separately by
        add_data_point(), and the chart is later rendered/emitted by
        save_chart(), display_chart(), or extract_chart().
        """
        # Fill in defaults and convert the booleans to JS literal strings.
        if not chart_name:
            chart_name = "default"
        if not title:
            title = ""
        if not subtitle:
            subtitle = ""
        if not style:
            style = "pie"
        if not data_name:
            data_name = "Series 1"
        if not unit:
            unit = "Values"
        if labels:
            labels = "true"
        else:
            labels = "false"
        if legend:
            legend = "true"
        else:
            legend = "false"
        # Escape single-quotes: these values are embedded in JS strings.
        title = title.replace("'", "\\'")
        subtitle = subtitle.replace("'", "\\'")
        unit = unit.replace("'", "\\'")
        self._chart_count += 1
        # If chart_libs format is changed, also change: save_presentation()
        chart_libs = """
        <script src="%s"></script>
        <script src="%s"></script>
        <script src="%s"></script>
        <script src="%s"></script>
        """ % (
            constants.HighCharts.HC_JS,
            constants.HighCharts.EXPORTING_JS,
            constants.HighCharts.EXPORT_DATA_JS,
            constants.HighCharts.ACCESSIBILITY_JS,
        )
        if not libs:
            chart_libs = ""
        # CSS shared by every chart on the page (included once with libs).
        chart_css = """
        <style>
        .highcharts-figure, .highcharts-data-table table {
            min-width: 320px;
            max-width: 660px;
            margin: 1em auto;
        }
        .highcharts-data-table table {
            font-family: Verdana, sans-serif;
            border-collapse: collapse;
            border: 1px solid #EBEBEB;
            margin: 10px auto;
            text-align: center;
            width: 100%;
            max-width: 500px;
        }
        .highcharts-data-table caption {
            padding: 1em 0;
            font-size: 1.2em;
            color: #555;
        }
        .highcharts-data-table th {
            font-weight: 600;
            padding: 0.5em;
        }
        .highcharts-data-table td, .highcharts-data-table th,
        .highcharts-data-table caption {
            padding: 0.5em;
        }
        .highcharts-data-table thead tr,
        .highcharts-data-table tr:nth-child(even) {
            background: #f8f8f8;
        }
        .highcharts-data-table tr:hover {
            background: #f1f7ff;
        }
        </style>
        """
        if not libs:
            chart_css = ""
        # The container div id carries the chart count so that multiple
        # charts on the same page each render into their own element.
        chart_description = ""
        chart_figure = """
        <figure class="highcharts-figure">
            <div id="chartcontainer_num_%s"></div>
            <p class="highcharts-description">%s</p>
        </figure>
        """ % (
            self._chart_count,
            chart_description,
        )
        min_zero = ""
        if zero:
            min_zero = "min: 0,"
        # Part 1 of the Highcharts config: titles, axes, and chart type.
        chart_init_1 = """
        <script>
        // Build the chart
        Highcharts.chart('chartcontainer_num_%s', {
        credits: {
            enabled: false
        },
        title: {
            text: '%s'
        },
        subtitle: {
            text: '%s'
        },
        xAxis: { },
        yAxis: {
        %s
            title: {
                text: '%s',
                style: {
                    fontSize: '14px'
                }
            },
            labels: {
                useHTML: true,
                style: {
                    fontSize: '14px'
                }
            }
        },
        chart: {
            renderTo: 'statusChart',
            plotBackgroundColor: null,
            plotBorderWidth: null,
            plotShadow: false,
            type: '%s'
        },
        """ % (
            self._chart_count,
            title,
            subtitle,
            min_zero,
            unit,
            style,
        )
        # "{series.name}:"
        # Pie tooltips show value + percentage; other styles show value only.
        point_format = (
            r"<b>{point.y}</b><br />" r"<b>{point.percentage:.1f}%</b>"
        )
        if style != "pie":
            point_format = r"<b>{point.y}</b>"
        # Part 2: tooltip styling (shared by all chart styles).
        chart_init_2 = (
            """
        tooltip: {
            enabled: true,
            useHTML: true,
            style: {
                padding: '6px',
                fontSize: '14px'
            },
            backgroundColor: {
                linearGradient: {
                    x1: 0,
                    y1: 0,
                    x2: 0,
                    y2: 1
                },
                stops: [
                    [0, 'rgba(255, 255, 255, 0.78)'],
                    [0.5, 'rgba(235, 235, 235, 0.76)'],
                    [1, 'rgba(244, 252, 255, 0.74)']
                ]
            },
            hideDelay: 40,
            pointFormat: '%s'
        },
        """
            % point_format
        )
        # Part 3: plotOptions — pie version first ("%%" escapes a literal %).
        chart_init_3 = """
        accessibility: {
            point: {
                valueSuffix: '%%'
            }
        },
        plotOptions: {
            series: {
                states: {
                    inactive: {
                        opacity: 0.85
                    }
                }
            },
            pie: {
                size: "95%%",
                allowPointSelect: true,
                animation: false,
                cursor: 'pointer',
                dataLabels: {
                    enabled: %s,
                    formatter: function() {
                        if (this.y > 0) {
                            return this.point.name + ': ' + this.point.y
                        }
                    }
                },
                states: {
                    hover: {
                        enabled: true
                    }
                },
                showInLegend: %s
            }
        },
        """ % (
            labels,
            legend,
        )
        # Non-pie styles replace Part 3 with line/bar/column/area options.
        if style != "pie":
            chart_init_3 = """
            allowPointSelect: true,
            cursor: 'pointer',
            legend: {
                layout: 'vertical',
                align: 'right',
                verticalAlign: 'middle'
            },
            states: {
                hover: {
                    enabled: true
                }
            },
            plotOptions: {
                series: {
                    dataLabels: {
                        enabled: %s
                    },
                    showInLegend: %s,
                    animation: false,
                    shadow: false,
                    lineWidth: 3,
                    fillOpacity: 0.5,
                    marker: {
                        enabled: true
                    }
                }
            },
            """ % (
                labels,
                legend,
            )
        chart_init = chart_init_1 + chart_init_2 + chart_init_3
        color_by_point = "true"
        if style != "pie":
            color_by_point = "false"
        # The series block is left open — add_data_point() appends the data
        # points, and save_chart()/extract_chart() append the closing JS.
        series = """
        series: [{
        name: '%s',
        colorByPoint: %s,
        data: [
        """ % (
            data_name,
            color_by_point,
        )
        new_chart = chart_libs + chart_css + chart_figure + chart_init + series
        # (Re)initialize the per-chart bookkeeping used by the other methods.
        self._chart_data[chart_name] = []
        self._chart_label[chart_name] = []
        self._chart_data[chart_name].append(new_chart)
        self._chart_first_series[chart_name] = True
        self._chart_series_count[chart_name] = 1
def add_series_to_chart(self, data_name=None, chart_name=None):
"""Add a new data series to an existing chart.
This allows charts to have multiple data sets.
@Params
data_name - Set the series name. Useful for multi-series charts.
chart_name - If creating multiple charts,
use this to select which one.
"""
if not chart_name:
chart_name = "default"
self._chart_series_count[chart_name] += 1
if not data_name:
data_name = "Series %s" % self._chart_series_count[chart_name]
series = (
"""
]
},
{
name: '%s',
colorByPoint: false,
data: [
"""
% data_name
)
self._chart_data[chart_name].append(series)
self._chart_first_series[chart_name] = False
def add_data_point(self, label, value, color=None, chart_name=None):
"""Add a data point to a SeleniumBase-generated chart.
@Params
label - The label name for the data point.
value - The numeric value of the data point.
color - The HTML color of the data point.
Can be an RGB color. Eg: "#55ACDC".
Can also be a named color. Eg: "Teal".
chart_name - If creating multiple charts,
use this to select which one.
"""
if not chart_name:
chart_name = "default"
if chart_name not in self._chart_data:
# Create a chart if it doesn't already exist
self.create_pie_chart(chart_name=chart_name)
if not value:
value = 0
if not type(value) is int and not type(value) is float:
raise Exception('Expecting a numeric value for "value"!')
if not color:
color = ""
label = label.replace("'", "\\'")
color = color.replace("'", "\\'")
data_point = """
{
name: '%s',
y: %s,
color: '%s'
},
""" % (
label,
value,
color,
)
self._chart_data[chart_name].append(data_point)
if self._chart_first_series[chart_name]:
self._chart_label[chart_name].append(label)
def save_chart(self, chart_name=None, filename=None, folder=None):
"""Saves a SeleniumBase-generated chart to a file for later use.
@Params
chart_name - If creating multiple charts at the same time,
use this to select the one you wish to use.
filename - The name of the HTML file that you wish to
save the chart to. (filename must end in ".html")
folder - The name of the folder where you wish to
save the HTML file. (Default: "./saved_charts/")
"""
if not chart_name:
chart_name = "default"
if not filename:
filename = "my_chart.html"
if chart_name not in self._chart_data:
raise Exception("Chart {%s} does not exist!" % chart_name)
if not filename.endswith(".html"):
raise Exception('Chart file must end in ".html"!')
the_html = '<meta charset="utf-8">\n'
the_html += '<meta http-equiv="Content-Type" content="text/html">\n'
the_html += '<meta name="viewport" content="shrink-to-fit=no">\n'
for chart_data_point in self._chart_data[chart_name]:
the_html += chart_data_point
the_html += """
]
}]
});
</script>
"""
axis = "xAxis: {\n"
axis += " labels: {\n"
axis += " useHTML: true,\n"
axis += " style: {\n"
axis += " fontSize: '14px',\n"
axis += " },\n"
axis += " },\n"
axis += " categories: ["
for label in self._chart_label[chart_name]:
axis += "'%s'," % label
axis += "], crosshair: false},"
the_html = the_html.replace("xAxis: { },", axis)
if not folder:
saved_charts_folder = constants.Charts.SAVED_FOLDER
else:
saved_charts_folder = folder
if saved_charts_folder.endswith("/"):
saved_charts_folder = saved_charts_folder[:-1]
if not os.path.exists(saved_charts_folder):
try:
os.makedirs(saved_charts_folder)
except Exception:
pass
file_path = saved_charts_folder + "/" + filename
out_file = codecs.open(file_path, "w+", encoding="utf-8")
out_file.writelines(the_html)
out_file.close()
print("\n>>> [%s] was saved!" % file_path)
return file_path
    def display_chart(self, chart_name=None, filename=None, interval=0):
        """Displays a SeleniumBase-generated chart in the browser window.
        @Params
        chart_name - If creating multiple charts at the same time,
        use this to select the one you wish to use.
        filename - The name of the HTML file that you wish to
        save the chart to. (filename must end in ".html")
        interval - The delay time for auto-advancing charts. (in seconds)
        If set to 0 (default), auto-advancing is disabled.
        """
        if self.headless or self.xvfb:
            interval = 1  # Race through chart if running in headless mode
        if not chart_name:
            chart_name = "default"
        if not filename:
            filename = "my_chart.html"
        if not interval:
            interval = 0
        # A test-level interval setting (self.interval) overrides the default.
        if interval == 0 and self.interval:
            interval = float(self.interval)
        # NOTE: type() checks (not isinstance) mean bools are rejected too.
        if not type(interval) is int and not type(interval) is float:
            raise Exception('Expecting a numeric value for "interval"!')
        if interval < 0:
            raise Exception('The "interval" cannot be a negative number!')
        if chart_name not in self._chart_data:
            raise Exception("Chart {%s} does not exist!" % chart_name)
        if not filename.endswith(".html"):
            raise Exception('Chart file must end in ".html"!')
        # Write the chart to disk, then open it in the browser.
        file_path = self.save_chart(chart_name=chart_name, filename=filename)
        self.open_html_file(file_path)
        chart_folder = constants.Charts.SAVED_FOLDER
        if interval == 0:
            # Manual mode: block until the window closes or navigates away.
            try:
                print("\n*** Close the browser window to continue ***")
                # Will also continue if manually navigating to a new page
                while len(self.driver.window_handles) > 0 and (
                    chart_folder in self.get_current_url()
                ):
                    time.sleep(0.05)
            except Exception:
                pass
        else:
            # Timed mode: poll in 0.1s steps until the interval elapses,
            # the window closes, or the user navigates somewhere else.
            try:
                start_ms = time.time() * 1000.0
                stop_ms = start_ms + (interval * 1000.0)
                for x in range(int(interval * 10)):
                    now_ms = time.time() * 1000.0
                    if now_ms >= stop_ms:
                        break
                    if len(self.driver.window_handles) == 0:
                        break
                    if chart_folder not in self.get_current_url():
                        break
                    time.sleep(0.1)
            except Exception:
                pass
def extract_chart(self, chart_name=None):
"""Extracts the HTML from a SeleniumBase-generated chart.
@Params
chart_name - If creating multiple charts at the same time,
use this to select the one you wish to use.
"""
if not chart_name:
chart_name = "default"
if chart_name not in self._chart_data:
raise Exception("Chart {%s} does not exist!" % chart_name)
the_html = ""
for chart_data_point in self._chart_data[chart_name]:
the_html += chart_data_point
the_html += """
]
}]
});
</script>
"""
axis = "xAxis: {\n"
axis += " labels: {\n"
axis += " useHTML: true,\n"
axis += " style: {\n"
axis += " fontSize: '14px',\n"
axis += " },\n"
axis += " },\n"
axis += " categories: ["
for label in self._chart_label[chart_name]:
axis += "'%s'," % label
axis += "], crosshair: false},"
the_html = the_html.replace("xAxis: { },", axis)
self._chart_xcount += 1
the_html = the_html.replace(
"chartcontainer_num_", "chartcontainer_%s_" % self._chart_xcount
)
return the_html
############
def create_tour(self, name=None, theme=None):
"""Creates a guided tour for any website.
The default theme is the IntroJS Library.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
theme - Sets the default theme for the website tour. Available themes:
"Bootstrap", "DriverJS", "Hopscotch", "IntroJS", "Shepherd".
The "Shepherd" library also contains multiple variation themes:
"light"/"arrows", "dark", "default", "square", "square-dark".
"""
if not name:
name = "default"
if theme:
if theme.lower() == "bootstrap":
self.create_bootstrap_tour(name)
elif theme.lower() == "hopscotch":
self.create_hopscotch_tour(name)
elif theme.lower() == "intro":
self.create_introjs_tour(name)
elif theme.lower() == "introjs":
self.create_introjs_tour(name)
elif theme.lower() == "driver":
self.create_driverjs_tour(name)
elif theme.lower() == "driverjs":
self.create_driverjs_tour(name)
elif theme.lower() == "shepherd":
self.create_shepherd_tour(name, theme="light")
elif theme.lower() == "light":
self.create_shepherd_tour(name, theme="light")
elif theme.lower() == "arrows":
self.create_shepherd_tour(name, theme="light")
elif theme.lower() == "dark":
self.create_shepherd_tour(name, theme="dark")
elif theme.lower() == "square":
self.create_shepherd_tour(name, theme="square")
elif theme.lower() == "square-dark":
self.create_shepherd_tour(name, theme="square-dark")
elif theme.lower() == "default":
self.create_shepherd_tour(name, theme="default")
else:
self.create_introjs_tour(name)
else:
self.create_introjs_tour(name)
def create_shepherd_tour(self, name=None, theme=None):
"""Creates a Shepherd JS website tour.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
theme - Sets the default theme for the tour.
Choose from "light"/"arrows", "dark", "default", "square",
and "square-dark". ("light" is used if None is selected.)
"""
shepherd_theme = "shepherd-theme-arrows"
if theme:
if theme.lower() == "default":
shepherd_theme = "shepherd-theme-default"
elif theme.lower() == "dark":
shepherd_theme = "shepherd-theme-dark"
elif theme.lower() == "light":
shepherd_theme = "shepherd-theme-arrows"
elif theme.lower() == "arrows":
shepherd_theme = "shepherd-theme-arrows"
elif theme.lower() == "square":
shepherd_theme = "shepherd-theme-square"
elif theme.lower() == "square-dark":
shepherd_theme = "shepherd-theme-square-dark"
if not name:
name = "default"
new_tour = (
"""
// Shepherd Tour
var tour = new Shepherd.Tour({
defaults: {
classes: '%s',
scrollTo: true
}
});
var allButtons = {
skip: {
text: "Skip",
action: tour.cancel,
classes: 'shepherd-button-secondary tour-button-left'
},
back: {
text: "Back",
action: tour.back,
classes: 'shepherd-button-secondary'
},
next: {
text: "Next",
action: tour.next,
classes: 'shepherd-button-primary tour-button-right'
},
};
var firstStepButtons = [allButtons.skip, allButtons.next];
var midTourButtons = [allButtons.back, allButtons.next];
"""
% shepherd_theme
)
self._tour_steps[name] = []
self._tour_steps[name].append(new_tour)
def create_bootstrap_tour(self, name=None):
"""Creates a Bootstrap tour for a website.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
"""
if not name:
name = "default"
new_tour = """
// Bootstrap Tour
var tour = new Tour({
container: 'body',
animation: true,
keyboard: true,
orphan: true,
smartPlacement: true,
autoscroll: true,
backdrop: true,
backdropContainer: 'body',
backdropPadding: 3,
});
tour.addSteps([
"""
self._tour_steps[name] = []
self._tour_steps[name].append(new_tour)
def create_driverjs_tour(self, name=None):
"""Creates a DriverJS tour for a website.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
"""
if not name:
name = "default"
new_tour = """
// DriverJS Tour
var tour = new Driver({
opacity: 0.24, // Background opacity (0: no popover / overlay)
padding: 6, // Distance of element from around the edges
allowClose: false, // Whether clicking on overlay should close
overlayClickNext: false, // Move to next step on overlay click
doneBtnText: 'Done', // Text that appears on the Done button
closeBtnText: 'Close', // Text appearing on the Close button
nextBtnText: 'Next', // Text that appears on the Next button
prevBtnText: 'Previous', // Text appearing on Previous button
showButtons: true, // This shows control buttons in the footer
keyboardControl: true, // (escape to close, arrow keys to move)
animate: true, // Animate while changing highlighted element
});
tour.defineSteps([
"""
self._tour_steps[name] = []
self._tour_steps[name].append(new_tour)
def create_hopscotch_tour(self, name=None):
"""Creates a Hopscotch tour for a website.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
"""
if not name:
name = "default"
new_tour = """
// Hopscotch Tour
var tour = {
id: "hopscotch_tour",
steps: [
"""
self._tour_steps[name] = []
self._tour_steps[name].append(new_tour)
def create_introjs_tour(self, name=None):
"""Creates an IntroJS tour for a website.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
"""
if not hasattr(sb_config, "introjs_theme_color"):
sb_config.introjs_theme_color = constants.TourColor.theme_color
if not hasattr(sb_config, "introjs_hover_color"):
sb_config.introjs_hover_color = constants.TourColor.hover_color
if not name:
name = "default"
new_tour = """
// IntroJS Tour
function startIntro(){
var intro = introJs();
intro.setOptions({
steps: [
"""
self._tour_steps[name] = []
self._tour_steps[name].append(new_tour)
def set_introjs_colors(self, theme_color=None, hover_color=None):
"""Use this method to set the theme colors for IntroJS tours.
Args must be hex color values that start with a "#" sign.
If a color isn't specified, the color will reset to the default.
The border color of buttons is set to the hover color.
@Params
theme_color - The color of buttons.
hover_color - The color of buttons after hovering over them.
"""
if not hasattr(sb_config, "introjs_theme_color"):
sb_config.introjs_theme_color = constants.TourColor.theme_color
if not hasattr(sb_config, "introjs_hover_color"):
sb_config.introjs_hover_color = constants.TourColor.hover_color
if theme_color:
match = re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', theme_color)
if not match:
raise Exception(
'Expecting a hex value color that starts with "#"!')
sb_config.introjs_theme_color = theme_color
else:
sb_config.introjs_theme_color = constants.TourColor.theme_color
if hover_color:
match = re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', hover_color)
if not match:
raise Exception(
'Expecting a hex value color that starts with "#"!')
sb_config.introjs_hover_color = hover_color
else:
sb_config.introjs_hover_color = constants.TourColor.hover_color
def add_tour_step(
self,
message,
selector=None,
name=None,
title=None,
theme=None,
alignment=None,
duration=None,
):
"""Allows the user to add tour steps for a website.
@Params
message - The message to display.
selector - The CSS Selector of the Element to attach to.
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
title - Additional header text that appears above the message.
theme - (Shepherd Tours ONLY) The styling of the tour step.
Choose from "light"/"arrows", "dark", "default", "square",
and "square-dark". ("arrows" is used if None is selected.)
alignment - Choose from "top", "bottom", "left", and "right".
("top" is default, except for Hopscotch and DriverJS).
duration - (Bootstrap Tours ONLY) The amount of time, in seconds,
before automatically advancing to the next tour step.
"""
if not selector:
selector = "html"
if page_utils.is_name_selector(selector):
name = page_utils.get_name_from_selector(selector)
selector = '[name="%s"]' % name
if page_utils.is_xpath_selector(selector):
selector = self.convert_to_css_selector(selector, By.XPATH)
selector = self.__escape_quotes_if_needed(selector)
if not name:
name = "default"
if name not in self._tour_steps:
# By default, will create an IntroJS tour if no tours exist
self.create_tour(name=name, theme="introjs")
if not title:
title = ""
title = self.__escape_quotes_if_needed(title)
if message:
message = self.__escape_quotes_if_needed(message)
else:
message = ""
if not alignment or alignment not in [
"top",
"bottom",
"left",
"right",
]:
t_name = self._tour_steps[name][0]
if "Hopscotch" not in t_name and "DriverJS" not in t_name:
alignment = "top"
else:
alignment = "bottom"
if "Bootstrap" in self._tour_steps[name][0]:
self.__add_bootstrap_tour_step(
message,
selector=selector,
name=name,
title=title,
alignment=alignment,
duration=duration,
)
elif "DriverJS" in self._tour_steps[name][0]:
self.__add_driverjs_tour_step(
message,
selector=selector,
name=name,
title=title,
alignment=alignment,
)
elif "Hopscotch" in self._tour_steps[name][0]:
self.__add_hopscotch_tour_step(
message,
selector=selector,
name=name,
title=title,
alignment=alignment,
)
elif "IntroJS" in self._tour_steps[name][0]:
self.__add_introjs_tour_step(
message,
selector=selector,
name=name,
title=title,
alignment=alignment,
)
else:
self.__add_shepherd_tour_step(
message,
selector=selector,
name=name,
title=title,
theme=theme,
alignment=alignment,
)
def __add_shepherd_tour_step(
self,
message,
selector=None,
name=None,
title=None,
theme=None,
alignment=None,
):
"""Allows the user to add tour steps for a website.
@Params
message - The message to display.
selector - The CSS Selector of the Element to attach to.
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
title - Additional header text that appears above the message.
theme - (Shepherd Tours ONLY) The styling of the tour step.
Choose from "light"/"arrows", "dark", "default", "square",
and "square-dark". ("arrows" is used if None is selected.)
alignment - Choose from "top", "bottom", "left", and "right".
("top" is the default alignment).
"""
if theme == "default":
shepherd_theme = "shepherd-theme-default"
elif theme == "dark":
shepherd_theme = "shepherd-theme-dark"
elif theme == "light":
shepherd_theme = "shepherd-theme-arrows"
elif theme == "arrows":
shepherd_theme = "shepherd-theme-arrows"
elif theme == "square":
shepherd_theme = "shepherd-theme-square"
elif theme == "square-dark":
shepherd_theme = "shepherd-theme-square-dark"
else:
shepherd_base_theme = re.search(
r"[\S\s]+classes: '([\S\s]+)',[\S\s]+",
self._tour_steps[name][0],
).group(1)
shepherd_theme = shepherd_base_theme
shepherd_classes = shepherd_theme
if selector == "html":
shepherd_classes += " shepherd-orphan"
buttons = "firstStepButtons"
if len(self._tour_steps[name]) > 1:
buttons = "midTourButtons"
step = """tour.addStep('%s', {
title: '%s',
classes: '%s',
text: '%s',
attachTo: {element: '%s', on: '%s'},
buttons: %s,
advanceOn: '.docs-link click'
});""" % (
name,
title,
shepherd_classes,
message,
selector,
alignment,
buttons,
)
self._tour_steps[name].append(step)
def __add_bootstrap_tour_step(
self,
message,
selector=None,
name=None,
title=None,
alignment=None,
duration=None,
):
"""Allows the user to add tour steps for a website.
@Params
message - The message to display.
selector - The CSS Selector of the Element to attach to.
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
title - Additional header text that appears above the message.
alignment - Choose from "top", "bottom", "left", and "right".
("top" is the default alignment).
duration - (Bootstrap Tours ONLY) The amount of time, in seconds,
before automatically advancing to the next tour step.
"""
if selector != "html":
selector = self.__make_css_match_first_element_only(selector)
element_row = "element: '%s'," % selector
else:
element_row = ""
if not duration:
duration = "0"
else:
duration = str(float(duration) * 1000.0)
bd = "backdrop: true,"
if selector == "html":
bd = "backdrop: false,"
step = """{
%s
title: '%s',
content: '%s',
orphan: true,
autoscroll: true,
%s
placement: 'auto %s',
smartPlacement: true,
duration: %s,
},""" % (
element_row,
title,
message,
bd,
alignment,
duration,
)
self._tour_steps[name].append(step)
def __add_driverjs_tour_step(
self, message, selector=None, name=None, title=None, alignment=None
):
"""Allows the user to add tour steps for a website.
@Params
message - The message to display.
selector - The CSS Selector of the Element to attach to.
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
title - Additional header text that appears above the message.
alignment - Choose from "top", "bottom", "left", and "right".
("top" is the default alignment).
"""
message = (
'<font size="3" color="#33477B"><b>' + message + "</b></font>"
)
title_row = ""
if not title:
title_row = "title: '%s'," % message
message = ""
else:
title_row = "title: '%s'," % title
align_row = "position: '%s'," % alignment
ani_row = "animate: true,"
if not selector or selector == "html" or selector == "body":
selector = "body"
ani_row = "animate: false,"
align_row = "position: '%s'," % "mid-center"
element_row = "element: '%s'," % selector
desc_row = "description: '%s'," % message
step = """{
%s
%s
popover: {
className: 'popover-class',
%s
%s
%s
}
},""" % (
element_row,
ani_row,
title_row,
desc_row,
align_row,
)
self._tour_steps[name].append(step)
def __add_hopscotch_tour_step(
self, message, selector=None, name=None, title=None, alignment=None
):
"""Allows the user to add tour steps for a website.
@Params
message - The message to display.
selector - The CSS Selector of the Element to attach to.
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
title - Additional header text that appears above the message.
alignment - Choose from "top", "bottom", "left", and "right".
("bottom" is the default alignment).
"""
arrow_offset_row = None
if not selector or selector == "html":
selector = "head"
alignment = "bottom"
arrow_offset_row = "arrowOffset: '200',"
else:
arrow_offset_row = ""
step = """{
target: '%s',
title: '%s',
content: '%s',
%s
showPrevButton: 'true',
scrollDuration: '550',
placement: '%s'},
""" % (
selector,
title,
message,
arrow_offset_row,
alignment,
)
self._tour_steps[name].append(step)
def __add_introjs_tour_step(
self, message, selector=None, name=None, title=None, alignment=None
):
"""Allows the user to add tour steps for a website.
@Params
message - The message to display.
selector - The CSS Selector of the Element to attach to.
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
title - Additional header text that appears above the message.
alignment - Choose from "top", "bottom", "left", and "right".
("top" is the default alignment).
"""
if selector != "html":
element_row = "element: '%s'," % selector
else:
element_row = ""
if title:
message = "<center><b>" + title + "</b></center><hr>" + message
message = '<font size="3" color="#33477B">' + message + "</font>"
step = """{%s
intro: '%s',
position: '%s'},""" % (
element_row,
message,
alignment,
)
self._tour_steps[name].append(step)
def play_tour(self, name=None, interval=0):
    """Plays a tour on the current website.
    @Params
    name - If creating multiple tours at the same time,
    use this to select the tour you wish to add steps to.
    interval - The delay time between autoplaying tour steps. (Seconds)
    If set to 0 (default), the tour is fully manual control.
    """
    from seleniumbase.core import tour_helper
    if self.headless or self.xvfb:
        return  # Tours should not run in headless mode.
    self.wait_for_ready_state_complete()
    if not interval:
        interval = 0
    # A command-line/default interval (self.interval) is only used when
    # no explicit interval was passed in.
    if interval == 0 and self.interval:
        interval = float(self.interval)
    if not name:
        name = "default"
    if name not in self._tour_steps:
        raise Exception("Tour {%s} does not exist!" % name)
    # Each tour library's step builder embeds its library name in the
    # first step's JS snippet; dispatch to the matching player on that.
    if "Bootstrap" in self._tour_steps[name][0]:
        tour_helper.play_bootstrap_tour(
            self.driver,
            self._tour_steps,
            self.browser,
            self.message_duration,
            name=name,
            interval=interval,
        )
    elif "DriverJS" in self._tour_steps[name][0]:
        tour_helper.play_driverjs_tour(
            self.driver,
            self._tour_steps,
            self.browser,
            self.message_duration,
            name=name,
            interval=interval,
        )
    elif "Hopscotch" in self._tour_steps[name][0]:
        tour_helper.play_hopscotch_tour(
            self.driver,
            self._tour_steps,
            self.browser,
            self.message_duration,
            name=name,
            interval=interval,
        )
    elif "IntroJS" in self._tour_steps[name][0]:
        tour_helper.play_introjs_tour(
            self.driver,
            self._tour_steps,
            self.browser,
            self.message_duration,
            name=name,
            interval=interval,
        )
    else:
        # "Shepherd"
        # NOTE: the Shepherd player does not take a browser argument.
        tour_helper.play_shepherd_tour(
            self.driver,
            self._tour_steps,
            self.message_duration,
            name=name,
            interval=interval,
        )
def export_tour(self, name=None, filename="my_tour.js", url=None):
    """Saves a website tour as a standalone JavaScript file.
    Usable anywhere you would otherwise call self.play_tour().
    The exported file bundles required resources (such as jQuery),
    so it can be pasted into any browser Console to replay the tour
    outside of SeleniumBase.
    @Params
    name - When multiple tours exist, selects which one to export.
    filename - Name of the JavaScript file to write the tour into.
    url - The tour's starting URL. Defaults to the current page URL.
    """
    from seleniumbase.core import tour_helper
    # Fall back to the page currently loaded in the browser.
    start_url = url if url else self.get_current_url()
    tour_helper.export_tour(
        self._tour_steps, name=name, filename=filename, url=start_url
    )
############
def activate_jquery_confirm(self):
    """ See https://craftpip.github.io/jquery-confirm/ for usage. """
    self.__check_scope()
    self.__check_browser()
    # Load the jquery-confirm resources into the current page,
    # then wait until the page is done processing them.
    js_utils.activate_jquery_confirm(self.driver)
    self.wait_for_ready_state_complete()
def set_jqc_theme(self, theme, color=None, width=None):
    """ Sets the default jquery-confirm theme and width (optional).
    Available themes: "bootstrap", "modern", "material", "supervan",
    "light", "dark", and "seamless".
    Available colors: (This sets the BORDER color, NOT the button color.)
    "blue", "default", "green", "red", "purple", "orange", "dark".
    Width can be set using percent or pixels. Eg: "36.0%", "450px".
    Raises an Exception for an invalid theme, color, or width.
    """
    if not self.__changed_jqc_theme:
        # Save the factory defaults (once) so that reset_jqc_theme()
        # can restore them later.
        self.__jqc_default_theme = constants.JqueryConfirm.DEFAULT_THEME
        self.__jqc_default_color = constants.JqueryConfirm.DEFAULT_COLOR
        self.__jqc_default_width = constants.JqueryConfirm.DEFAULT_WIDTH
    valid_themes = [
        "bootstrap",
        "modern",
        "material",
        "supervan",
        "light",
        "dark",
        "seamless",
    ]
    if theme.lower() not in valid_themes:
        raise Exception(
            "%s is not a valid jquery-confirm theme! "
            "Select from %s" % (theme.lower(), valid_themes)
        )
    constants.JqueryConfirm.DEFAULT_THEME = theme.lower()
    # Bug fix: mark the theme as changed so reset_jqc_theme() actually
    # restores the saved defaults. (This flag was previously never set
    # to True, which made reset_jqc_theme() a permanent no-op.)
    self.__changed_jqc_theme = True
    if color:
        valid_colors = [
            "blue",
            "default",
            "green",
            "red",
            "purple",
            "orange",
            "dark",
        ]
        if color.lower() not in valid_colors:
            raise Exception(
                "%s is not a valid jquery-confirm border color! "
                "Select from %s" % (color.lower(), valid_colors)
            )
        constants.JqueryConfirm.DEFAULT_COLOR = color.lower()
    if width:
        if type(width) is int or type(width) is float:
            # Convert to a string if a number is given
            width = str(width)
        if width.isnumeric():
            if int(width) <= 0:
                raise Exception("Width must be set to a positive number!")
            elif int(width) <= 100:
                width = str(width) + "%"
            else:
                width = str(width) + "px"  # Use pixels if width is > 100
        if not width.endswith("%") and not width.endswith("px"):
            raise Exception(
                "jqc width must end with %% for percent or px for pixels!"
            )
        # Validate the numeric portion of the width string.
        value = None
        if width.endswith("%"):
            value = width[:-1]
        if width.endswith("px"):
            value = width[:-2]
        try:
            value = float(value)
        except Exception:
            raise Exception("%s is not a numeric value!" % value)
        if value <= 0:
            raise Exception("%s is not a positive number!" % value)
        constants.JqueryConfirm.DEFAULT_WIDTH = width
def reset_jqc_theme(self):
    """ Resets the jqc theme settings to factory defaults. """
    # Only restores when set_jqc_theme() has flagged a change;
    # the saved defaults come from the first set_jqc_theme() call.
    if self.__changed_jqc_theme:
        constants.JqueryConfirm.DEFAULT_THEME = self.__jqc_default_theme
        constants.JqueryConfirm.DEFAULT_COLOR = self.__jqc_default_color
        constants.JqueryConfirm.DEFAULT_WIDTH = self.__jqc_default_width
        self.__changed_jqc_theme = False
def get_jqc_button_input(self, message, buttons, options=None):
    """
    Pop up a jquery-confirm box and return the text of the button clicked.
    If running in headless mode, the last button text is returned.
    @Params
    message: The message to display in the jquery-confirm dialog.
    buttons: A list of tuples for text and color.
    Example: [("Yes!", "green"), ("No!", "red")]
    Available colors: blue, green, red, orange, purple, default, dark.
    A simple text string also works: "My Button". (Uses default color.)
    options: A list of tuples for options to set.
    Example: [("theme", "bootstrap"), ("width", "450px")]
    Available theme options: bootstrap, modern, material, supervan,
    light, dark, and seamless.
    Available colors: (For the BORDER color, NOT the button color.)
    "blue", "default", "green", "red", "purple", "orange", "dark".
    Example option for changing the border color: ("color", "default")
    Width can be set using percent or pixels. Eg: "36.0%", "450px".
    """
    from seleniumbase.core import jqc_helper
    if message and type(message) is not str:
        raise Exception('Expecting a string for arg: "message"!')
    # NOTE(review): the message below says "button" but the arg is
    # "buttons" — confirm before changing the user-facing text.
    if not type(buttons) is list and not type(buttons) is tuple:
        raise Exception('Expecting a list or tuple for arg: "button"!')
    if len(buttons) < 1:
        raise Exception('List "buttons" requires at least one button!')
    # Normalize every entry to either bare text or a (text, color) tuple.
    new_buttons = []
    for button in buttons:
        if (
            (type(button) is list or type(button) is tuple)
            and (len(button) == 1)
        ):
            new_buttons.append(button[0])
        elif (
            (type(button) is list or type(button) is tuple)
            and (len(button) > 1)
        ):
            new_buttons.append((button[0], str(button[1]).lower()))
        else:
            new_buttons.append((str(button), ""))
    buttons = new_buttons
    if options:
        for option in options:
            if not type(option) is list and not type(option) is tuple:
                raise Exception('"options" should be a list of tuples!')
    # Dialogs can't be clicked headlessly: return the last button's text.
    if self.headless or self.xvfb:
        return buttons[-1][0]
    jqc_helper.jquery_confirm_button_dialog(
        self.driver, message, buttons, options
    )
    self.sleep(0.02)
    # Focus the dialog so keyboard interaction works immediately.
    jf = "document.querySelector('.jconfirm-box').focus();"
    try:
        self.execute_script(jf)
    except Exception:
        pass
    # Poll until the dialog closes (jconfirm.instances becomes empty).
    waiting_for_response = True
    while waiting_for_response:
        self.sleep(0.05)
        jqc_open = self.execute_script(
            "return jconfirm.instances.length"
        )
        if str(jqc_open) == "0":
            break
    self.sleep(0.1)
    # $jqc_status is set by the dialog's button handler; fall back to
    # jconfirm's own record of the last button clicked.
    status = None
    try:
        status = self.execute_script("return $jqc_status")
    except Exception:
        status = self.execute_script(
            "return jconfirm.lastButtonText"
        )
    return status
def get_jqc_text_input(self, message, button=None, options=None):
    """
    Pop up a jquery-confirm box and return the text submitted by the input.
    If running in headless mode, the text returned is "" by default.
    @Params
    message: The message to display in the jquery-confirm dialog.
    button: A 2-item list or tuple for text and color. Or just the text.
    Example: ["Submit", "blue"] -> (default button if not specified)
    Available colors: blue, green, red, orange, purple, default, dark.
    A simple text string also works: "My Button". (Uses default color.)
    options: A list of tuples for options to set.
    Example: [("theme", "bootstrap"), ("width", "450px")]
    Available theme options: bootstrap, modern, material, supervan,
    light, dark, and seamless.
    Available colors: (For the BORDER color, NOT the button color.)
    "blue", "default", "green", "red", "purple", "orange", "dark".
    Example option for changing the border color: ("color", "default")
    Width can be set using percent or pixels. Eg: "36.0%", "450px".
    """
    from seleniumbase.core import jqc_helper
    if message and type(message) is not str:
        raise Exception('Expecting a string for arg: "message"!')
    # Normalize the button arg to a (text, color) tuple, validating any
    # explicit color. Defaults to a blue "Submit" button.
    if button:
        if (
            (type(button) is list or type(button) is tuple)
            and (len(button) == 1)
        ):
            button = (str(button[0]), "")
        elif (
            (type(button) is list or type(button) is tuple)
            and (len(button) > 1)
        ):
            valid_colors = [
                "blue",
                "default",
                "green",
                "red",
                "purple",
                "orange",
                "dark",
            ]
            detected_color = str(button[1]).lower()
            if str(button[1]).lower() not in valid_colors:
                raise Exception(
                    "%s is an invalid jquery-confirm button color!\n"
                    "Select from %s" % (detected_color, valid_colors)
                )
            button = (str(button[0]), str(button[1]).lower())
        else:
            button = (str(button), "")
    else:
        button = ("Submit", "blue")
    if options:
        for option in options:
            if not type(option) is list and not type(option) is tuple:
                raise Exception('"options" should be a list of tuples!')
    # The dialog can't be typed into headlessly: return an empty string.
    if self.headless or self.xvfb:
        return ""
    jqc_helper.jquery_confirm_text_dialog(
        self.driver, message, button, options
    )
    self.sleep(0.02)
    # Focus the dialog's text input so the user can type immediately.
    jf = "document.querySelector('.jconfirm-box input.jqc_input').focus();"
    try:
        self.execute_script(jf)
    except Exception:
        pass
    # Poll until the dialog closes (jconfirm.instances becomes empty).
    waiting_for_response = True
    while waiting_for_response:
        self.sleep(0.05)
        jqc_open = self.execute_script(
            "return jconfirm.instances.length"
        )
        if str(jqc_open) == "0":
            break
    self.sleep(0.1)
    # $jqc_input is set by the dialog's submit handler; fall back to
    # jconfirm's own record of the last input text.
    status = None
    try:
        status = self.execute_script("return $jqc_input")
    except Exception:
        status = self.execute_script(
            "return jconfirm.lastInputText"
        )
    return status
def get_jqc_form_inputs(self, message, buttons, options=None):
    """
    Pop up a jquery-confirm box and return the input/button texts as tuple.
    If running in headless mode, returns the ("", buttons[-1][0]) tuple.
    @Params
    message: The message to display in the jquery-confirm dialog.
    buttons: A list of tuples for text and color.
    Example: [("Yes!", "green"), ("No!", "red")]
    Available colors: blue, green, red, orange, purple, default, dark.
    A simple text string also works: "My Button". (Uses default color.)
    options: A list of tuples for options to set.
    Example: [("theme", "bootstrap"), ("width", "450px")]
    Available theme options: bootstrap, modern, material, supervan,
    light, dark, and seamless.
    Available colors: (For the BORDER color, NOT the button color.)
    "blue", "default", "green", "red", "purple", "orange", "dark".
    Example option for changing the border color: ("color", "default")
    Width can be set using percent or pixels. Eg: "36.0%", "450px".
    """
    from seleniumbase.core import jqc_helper
    if message and type(message) is not str:
        raise Exception('Expecting a string for arg: "message"!')
    # NOTE(review): the message below says "button" but the arg is
    # "buttons" — confirm before changing the user-facing text.
    if not type(buttons) is list and not type(buttons) is tuple:
        raise Exception('Expecting a list or tuple for arg: "button"!')
    if len(buttons) < 1:
        raise Exception('List "buttons" requires at least one button!')
    # Normalize every entry to either bare text or a (text, color) tuple.
    new_buttons = []
    for button in buttons:
        if (
            (type(button) is list or type(button) is tuple)
            and (len(button) == 1)
        ):
            new_buttons.append(button[0])
        elif (
            (type(button) is list or type(button) is tuple)
            and (len(button) > 1)
        ):
            new_buttons.append((button[0], str(button[1]).lower()))
        else:
            new_buttons.append((str(button), ""))
    buttons = new_buttons
    if options:
        for option in options:
            if not type(option) is list and not type(option) is tuple:
                raise Exception('"options" should be a list of tuples!')
    # The dialog can't be used headlessly: return the documented default.
    if self.headless or self.xvfb:
        return ("", buttons[-1][0])
    jqc_helper.jquery_confirm_full_dialog(
        self.driver, message, buttons, options
    )
    self.sleep(0.02)
    # Focus the dialog's text input so the user can type immediately.
    jf = "document.querySelector('.jconfirm-box input.jqc_input').focus();"
    try:
        self.execute_script(jf)
    except Exception:
        pass
    # Poll until the dialog closes (jconfirm.instances becomes empty).
    waiting_for_response = True
    while waiting_for_response:
        self.sleep(0.05)
        jqc_open = self.execute_script(
            "return jconfirm.instances.length"
        )
        if str(jqc_open) == "0":
            break
    self.sleep(0.1)
    # $jqc_input / $jqc_status are set by the dialog's handlers; fall
    # back to jconfirm's own record of the last input/button texts.
    text_status = None
    button_status = None
    try:
        text_status = self.execute_script("return $jqc_input")
        button_status = self.execute_script("return $jqc_status")
    except Exception:
        text_status = self.execute_script(
            "return jconfirm.lastInputText"
        )
        button_status = self.execute_script(
            "return jconfirm.lastButtonText"
        )
    return (text_status, button_status)
############
def activate_messenger(self):
    """Loads the Messenger notification resources into the current
    page (via js_utils), then waits for the page to finish loading."""
    self.__check_scope()
    self.__check_browser()
    js_utils.activate_messenger(self.driver)
    self.wait_for_ready_state_complete()
def set_messenger_theme(
    self, theme="default", location="default", max_messages="default"
):
    """Configures how Messenger notifications are displayed.
    Themes: ["flat", "future", "block", "air", "ice"]
    Locations: ["top_left", "top_center", "top_right",
    "bottom_left", "bottom_center", "bottom_right"]
    max_messages is the limit of concurrent messages to display.
    """
    self.__check_scope()
    self.__check_browser()
    # Empty / None values fall back to "default", which js_utils maps
    # to its own built-in settings. max_messages must be a string.
    theme = theme if theme else "default"
    location = location if location else "default"
    max_messages = str(max_messages) if max_messages else "default"
    js_utils.set_messenger_theme(
        self.driver,
        theme=theme,
        location=location,
        max_messages=max_messages,
    )
def post_message(self, message, duration=None, pause=True, style="info"):
    """Post a message on the screen with Messenger.
    Arguments:
    message: The message to display.
    duration: The time until the message vanishes. (Default: 2.55s)
    pause: If True, the program waits until the message completes.
    style: "info", "success", or "error".
    You can also post messages by using =>
    self.execute_script('Messenger().post("My Message")')
    """
    self.__check_scope()
    self.__check_browser()
    # Unknown styles silently fall back to "info".
    if style not in ["info", "success", "error"]:
        style = "info"
    if not duration:
        if not self.message_duration:
            duration = settings.DEFAULT_MESSAGE_DURATION
        else:
            duration = self.message_duration
    # Headless runs can't be watched, so cap the display time.
    if (self.headless or self.xvfb) and float(duration) > 0.75:
        duration = 0.75
    try:
        js_utils.post_message(self.driver, message, duration, style=style)
    except Exception:
        # If on-screen display fails, degrade to console output.
        print(" * %s message: %s" % (style.upper(), message))
    if pause:
        # Small buffer so the message fully fades before continuing.
        duration = float(duration) + 0.15
        time.sleep(float(duration))
def post_message_and_highlight(
    self, message, selector, by=By.CSS_SELECTOR
):
    """Post a message on the screen and highlight an element.
    Arguments:
    message: The message to display.
    selector: The selector of the Element to highlight.
    by: The type of selector to search by. (Default: CSS Selector)
    """
    self.__check_scope()
    # Delegates to the shared highlight-with-message helper.
    self.__highlight_with_assert_success(message, selector, by=by)
def post_success_message(self, message, duration=None, pause=True):
    """Display a Messenger notification styled as a success message.
    Arguments:
    message: The success message to display.
    duration: The time until the message vanishes. (Default: 2.55s)
    pause: If True, the program waits until the message completes.
    """
    self.__check_scope()
    self.__check_browser()
    # Resolve the display time: explicit arg > test setting > default.
    if not duration:
        duration = self.message_duration or settings.DEFAULT_MESSAGE_DURATION
    # Headless runs can't be watched, so cap the display time.
    if (self.headless or self.xvfb) and float(duration) > 0.75:
        duration = 0.75
    try:
        js_utils.post_message(
            self.driver, message, duration, style="success"
        )
    except Exception:
        # Degrade to console output if on-screen display fails.
        print(" * SUCCESS message: %s" % message)
    if pause:
        # Small buffer so the message fully fades before continuing.
        time.sleep(float(duration) + 0.15)
def post_error_message(self, message, duration=None, pause=True):
    """Display a Messenger notification styled as an error message.
    Arguments:
    message: The error message to display.
    duration: The time until the message vanishes. (Default: 2.55s)
    pause: If True, the program waits until the message completes.
    """
    self.__check_scope()
    self.__check_browser()
    # Resolve the display time: explicit arg > test setting > default.
    if not duration:
        duration = self.message_duration or settings.DEFAULT_MESSAGE_DURATION
    # Headless runs can't be watched, so cap the display time.
    if (self.headless or self.xvfb) and float(duration) > 0.75:
        duration = 0.75
    try:
        js_utils.post_message(
            self.driver, message, duration, style="error"
        )
    except Exception:
        # Degrade to console output if on-screen display fails.
        print(" * ERROR message: %s" % message)
    if pause:
        # Small buffer so the message fully fades before continuing.
        time.sleep(float(duration) + 0.15)
############
def generate_referral(self, start_page, destination_page, selector=None):
    """This method opens the start_page, creates a referral link there,
    and clicks on that link, which goes to the destination_page.
    If a selector is given, clicks that on the destination_page,
    which can prevent an artificial rise in website bounce-rate.
    (This generates real traffic for testing analytics software.)"""
    self.__check_scope()
    if not page_utils.is_valid_url(destination_page):
        raise Exception(
            "Exception: destination_page {%s} is not a valid URL!"
            % destination_page
        )
    # start_page is optional: empty/None means "start from current page".
    if start_page:
        if not page_utils.is_valid_url(start_page):
            raise Exception(
                "Exception: start_page {%s} is not a valid URL! "
                "(Use an empty string or None to start from current page.)"
                % start_page
            )
        self.open(start_page)
        time.sleep(0.08)
        self.wait_for_ready_state_complete()
    # Replace the page body with a single styled anchor pointing at the
    # destination, so clicking it produces a genuine referral request.
    referral_link = (
        """<body>"""
        """<a class='analytics referral test' href='%s' """
        """style='font-family: Arial,sans-serif; """
        """font-size: 30px; color: #18a2cd'>"""
        """Magic Link Button</a></body>""" % destination_page
    )
    self.execute_script(
        '''document.body.outerHTML = \"%s\"''' % referral_link
    )
    # Now click the generated button
    self.click("a.analytics.referral.test", timeout=2)
    time.sleep(0.15)
    if selector:
        self.click(selector)
        time.sleep(0.15)
def generate_traffic(
    self, start_page, destination_page, loops=1, selector=None
):
    """Runs generate_referral() repeatedly (loops times).
    When a selector is given, it gets clicked on the destination_page,
    which can prevent an artificial rise in website bounce-rate."""
    self.__check_scope()
    for _ in range(loops):
        self.generate_referral(
            start_page, destination_page, selector=selector
        )
        time.sleep(0.05)
def generate_referral_chain(self, pages):
    """Use this method to chain the action of creating button links on
    one website page that will take you to the next page.
    (When you want to create a referral to a website for traffic
    generation without increasing the bounce rate, you'll want to visit
    at least one additional page on that site with a button click.)"""
    self.__check_scope()
    if not type(pages) is tuple and not type(pages) is list:
        raise Exception(
            "Exception: Expecting a list of website pages for chaining!"
        )
    if len(pages) < 2:
        raise Exception(
            "Exception: At least two website pages required for chaining!"
        )
    for page in pages:
        # Find out if any of the web pages are invalid before continuing
        if not page_utils.is_valid_url(page):
            raise Exception(
                "Exception: Website page {%s} is not a valid URL!" % page
            )
    # Visit each page in turn from the page reached by the previous
    # referral (start_page=None means "continue from current page").
    for page in pages:
        self.generate_referral(None, page)
def generate_traffic_chain(self, pages, loops=1):
    """Runs generate_referral_chain() repeatedly (loops times)."""
    self.__check_scope()
    for _ in range(loops):
        self.generate_referral_chain(pages)
        time.sleep(0.05)
############
def wait_for_element_present(
    self, selector, by=By.CSS_SELECTOR, timeout=None
):
    """Waits until an element exists in the page HTML and returns it.
    Visibility is not required: hidden elements also qualify."""
    self.__check_scope()
    timeout = timeout or settings.LARGE_TIMEOUT
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    selector, by = self.__recalculate_selector(selector, by)
    # Shadow-DOM selectors take a dedicated code path.
    if not self.__is_shadow_selector(selector):
        return page_actions.wait_for_element_present(
            self.driver, selector, by, timeout
        )
    return self.__wait_for_shadow_element_present(selector, timeout)
def wait_for_element(self, selector, by=By.CSS_SELECTOR, timeout=None):
    """Waits until an element is present AND visible on the page,
    then returns it. (Hidden elements do not qualify.)"""
    self.__check_scope()
    timeout = timeout or settings.LARGE_TIMEOUT
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    selector, by = self.__recalculate_selector(selector, by)
    # Shadow-DOM selectors take a dedicated code path.
    if not self.__is_shadow_selector(selector):
        return page_actions.wait_for_element_visible(
            self.driver, selector, by, timeout
        )
    return self.__wait_for_shadow_element_visible(selector, timeout)
def get_element(self, selector, by=By.CSS_SELECTOR, timeout=None):
    """Returns the element once it exists in the page HTML.
    (Same as wait_for_element_present(), but with a SMALL_TIMEOUT
    default. Visibility is not required.)"""
    self.__check_scope()
    timeout = timeout or settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    selector, by = self.__recalculate_selector(selector, by)
    return self.wait_for_element_present(selector, by=by, timeout=timeout)
def assert_element_present(
    self, selector, by=By.CSS_SELECTOR, timeout=None
):
    """Similar to wait_for_element_present(), but returns nothing.
    Waits for an element to appear in the HTML of a page.
    The element does not need be visible (it may be hidden).
    Returns True if successful. Default timeout = SMALL_TIMEOUT."""
    self.__check_scope()
    if not timeout:
        timeout = settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    # A list input delegates to the multi-element variant.
    if type(selector) is list:
        self.assert_elements_present(selector, by=by, timeout=timeout)
        return True
    if self.__is_shadow_selector(selector):
        self.__assert_shadow_element_present(selector)
        return True
    self.wait_for_element_present(selector, by=by, timeout=timeout)
    if self.recorder_mode:
        # Record this assertion (when the recorder isn't paused and the
        # page has a recordable URL scheme) for later test generation.
        url = self.get_current_url()
        if url and len(url) > 0:
            if ("http:") in url or ("https:") in url or ("file:") in url:
                if self.get_session_storage_item("pause_recorder") == "no":
                    time_stamp = self.execute_script("return Date.now();")
                    origin = self.get_origin()
                    action = ["as_ep", selector, origin, time_stamp]
                    self.__extra_actions.append(action)
    return True
def assert_elements_present(self, *args, **kwargs):
    """Similar to self.assert_element_present(),
    but can assert that multiple elements are present in the HTML.
    The input is a list of elements.
    Optional kwargs include "by" and "timeout" (used by all selectors).
    Raises an exception if any of the elements are not present.
    Examples:
    self.assert_elements_present("head", "style", "script", "body")
    OR
    self.assert_elements_present(["head", "body", "h1", "h2"])
    """
    self.__check_scope()
    selectors = []
    timeout = None
    by = By.CSS_SELECTOR
    # Collect selectors passed through kwargs (str or list of str).
    for kwarg in kwargs:
        if kwarg == "timeout":
            timeout = kwargs["timeout"]
        elif kwarg == "by":
            by = kwargs["by"]
        elif kwarg == "selector":
            selector = kwargs["selector"]
            if type(selector) is str:
                selectors.append(selector)
            elif type(selector) is list:
                selectors_list = selector
                for selector in selectors_list:
                    if type(selector) is str:
                        selectors.append(selector)
        else:
            raise Exception('Unknown kwarg: "%s"!' % kwarg)
    if not timeout:
        timeout = settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    # Collect selectors passed as positional args (str or list of str).
    for arg in args:
        if type(arg) is list:
            for selector in arg:
                if type(selector) is str:
                    selectors.append(selector)
        elif type(arg) is str:
            selectors.append(arg)
    for selector in selectors:
        if self.__is_shadow_selector(selector):
            # Bug fix: assert shadow-element *presence*, matching this
            # method's contract and assert_element_present(). (It used
            # to call the visibility assertion by mistake.)
            self.__assert_shadow_element_present(selector)
            continue
        self.wait_for_element_present(selector, by=by, timeout=timeout)
    return True
def find_element(self, selector, by=By.CSS_SELECTOR, timeout=None):
    """Returns the element once it is visible on the page.
    (Same as wait_for_element_visible().)"""
    self.__check_scope()
    timeout = timeout or settings.LARGE_TIMEOUT
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    return self.wait_for_element_visible(selector, by=by, timeout=timeout)
def assert_element(self, selector, by=By.CSS_SELECTOR, timeout=None):
    """Similar to wait_for_element_visible(), but returns nothing.
    As above, will raise an exception if nothing can be found.
    Returns True if successful. Default timeout = SMALL_TIMEOUT."""
    self.__check_scope()
    if not timeout:
        timeout = settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    # A list input delegates to the multi-element variant.
    if type(selector) is list:
        self.assert_elements(selector, by=by, timeout=timeout)
        return True
    if self.__is_shadow_selector(selector):
        self.__assert_shadow_element_visible(selector)
        return True
    self.wait_for_element_visible(selector, by=by, timeout=timeout)
    if self.demo_mode:
        # Show an on-screen confirmation of the assertion (translated
        # if a non-English language is configured).
        selector, by = self.__recalculate_selector(
            selector, by, xp_ok=False
        )
        a_t = "ASSERT"
        if self._language != "English":
            from seleniumbase.fixtures.words import SD
            a_t = SD.translate_assert(self._language)
        messenger_post = "%s %s: %s" % (a_t, by.upper(), selector)
        self.__highlight_with_assert_success(messenger_post, selector, by)
    if self.recorder_mode:
        # Record this assertion (when the recorder isn't paused and the
        # page has a recordable URL scheme) for later test generation.
        url = self.get_current_url()
        if url and len(url) > 0:
            if ("http:") in url or ("https:") in url or ("file:") in url:
                if self.get_session_storage_item("pause_recorder") == "no":
                    time_stamp = self.execute_script("return Date.now();")
                    origin = self.get_origin()
                    action = ["as_el", selector, origin, time_stamp]
                    self.__extra_actions.append(action)
    return True
def assert_element_visible(
    self, selector, by=By.CSS_SELECTOR, timeout=None
):
    """Alias of self.assert_element().
    Raises an exception if the element can't be found visible."""
    self.__check_scope()
    timeout = timeout or settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    self.assert_element(selector, by=by, timeout=timeout)
    return True
def assert_elements(self, *args, **kwargs):
    """Similar to self.assert_element(), but can assert multiple elements.
    The input is a list of elements.
    Optional kwargs include "by" and "timeout" (used by all selectors).
    Raises an exception if any of the elements are not visible.
    Examples:
    self.assert_elements("h1", "h2", "h3")
    OR
    self.assert_elements(["h1", "h2", "h3"])"""
    self.__check_scope()
    selectors = []
    timeout = None
    by = By.CSS_SELECTOR
    # Collect selectors passed through kwargs (str or list of str).
    for kwarg in kwargs:
        if kwarg == "timeout":
            timeout = kwargs["timeout"]
        elif kwarg == "by":
            by = kwargs["by"]
        elif kwarg == "selector":
            selector = kwargs["selector"]
            if type(selector) is str:
                selectors.append(selector)
            elif type(selector) is list:
                selectors_list = selector
                for selector in selectors_list:
                    if type(selector) is str:
                        selectors.append(selector)
        else:
            raise Exception('Unknown kwarg: "%s"!' % kwarg)
    if not timeout:
        timeout = settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    # Collect selectors passed as positional args (str or list of str).
    for arg in args:
        if type(arg) is list:
            for selector in arg:
                if type(selector) is str:
                    selectors.append(selector)
        elif type(arg) is str:
            selectors.append(arg)
    for selector in selectors:
        if self.__is_shadow_selector(selector):
            self.__assert_shadow_element_visible(selector)
            continue
        self.wait_for_element_visible(selector, by=by, timeout=timeout)
        if self.demo_mode:
            # Show an on-screen confirmation of each assertion
            # (translated if a non-English language is configured).
            selector, by = self.__recalculate_selector(selector, by)
            a_t = "ASSERT"
            if self._language != "English":
                from seleniumbase.fixtures.words import SD
                a_t = SD.translate_assert(self._language)
            messenger_post = "%s %s: %s" % (a_t, by.upper(), selector)
            self.__highlight_with_assert_success(
                messenger_post, selector, by
            )
        continue
    return True
def assert_elements_visible(self, *args, **kwargs):
    """Same as self.assert_elements()
    Raises an exception if any element cannot be found."""
    # Pure alias: forwards all args/kwargs unchanged.
    return self.assert_elements(*args, **kwargs)
############
def wait_for_text_visible(
    self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
    """Waits until the given text is visible within the element matched
    by the selector (substring match), and returns that element."""
    self.__check_scope()
    timeout = timeout or settings.LARGE_TIMEOUT
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    selector, by = self.__recalculate_selector(selector, by)
    # Shadow-DOM selectors take a dedicated code path.
    if not self.__is_shadow_selector(selector):
        return page_actions.wait_for_text_visible(
            self.driver, text, selector, by, timeout, self.browser
        )
    return self.__wait_for_shadow_text_visible(text, selector, timeout)
def wait_for_exact_text_visible(
    self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
    """Waits until the element matched by the selector visibly contains
    exactly the given text, and returns that element."""
    self.__check_scope()
    timeout = timeout or settings.LARGE_TIMEOUT
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    selector, by = self.__recalculate_selector(selector, by)
    # Shadow-DOM selectors take a dedicated code path.
    if not self.__is_shadow_selector(selector):
        return page_actions.wait_for_exact_text_visible(
            self.driver, text, selector, by, timeout, self.browser
        )
    return self.__wait_for_exact_shadow_text_visible(text, selector, timeout)
def wait_for_text(
    self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
    """Shorthand alias for wait_for_text_visible()."""
    self.__check_scope()
    timeout = timeout or settings.LARGE_TIMEOUT
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    return self.wait_for_text_visible(
        text, selector, by=by, timeout=timeout
    )
def find_text(
    self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
    """Alias for wait_for_text_visible() - returns the element found."""
    self.__check_scope()
    timeout = timeout or settings.LARGE_TIMEOUT
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    return self.wait_for_text_visible(
        text, selector, by=by, timeout=timeout
    )
def assert_text_visible(
    self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
    """Alias of self.assert_text()."""
    self.__check_scope()
    timeout = timeout or settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    return self.assert_text(text, selector, by=by, timeout=timeout)
def assert_text(
    self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
    """Similar to wait_for_text_visible()
    Raises an exception if the element or the text is not found.
    The text only needs to be a subset within the complete text.
    Returns True if successful. Default timeout = SMALL_TIMEOUT."""
    self.__check_scope()
    if not timeout:
        timeout = settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    selector, by = self.__recalculate_selector(selector, by)
    if self.__is_shadow_selector(selector):
        self.__assert_shadow_text_visible(text, selector, timeout)
        return True
    self.wait_for_text_visible(text, selector, by=by, timeout=timeout)
    if self.demo_mode:
        # Show an on-screen confirmation of the assertion (translated
        # if a non-English language is configured).
        a_t = "ASSERT TEXT"
        i_n = "in"
        if self._language != "English":
            from seleniumbase.fixtures.words import SD
            a_t = SD.translate_assert_text(self._language)
            i_n = SD.translate_in(self._language)
        messenger_post = "%s: {%s} %s %s: %s" % (
            a_t,
            text,
            i_n,
            by.upper(),
            selector,
        )
        self.__highlight_with_assert_success(messenger_post, selector, by)
    if self.recorder_mode:
        # Record this assertion (when the recorder isn't paused and the
        # page has a recordable URL scheme) for later test generation.
        url = self.get_current_url()
        if url and len(url) > 0:
            if ("http:") in url or ("https:") in url or ("file:") in url:
                if self.get_session_storage_item("pause_recorder") == "no":
                    time_stamp = self.execute_script("return Date.now();")
                    origin = self.get_origin()
                    text_selector = [text, selector]
                    action = ["as_te", text_selector, origin, time_stamp]
                    self.__extra_actions.append(action)
    return True
def assert_exact_text(
    self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
    """Similar to assert_text(), but the text must be exact,
    rather than exist as a subset of the full text.
    (Extra whitespace at the beginning or the end doesn't count.)
    Raises an exception if the element or the text is not found.
    Returns True if successful. Default timeout = SMALL_TIMEOUT."""
    self.__check_scope()
    if not timeout:
        timeout = settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    selector, by = self.__recalculate_selector(selector, by)
    if self.__is_shadow_selector(selector):
        self.__assert_exact_shadow_text_visible(text, selector, timeout)
        return True
    self.wait_for_exact_text_visible(
        text, selector, by=by, timeout=timeout
    )
    if self.demo_mode:
        # Show an on-screen confirmation of the assertion (translated
        # if a non-English language is configured).
        a_t = "ASSERT EXACT TEXT"
        i_n = "in"
        if self._language != "English":
            from seleniumbase.fixtures.words import SD
            a_t = SD.translate_assert_exact_text(self._language)
            i_n = SD.translate_in(self._language)
        messenger_post = "%s: {%s} %s %s: %s" % (
            a_t,
            text,
            i_n,
            by.upper(),
            selector,
        )
        self.__highlight_with_assert_success(messenger_post, selector, by)
    if self.recorder_mode:
        # Record this assertion (when the recorder isn't paused and the
        # page has a recordable URL scheme) for later test generation.
        url = self.get_current_url()
        if url and len(url) > 0:
            if ("http:") in url or ("https:") in url or ("file:") in url:
                if self.get_session_storage_item("pause_recorder") == "no":
                    time_stamp = self.execute_script("return Date.now();")
                    origin = self.get_origin()
                    text_selector = [text, selector]
                    action = ["as_et", text_selector, origin, time_stamp]
                    self.__extra_actions.append(action)
    return True
############
def wait_for_link_text_present(self, link_text, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 5)):
shared_utils.check_if_time_limit_exceeded()
try:
if not self.is_link_text_present(link_text):
raise Exception(
"Link text {%s} was not found!" % link_text
)
return
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.2)
message = "Link text {%s} was not present after %s seconds!" % (
link_text,
timeout,
)
page_actions.timeout_exception("NoSuchElementException", message)
def wait_for_partial_link_text_present(self, link_text, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 5)):
shared_utils.check_if_time_limit_exceeded()
try:
if not self.is_partial_link_text_present(link_text):
raise Exception(
"Partial Link text {%s} was not found!" % link_text
)
return
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.2)
message = (
"Partial Link text {%s} was not present after %s seconds!"
"" % (link_text, timeout)
)
page_actions.timeout_exception("NoSuchElementException", message)
def wait_for_link_text_visible(self, link_text, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_element_visible(
link_text, by=By.LINK_TEXT, timeout=timeout
)
def wait_for_link_text(self, link_text, timeout=None):
""" The shorter version of wait_for_link_text_visible() """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_link_text_visible(link_text, timeout=timeout)
def find_link_text(self, link_text, timeout=None):
""" Same as wait_for_link_text_visible() - returns the element """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_link_text_visible(link_text, timeout=timeout)
    def assert_link_text(self, link_text, timeout=None):
        """Similar to wait_for_link_text_visible(), but returns nothing.
        As above, will raise an exception if nothing can be found.
        Returns True if successful. Default timeout = SMALL_TIMEOUT."""
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        self.wait_for_link_text_visible(link_text, timeout=timeout)
        if self.demo_mode:
            # In Demo Mode, highlight the link and display the assertion,
            # translating the message if a non-English language is set.
            a_t = "ASSERT LINK TEXT"
            if self._language != "English":
                from seleniumbase.fixtures.words import SD
                a_t = SD.translate_assert_link_text(self._language)
            messenger_post = "%s: {%s}" % (a_t, link_text)
            self.__highlight_with_assert_success(
                messenger_post, link_text, by=By.LINK_TEXT
            )
        if self.recorder_mode:
            # In Recorder Mode, record this assertion as an "as_lt" action,
            # but only for recordable URLs and while not paused.
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["as_lt", link_text, origin, time_stamp]
                        self.__extra_actions.append(action)
        return True
def wait_for_partial_link_text(self, partial_link_text, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_element_visible(
partial_link_text, by=By.PARTIAL_LINK_TEXT, timeout=timeout
)
def find_partial_link_text(self, partial_link_text, timeout=None):
""" Same as wait_for_partial_link_text() - returns the element """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_partial_link_text(
partial_link_text, timeout=timeout
)
    def assert_partial_link_text(self, partial_link_text, timeout=None):
        """Similar to wait_for_partial_link_text(), but returns nothing.
        As above, will raise an exception if nothing can be found.
        Returns True if successful. Default timeout = SMALL_TIMEOUT."""
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        self.wait_for_partial_link_text(partial_link_text, timeout=timeout)
        if self.demo_mode:
            # In Demo Mode, highlight the link and display the assertion.
            a_t = "ASSERT PARTIAL LINK TEXT"
            if self._language != "English":
                from seleniumbase.fixtures.words import SD
                # NOTE(review): reuses translate_assert_link_text() here;
                # presumably SD has no partial-link-text variant — confirm.
                a_t = SD.translate_assert_link_text(self._language)
            messenger_post = "%s: {%s}" % (a_t, partial_link_text)
            self.__highlight_with_assert_success(
                messenger_post, partial_link_text, by=By.PARTIAL_LINK_TEXT
            )
        return True
############
def wait_for_element_absent(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""Waits for an element to no longer appear in the HTML of a page.
A hidden element counts as a present element, which fails this assert.
If waiting for elements to be hidden instead of nonexistent,
use wait_for_element_not_visible() instead.
"""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_element_absent(
self.driver, selector, by, timeout
)
def assert_element_absent(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""Similar to wait_for_element_absent()
As above, will raise an exception if the element stays present.
A hidden element counts as a present element, which fails this assert.
If you want to assert that elements are hidden instead of nonexistent,
use assert_element_not_visible() instead.
(Note that hidden elements are still present in the HTML of the page.)
Returns True if successful. Default timeout = SMALL_TIMEOUT."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.wait_for_element_absent(selector, by=by, timeout=timeout)
return True
############
def wait_for_element_not_visible(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""Waits for an element to no longer be visible on a page.
The element can be non-existent in the HTML or hidden on the page
to qualify as not visible."""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_element_not_visible(
self.driver, selector, by, timeout
)
    def assert_element_not_visible(
        self, selector, by=By.CSS_SELECTOR, timeout=None
    ):
        """Similar to wait_for_element_not_visible()
        As above, will raise an exception if the element stays visible.
        Returns True if successful. Default timeout = SMALL_TIMEOUT."""
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        self.wait_for_element_not_visible(selector, by=by, timeout=timeout)
        if self.recorder_mode:
            # In Recorder Mode, record this assertion as an "asenv" action,
            # but only for recordable URLs and while not paused.
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["asenv", selector, origin, time_stamp]
                        self.__extra_actions.append(action)
        return True
############
def wait_for_text_not_visible(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_text_not_visible(
self.driver, text, selector, by, timeout
)
def assert_text_not_visible(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
"""Similar to wait_for_text_not_visible()
Raises an exception if the text is still visible after timeout.
Returns True if successful. Default timeout = SMALL_TIMEOUT."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_text_not_visible(
text, selector, by=by, timeout=timeout
)
############
def wait_for_attribute_not_present(
self, selector, attribute, value=None, by=By.CSS_SELECTOR, timeout=None
):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_attribute_not_present(
self.driver, selector, attribute, value, by, timeout
)
def assert_attribute_not_present(
self, selector, attribute, value=None, by=By.CSS_SELECTOR, timeout=None
):
"""Similar to wait_for_attribute_not_present()
Raises an exception if the attribute is still present after timeout.
Returns True if successful. Default timeout = SMALL_TIMEOUT."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_attribute_not_present(
selector, attribute, value=value, by=by, timeout=timeout
)
############
def wait_for_and_accept_alert(self, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_accept_alert(self.driver, timeout)
def wait_for_and_dismiss_alert(self, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_dismiss_alert(self.driver, timeout)
def wait_for_and_switch_to_alert(self, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_switch_to_alert(self.driver, timeout)
############
def accept_alert(self, timeout=None):
""" Same as wait_for_and_accept_alert(), but smaller default T_O """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_accept_alert(self.driver, timeout)
def dismiss_alert(self, timeout=None):
""" Same as wait_for_and_dismiss_alert(), but smaller default T_O """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_dismiss_alert(self.driver, timeout)
def switch_to_alert(self, timeout=None):
""" Same as wait_for_and_switch_to_alert(), but smaller default T_O """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_switch_to_alert(self.driver, timeout)
############
    def __assert_eq(self, *args, **kwargs):
        """Minified assert_equal() using only the list diff.
        Runs self.assertEqual() and, if it fails, rebuilds a shorter
        exception message that keeps only the significant diff lines
        (headers, "+"/"-"/"?" markers, and starred lines)."""
        minified_exception = None
        try:
            self.assertEqual(*args, **kwargs)
        except Exception as e:
            str_e = str(e)
            minified_exception = "\nAssertionError:\n"
            lines = str_e.split("\n")
            # countdown: how many lines after a matched header to process.
            countdown = 3
            countdown_on = False
            first_differing = False
            skip_lines = False
            for line in lines:
                if countdown_on:
                    # Copying (or skipping) the 3 lines after a header line.
                    if not skip_lines:
                        minified_exception += line + "\n"
                    countdown = countdown - 1
                    if countdown == 0:
                        countdown_on = False
                        skip_lines = False
                elif line.startswith("First differing"):
                    first_differing = True
                    countdown_on = True
                    countdown = 3
                    minified_exception += line + "\n"
                elif line.startswith("First list"):
                    countdown_on = True
                    countdown = 3
                    if not first_differing:
                        minified_exception += line + "\n"
                    else:
                        # "First differing" was already shown: skip this part.
                        skip_lines = True
                elif line.startswith("F"):
                    countdown_on = True
                    countdown = 3
                    minified_exception += line + "\n"
                elif line.startswith("+") or line.startswith("-"):
                    minified_exception += line + "\n"
                elif line.startswith("?"):
                    minified_exception += line + "\n"
                elif line.strip().startswith("*"):
                    minified_exception += line + "\n"
        if minified_exception:
            raise Exception(minified_exception)
def __process_visual_baseline_logs(self):
""" Save copies of baseline PNGs in "./latest_logs" during failures.
Also create a side_by_side.html file for visual comparisons. """
test_logpath = os.path.join(self.log_path, self.__get_test_id())
for baseline_copy_tuple in self.__visual_baseline_copies:
baseline_path = baseline_copy_tuple[0]
baseline_copy_name = baseline_copy_tuple[1]
b_c_alt_name = baseline_copy_tuple[2]
latest_png_path = baseline_copy_tuple[3]
latest_copy_name = baseline_copy_tuple[4]
l_c_alt_name = baseline_copy_tuple[5]
baseline_copy_path = os.path.join(test_logpath, baseline_copy_name)
b_c_alt_path = os.path.join(test_logpath, b_c_alt_name)
latest_copy_path = os.path.join(test_logpath, latest_copy_name)
l_c_alt_path = os.path.join(test_logpath, l_c_alt_name)
if len(self.__visual_baseline_copies) == 1:
baseline_copy_path = b_c_alt_path
latest_copy_path = l_c_alt_path
if (
os.path.exists(baseline_path)
and not os.path.exists(baseline_copy_path)
):
self.__create_log_path_as_needed(test_logpath)
shutil.copy(baseline_path, baseline_copy_path)
if (
os.path.exists(latest_png_path)
and not os.path.exists(latest_copy_path)
):
self.__create_log_path_as_needed(test_logpath)
shutil.copy(latest_png_path, latest_copy_path)
if len(self.__visual_baseline_copies) != 1:
return # Only possible when deferred visual asserts are used
head = (
'<head><meta charset="utf-8">'
'<meta name="viewport" content="shrink-to-fit=no">'
'<link rel="shortcut icon" href="%s">'
"<title>Visual Comparison</title>"
"</head>"
% (constants.SideBySide.SIDE_BY_SIDE_PNG)
)
table_html = (
'<table border="3px solid #E6E6E6;" width="100%;" padding: 12px;'
' font-size="16px;" text-align="left;" id="results-table"'
' style="background-color: #FAFAFA;">'
'<thead id="results-table-head">'
'<tr>'
'<th style="background-color: rgba(0, 128, 0, 0.25);"'
' col="baseline">Baseline Screenshot</th>'
'<th style="background-color: rgba(128, 0, 0, 0.25);"'
' col="failure">Visual Diff Failure Screenshot</th>'
"</tr></thead>"
)
row = (
'<tbody class="compare results-table-row">'
'<tr style="background-color: #F4F4FE;">'
'<td><img src="%s" width="100%%" /></td>'
'<td><img src="%s" width="100%%" /></td>'
"</tr></tbody>"
"" % ("baseline.png", "baseline_diff.png")
)
header_text = "SeleniumBase Visual Comparison"
header = '<h3 align="center">%s</h3>' % header_text
table_html += row
table_html += "</table>"
footer = "<br /><b>Last updated:</b> "
timestamp, the_date, the_time = log_helper.get_master_time()
last_updated = "%s at %s" % (the_date, the_time)
footer = footer + "%s" % last_updated
gen_by = (
'<p><div>Generated by: <b><a href="https://seleniumbase.io/">'
"SeleniumBase</a></b></div></p><p></p>"
)
footer = footer + gen_by
the_html = (
'<html lang="en">'
+ head
+ '<body style="background-color: #FCFCF4;">'
+ header
+ table_html
+ footer
+ "</body>"
)
file_path = os.path.join(test_logpath, constants.SideBySide.HTML_FILE)
out_file = codecs.open(file_path, "w+", encoding="utf-8")
out_file.writelines(the_html)
out_file.close()
    def check_window(
        self,
        name="default",
        level=0,
        baseline=False,
        check_domain=True,
        full_diff=False,
    ):
        """*** Automated Visual Testing with SeleniumBase ***
        The first time a test calls self.check_window() for a unique "name"
        parameter provided, it will set a visual baseline, meaning that it
        creates a folder, saves the URL to a file, saves the current window
        screenshot to a file, and creates the following three files
        with the listed data saved:
        tags_level1.txt  ->  HTML tags from the window
        tags_level2.txt  ->  HTML tags + attributes from the window
        tags_level3.txt  ->  HTML tags + attributes/values from the window
        Baseline folders are named based on the test name and the name
        parameter passed to self.check_window(). The same test can store
        multiple baseline folders.
        If the baseline is being set/reset, the "level" doesn't matter.
        After the first run of self.check_window(), it will compare the
        HTML tags of the latest window to the one from the initial run.
        Here's how the level system works:
        * level=0 ->
            DRY RUN ONLY - Will perform comparisons to the baseline (and
                           print out any differences that are found) but
                           won't fail the test even if differences exist.
        * level=1 ->
            HTML tags are compared to tags_level1.txt
        * level=2 ->
            HTML tags are compared to tags_level1.txt and
            HTML tags/attributes are compared to tags_level2.txt
        * level=3 ->
            HTML tags are compared to tags_level1.txt and
            HTML tags + attributes are compared to tags_level2.txt and
            HTML tags + attributes/values are compared to tags_level3.txt
        As shown, Level-3 is the most strict, Level-1 is the least strict.
        If the comparisons from the latest window to the existing baseline
        don't match, the current test will fail, except for Level-0 tests.
        You can reset the visual baseline on the command line by using:
            --visual_baseline
        As long as "--visual_baseline" is used on the command line while
        running tests, the self.check_window() method cannot fail because
        it will rebuild the visual baseline rather than comparing the html
        tags of the latest run to the existing baseline. If there are any
        expected layout changes to a website that you're testing, you'll
        need to reset the baseline to prevent unnecessary failures.
        self.check_window() will fail with "Page Domain Mismatch Failure"
        if the page domain doesn't match the domain of the baseline,
        unless "check_domain" is set to False when calling check_window().
        If you want to use self.check_window() to compare a web page to
        a later version of itself from within the same test run, you can
        add the parameter "baseline=True" to the first time you call
        self.check_window() in a test to use that as the baseline. This
        only makes sense if you're calling self.check_window() more than
        once with the same name parameter in the same test.
        If "full_diff" is set to False, the error output will only
        include the first differing element in the list comparison.
        Set "full_diff" to True if you want to see the full output.
        Automated Visual Testing with self.check_window() is not very
        effective for websites that have dynamic content that changes
        the layout and structure of web pages. For those, you're much
        better off using regular SeleniumBase functional testing.
        Example usage:
            self.check_window(name="testing", level=0)
            self.check_window(name="xkcd_home", level=1)
            self.check_window(name="github_page", level=2)
            self.check_window(name="wikipedia_page", level=3)
        """
        self.wait_for_ready_state_complete()
        # Normalize string levels ("0".."3") to ints, then validate.
        if level == "0":
            level = 0
        if level == "1":
            level = 1
        if level == "2":
            level = 2
        if level == "3":
            level = 3
        if level != 0 and level != 1 and level != 2 and level != 3:
            raise Exception('Parameter "level" must be set to 0, 1, 2, or 3!')
        if self.demo_mode:
            # Demo Mode injects extra HTML, which can change comparisons.
            message = (
                "WARNING: Using check_window() from Demo Mode may lead "
                "to unexpected results caused by Demo Mode HTML changes."
            )
            logging.info(message)
        test_id = self.__get_display_id().split("::")[-1]
        if not name or len(name) < 1:
            name = "default"
        name = str(name)
        from seleniumbase.core import visual_helper
        visual_helper.visual_baseline_folder_setup()
        # Build all file paths used by this baseline folder.
        baseline_dir = constants.VisualBaseline.STORAGE_FOLDER
        visual_baseline_path = baseline_dir + "/" + test_id + "/" + name
        page_url_file = visual_baseline_path + "/page_url.txt"
        baseline_png = "baseline.png"
        baseline_png_path = visual_baseline_path + "/%s" % baseline_png
        latest_png = "latest.png"
        latest_png_path = visual_baseline_path + "/%s" % latest_png
        level_1_file = visual_baseline_path + "/tags_level_1.txt"
        level_2_file = visual_baseline_path + "/tags_level_2.txt"
        level_3_file = visual_baseline_path + "/tags_level_3.txt"
        # The baseline is (re)set when requested or when any file is missing.
        set_baseline = False
        if baseline or self.visual_baseline:
            set_baseline = True
        if not os.path.exists(visual_baseline_path):
            set_baseline = True
            try:
                os.makedirs(visual_baseline_path)
            except Exception:
                pass  # Only reachable during multi-threaded test runs
        if not os.path.exists(page_url_file):
            set_baseline = True
        if not os.path.exists(baseline_png_path):
            set_baseline = True
        if not os.path.exists(level_1_file):
            set_baseline = True
        if not os.path.exists(level_2_file):
            set_baseline = True
        if not os.path.exists(level_3_file):
            set_baseline = True
        # Capture the page's tag structure at all three strictness levels.
        page_url = self.get_current_url()
        soup = self.get_beautiful_soup()
        html_tags = soup.body.find_all()
        level_1 = [[tag.name] for tag in html_tags]
        level_1 = json.loads(json.dumps(level_1))  # Tuples become lists
        level_2 = [[tag.name, sorted(tag.attrs.keys())] for tag in html_tags]
        level_2 = json.loads(json.dumps(level_2))  # Tuples become lists
        level_3 = [[tag.name, sorted(tag.attrs.items())] for tag in html_tags]
        level_3 = json.loads(json.dumps(level_3))  # Tuples become lists
        if set_baseline:
            # Write the new baseline: screenshot, URL, and all three levels.
            self.save_screenshot(
                baseline_png, visual_baseline_path, selector="body"
            )
            out_file = codecs.open(page_url_file, "w+", encoding="utf-8")
            out_file.writelines(page_url)
            out_file.close()
            out_file = codecs.open(level_1_file, "w+", encoding="utf-8")
            out_file.writelines(json.dumps(level_1))
            out_file.close()
            out_file = codecs.open(level_2_file, "w+", encoding="utf-8")
            out_file.writelines(json.dumps(level_2))
            out_file.close()
            out_file = codecs.open(level_3_file, "w+", encoding="utf-8")
            out_file.writelines(json.dumps(level_3))
            out_file.close()
        # Register the copy info used by __process_visual_baseline_logs()
        # if this check ends up failing.
        baseline_path = os.path.join(visual_baseline_path, baseline_png)
        baseline_copy_name = "baseline_%s.png" % name
        b_c_alt_name = "baseline.png"
        latest_copy_name = "baseline_diff_%s.png" % name
        l_c_alt_name = "baseline_diff.png"
        baseline_copy_tuple = (
            baseline_path, baseline_copy_name, b_c_alt_name,
            latest_png_path, latest_copy_name, l_c_alt_name,
        )
        self.__visual_baseline_copies.append(baseline_copy_tuple)
        if not set_baseline:
            # Compare the current page against the stored baseline.
            self.save_screenshot(
                latest_png, visual_baseline_path, selector="body"
            )
            f = open(page_url_file, "r")
            page_url_data = f.read().strip()
            f.close()
            f = open(level_1_file, "r")
            level_1_data = json.loads(f.read())
            f.close()
            f = open(level_2_file, "r")
            level_2_data = json.loads(f.read())
            f.close()
            f = open(level_3_file, "r")
            level_3_data = json.loads(f.read())
            f.close()
            domain_fail = (
                "\n*\nPage Domain Mismatch Failure: "
                "Current Page Domain doesn't match the Page Domain of the "
                "Baseline! Can't compare two completely different sites! "
                "Run with --visual_baseline to reset the baseline!"
            )
            level_1_failure = (
                "\n*\n*** Exception: <Level 1> Visual Diff Failure:\n"
                "* HTML tags don't match the baseline!"
            )
            level_2_failure = (
                "\n*\n*** Exception: <Level 2> Visual Diff Failure:\n"
                "* HTML tag attribute names don't match the baseline!"
            )
            level_3_failure = (
                "\n*\n*** Exception: <Level 3> Visual Diff Failure:\n"
                "* HTML tag attribute values don't match the baseline!"
            )
            page_domain = self.get_domain_url(page_url)
            page_data_domain = self.get_domain_url(page_url_data)
            # maxDiff limits how much diff output assertEqual() produces.
            unittest.TestCase.maxDiff = 3200
            if level != 0 and check_domain:
                self.assertEqual(page_data_domain, page_domain, domain_fail)
            unittest.TestCase.maxDiff = 6400  # Use `None` for no limit
            if level == 3:
                if not full_diff:
                    self.__assert_eq(level_3_data, level_3, level_3_failure)
                else:
                    self.assertEqual(level_3_data, level_3, level_3_failure)
            unittest.TestCase.maxDiff = 3200
            if level == 2:
                if not full_diff:
                    self.__assert_eq(level_2_data, level_2, level_2_failure)
                else:
                    self.assertEqual(level_2_data, level_2, level_2_failure)
            if level == 1:
                if not full_diff:
                    self.__assert_eq(level_1_data, level_1, level_1_failure)
                else:
                    self.assertEqual(level_1_data, level_1, level_1_failure)
            unittest.TestCase.maxDiff = 6400  # Use `None` for no limit
            if level == 0:
                # Level-0 Dry Run: run all comparisons, but only print
                # the differences instead of failing the test.
                try:
                    unittest.TestCase.maxDiff = 3200
                    if check_domain:
                        self.assertEqual(
                            page_domain, page_data_domain, domain_fail
                        )
                    try:
                        if not full_diff:
                            self.__assert_eq(
                                level_1_data, level_1, level_1_failure
                            )
                        else:
                            self.assertEqual(
                                level_1_data, level_1, level_1_failure
                            )
                    except Exception as e:
                        print(e)
                    try:
                        if not full_diff:
                            self.__assert_eq(
                                level_2_data, level_2, level_2_failure
                            )
                        else:
                            self.assertEqual(
                                level_2_data, level_2, level_2_failure
                            )
                    except Exception as e:
                        print(e)
                    unittest.TestCase.maxDiff = 6400  # Use `None` for no limit
                    if not full_diff:
                        self.__assert_eq(
                            level_3_data, level_3, level_3_failure
                        )
                    else:
                        self.assertEqual(
                            level_3_data, level_3, level_3_failure
                        )
                except Exception as e:
                    print(e)  # Level-0 Dry Run (Only print the differences)
            unittest.TestCase.maxDiff = None  # Reset unittest.TestCase.maxDiff
            # Since the check passed, do not save an extra copy of the baseline
            del self.__visual_baseline_copies[-1]  # .pop() returns the element
############
def __get_new_timeout(self, timeout):
""" When using --timeout_multiplier=#.# """
import math
self.__check_scope()
try:
timeout_multiplier = float(self.timeout_multiplier)
if timeout_multiplier <= 0.5:
timeout_multiplier = 0.5
timeout = int(math.ceil(timeout_multiplier * timeout))
return timeout
except Exception:
# Wrong data type for timeout_multiplier (expecting int or float)
return timeout
############
def __check_scope(self):
if hasattr(self, "browser"): # self.browser stores the type of browser
return # All good: setUp() already initialized variables in "self"
else:
from seleniumbase.common.exceptions import OutOfScopeException
message = (
"\n It looks like you are trying to call a SeleniumBase method"
"\n from outside the scope of your test class's `self` object,"
"\n which is initialized by calling BaseCase's setUp() method."
"\n The `self` object is where all test variables are defined."
"\n If you created a custom setUp() method (that overrided the"
"\n the default one), make sure to call super().setUp() in it."
"\n When using page objects, be sure to pass the `self` object"
"\n from your test class into your page object methods so that"
"\n they can call BaseCase class methods with all the required"
"\n variables, which are initialized during the setUp() method"
"\n that runs automatically before all tests called by pytest."
)
raise OutOfScopeException(message)
############
def __check_browser(self):
"""This method raises an exception if the window was already closed."""
active_window = None
try:
active_window = self.driver.current_window_handle # Fails if None
except Exception:
pass
if not active_window:
raise NoSuchWindowException("Active window was already closed!")
############
def __get_exception_message(self):
"""This method extracts the message from an exception if there
was an exception that occurred during the test, assuming
that the exception was in a try/except block and not thrown."""
exception_info = sys.exc_info()[1]
if hasattr(exception_info, "msg"):
exc_message = exception_info.msg
elif hasattr(exception_info, "message"):
exc_message = exception_info.message
else:
exc_message = sys.exc_info()
return exc_message
def __add_deferred_assert_failure(self):
""" Add a deferred_assert failure to a list for future processing. """
self.__check_scope()
current_url = self.driver.current_url
message = self.__get_exception_message()
self.__deferred_assert_failures.append(
"CHECK #%s: (%s) %s\n"
% (self.__deferred_assert_count, current_url, message)
)
############
def deferred_assert_element(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""A non-terminating assertion for an element on a page.
Failures will be saved until the process_deferred_asserts()
method is called from inside a test, likely at the end of it."""
self.__check_scope()
if not timeout:
timeout = settings.MINI_TIMEOUT
if self.timeout_multiplier and timeout == settings.MINI_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__deferred_assert_count += 1
try:
url = self.get_current_url()
if url == self.__last_url_of_deferred_assert:
timeout = 1 # Was already on page (full wait not needed)
else:
self.__last_url_of_deferred_assert = url
except Exception:
pass
try:
self.wait_for_element_visible(selector, by=by, timeout=timeout)
return True
except Exception:
self.__add_deferred_assert_failure()
return False
def deferred_assert_text(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
"""A non-terminating assertion for text from an element on a page.
Failures will be saved until the process_deferred_asserts()
method is called from inside a test, likely at the end of it."""
self.__check_scope()
if not timeout:
timeout = settings.MINI_TIMEOUT
if self.timeout_multiplier and timeout == settings.MINI_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__deferred_assert_count += 1
try:
url = self.get_current_url()
if url == self.__last_url_of_deferred_assert:
timeout = 1 # Was already on page (full wait not needed)
else:
self.__last_url_of_deferred_assert = url
except Exception:
pass
try:
self.wait_for_text_visible(text, selector, by=by, timeout=timeout)
return True
except Exception:
self.__add_deferred_assert_failure()
return False
def deferred_assert_exact_text(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
"""A non-terminating assertion for exact text from an element.
Failures will be saved until the process_deferred_asserts()
method is called from inside a test, likely at the end of it."""
self.__check_scope()
if not timeout:
timeout = settings.MINI_TIMEOUT
if self.timeout_multiplier and timeout == settings.MINI_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__deferred_assert_count += 1
try:
url = self.get_current_url()
if url == self.__last_url_of_deferred_assert:
timeout = 1 # Was already on page (full wait not needed)
else:
self.__last_url_of_deferred_assert = url
except Exception:
pass
try:
self.wait_for_exact_text_visible(
text, selector, by=by, timeout=timeout
)
return True
except Exception:
self.__add_deferred_assert_failure()
return False
def deferred_check_window(
self,
name="default",
level=0,
baseline=False,
check_domain=True,
full_diff=False,
):
"""A non-terminating assertion for the check_window() method.
Failures will be saved until the process_deferred_asserts()
method is called from inside a test, likely at the end of it."""
self.__check_scope()
self.__deferred_assert_count += 1
try:
self.check_window(
name=name,
level=level,
baseline=baseline,
check_domain=check_domain,
full_diff=full_diff,
)
return True
except Exception:
self.__add_deferred_assert_failure()
return False
def process_deferred_asserts(self, print_only=False):
"""To be used with any test that uses deferred_asserts, which are
non-terminating verifications that only raise exceptions
after this method is called.
This is useful for pages with multiple elements to be checked when
you want to find as many bugs as possible in a single test run
before having all the exceptions get raised simultaneously.
Might be more useful if this method is called after processing all
the deferred asserts on a single html page so that the failure
screenshot matches the location of the deferred asserts.
If "print_only" is set to True, the exception won't get raised."""
if self.__deferred_assert_failures:
exception_output = ""
exception_output += "\n***** DEFERRED ASSERTION FAILURES:\n"
exception_output += "TEST: %s\n\n" % self.id()
all_failing_checks = self.__deferred_assert_failures
self.__deferred_assert_failures = []
for tb in all_failing_checks:
exception_output += "%s\n" % tb
if print_only:
print(exception_output)
else:
raise Exception(exception_output.replace("\\n", "\n"))
############
# Alternate naming scheme for the "deferred_assert" methods.
def delayed_assert_element(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
""" Same as self.deferred_assert_element() """
return self.deferred_assert_element(
selector=selector, by=by, timeout=timeout
)
def delayed_assert_text(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
""" Same as self.deferred_assert_text() """
return self.deferred_assert_text(
text=text, selector=selector, by=by, timeout=timeout
)
def delayed_assert_exact_text(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
""" Same as self.deferred_assert_exact_text() """
return self.deferred_assert_exact_text(
text=text, selector=selector, by=by, timeout=timeout
)
def delayed_check_window(
self,
name="default",
level=0,
baseline=False,
check_domain=True,
full_diff=False
):
""" Same as self.deferred_check_window() """
return self.deferred_check_window(
name=name,
level=level,
baseline=baseline,
check_domain=check_domain,
full_diff=full_diff,
)
def process_delayed_asserts(self, print_only=False):
""" Same as self.process_deferred_asserts() """
self.process_deferred_asserts(print_only=print_only)
############
def __js_click(self, selector, by=By.CSS_SELECTOR):
""" Clicks an element using pure JS. Does not use jQuery. """
selector, by = self.__recalculate_selector(selector, by)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector) # Add "\\" to special chars
css_selector = self.__escape_quotes_if_needed(css_selector)
script = (
"""var simulateClick = function (elem) {
var evt = new MouseEvent('click', {
bubbles: true,
cancelable: true,
view: window
});
var canceled = !elem.dispatchEvent(evt);
};
var someLink = document.querySelector('%s');
simulateClick(someLink);"""
% css_selector
)
self.execute_script(script)
def __js_click_all(self, selector, by=By.CSS_SELECTOR):
""" Clicks all matching elements using pure JS. (No jQuery) """
selector, by = self.__recalculate_selector(selector, by)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector) # Add "\\" to special chars
css_selector = self.__escape_quotes_if_needed(css_selector)
script = (
"""var simulateClick = function (elem) {
var evt = new MouseEvent('click', {
bubbles: true,
cancelable: true,
view: window
});
var canceled = !elem.dispatchEvent(evt);
};
var $elements = document.querySelectorAll('%s');
var index = 0, length = $elements.length;
for(; index < length; index++){
simulateClick($elements[index]);}"""
% css_selector
)
self.execute_script(script)
def __jquery_slow_scroll_to(self, selector, by=By.CSS_SELECTOR):
selector, by = self.__recalculate_selector(selector, by)
element = self.wait_for_element_present(
selector, by=by, timeout=settings.SMALL_TIMEOUT
)
dist = js_utils.get_scroll_distance_to_element(self.driver, element)
time_offset = 0
try:
if dist and abs(dist) > constants.Values.SSMD:
time_offset = int(
float(abs(dist) - constants.Values.SSMD) / 12.5
)
if time_offset > 950:
time_offset = 950
except Exception:
time_offset = 0
scroll_time_ms = 550 + time_offset
sleep_time = 0.625 + (float(time_offset) / 1000.0)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
scroll_script = (
"""jQuery([document.documentElement, document.body]).animate({"""
"""scrollTop: jQuery('%s').offset().top - 130}, %s);"""
% (selector, scroll_time_ms)
)
if js_utils.is_jquery_activated(self.driver):
self.execute_script(scroll_script)
else:
self.__slow_scroll_to_element(element)
self.sleep(sleep_time)
def __jquery_click(self, selector, by=By.CSS_SELECTOR):
""" Clicks an element using jQuery. Different from using pure JS. """
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_element_present(
selector, by=by, timeout=settings.SMALL_TIMEOUT
)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
click_script = """jQuery('%s')[0].click();""" % selector
self.safe_execute_script(click_script)
def __get_major_browser_version(self):
try:
version = self.driver.__dict__["caps"]["browserVersion"]
except Exception:
try:
version = self.driver.__dict__["caps"]["version"]
except Exception:
version = str(
self.driver.__dict__["capabilities"]["version"]
)
self.driver.__dict__["caps"]["browserVersion"] = version
major_browser_version = version.split(".")[0]
return major_browser_version
def __get_href_from_link_text(self, link_text, hard_fail=True):
href = self.get_link_attribute(link_text, "href", hard_fail)
if not href:
return None
if href.startswith("//"):
link = "http:" + href
elif href.startswith("/"):
url = self.driver.current_url
domain_url = self.get_domain_url(url)
link = domain_url + href
else:
link = href
return link
    def __click_dropdown_link_text(self, link_text, link_css):
        """ When a link may be hidden under a dropdown menu, use this. """
        # Scan the page soup for <li class=...> items whose text contains
        # the link text, then hover the matching dropdown and click the link.
        soup = self.get_beautiful_soup()
        drop_down_list = []
        for item in soup.select("li[class]"):
            drop_down_list.append(item)
        # csstype is the attribute name from link_css, eg. "a[class=" -> "class"
        csstype = link_css.split("[")[1].split("=")[0]
        for item in drop_down_list:
            item_text_list = item.text.split("\n")
            if link_text in item_text_list and csstype in item.decode():
                dropdown_css = ""
                try:
                    # Build a CSS selector from the item's classes, eg. "li.nav.menu"
                    for css_class in item["class"]:
                        dropdown_css += "."
                        dropdown_css += css_class
                except Exception:
                    continue
                dropdown_css = item.name + dropdown_css
                matching_dropdowns = self.find_visible_elements(dropdown_css)
                for dropdown in matching_dropdowns:
                    # The same class names might be used for multiple dropdowns
                    if dropdown.is_displayed():
                        try:
                            try:
                                page_actions.hover_element(
                                    self.driver,
                                    dropdown,
                                )
                            except Exception:
                                # If hovering fails, driver is likely outdated
                                # Time to go directly to the hidden link text
                                self.open(
                                    self.__get_href_from_link_text(link_text)
                                )
                                return True
                            page_actions.hover_element_and_click(
                                self.driver,
                                dropdown,
                                link_text,
                                click_by=By.LINK_TEXT,
                                timeout=0.12,
                            )
                            return True
                        except Exception:
                            pass
        # No dropdown-hidden link was successfully clicked
        return False
def __get_href_from_partial_link_text(self, link_text, hard_fail=True):
href = self.get_partial_link_text_attribute(
link_text, "href", hard_fail
)
if not href:
return None
if href.startswith("//"):
link = "http:" + href
elif href.startswith("/"):
url = self.driver.current_url
domain_url = self.get_domain_url(url)
link = domain_url + href
else:
link = href
return link
def __click_dropdown_partial_link_text(self, link_text, link_css):
""" When a partial link may be hidden under a dropdown, use this. """
soup = self.get_beautiful_soup()
drop_down_list = []
for item in soup.select("li[class]"):
drop_down_list.append(item)
csstype = link_css.split("[")[1].split("=")[0]
for item in drop_down_list:
item_text_list = item.text.split("\n")
if link_text in item_text_list and csstype in item.decode():
dropdown_css = ""
try:
for css_class in item["class"]:
dropdown_css += "."
dropdown_css += css_class
except Exception:
continue
dropdown_css = item.name + dropdown_css
matching_dropdowns = self.find_visible_elements(dropdown_css)
for dropdown in matching_dropdowns:
# The same class names might be used for multiple dropdowns
if dropdown.is_displayed():
try:
try:
page_actions.hover_element(
self.driver, dropdown
)
except Exception:
# If hovering fails, driver is likely outdated
# Time to go directly to the hidden link text
self.open(
self.__get_href_from_partial_link_text(
link_text
)
)
return True
page_actions.hover_element_and_click(
self.driver,
dropdown,
link_text,
click_by=By.LINK_TEXT,
timeout=0.12,
)
return True
except Exception:
pass
return False
def __recalculate_selector(self, selector, by, xp_ok=True):
"""Use autodetection to return the correct selector with "by" updated.
If "xp_ok" is False, don't call convert_css_to_xpath(), which is
used to make the ":contains()" selector valid outside of JS calls."""
_type = type(selector) # First make sure the selector is a string
not_string = False
if not python3:
if _type is not str and _type is not unicode: # noqa: F821
not_string = True
else:
if _type is not str:
not_string = True
if not_string:
msg = "Expecting a selector of type: \"<class 'str'>\" (string)!"
raise Exception('Invalid selector type: "%s"\n%s' % (_type, msg))
if page_utils.is_xpath_selector(selector):
by = By.XPATH
if page_utils.is_link_text_selector(selector):
selector = page_utils.get_link_text_from_selector(selector)
by = By.LINK_TEXT
if page_utils.is_partial_link_text_selector(selector):
selector = page_utils.get_partial_link_text_from_selector(selector)
by = By.PARTIAL_LINK_TEXT
if page_utils.is_name_selector(selector):
name = page_utils.get_name_from_selector(selector)
selector = '[name="%s"]' % name
by = By.CSS_SELECTOR
if xp_ok:
if ":contains(" in selector and by == By.CSS_SELECTOR:
selector = self.convert_css_to_xpath(selector)
by = By.XPATH
return (selector, by)
def __looks_like_a_page_url(self, url):
"""Returns True if the url parameter looks like a URL. This method
is slightly more lenient than page_utils.is_valid_url(url) due to
possible typos when calling self.get(url), which will try to
navigate to the page if a URL is detected, but will instead call
self.get_element(URL_AS_A_SELECTOR) if the input in not a URL."""
if (
url.startswith("http:")
or url.startswith("https:")
or url.startswith("://")
or url.startswith("chrome:")
or url.startswith("about:")
or url.startswith("data:")
or url.startswith("file:")
or url.startswith("edge:")
or url.startswith("opera:")
or url.startswith("view-source:")
):
return True
else:
return False
def __make_css_match_first_element_only(self, selector):
# Only get the first match
return page_utils.make_css_match_first_element_only(selector)
def __demo_mode_pause_if_active(self, tiny=False):
if self.demo_mode:
wait_time = settings.DEFAULT_DEMO_MODE_TIMEOUT
if self.demo_sleep:
wait_time = float(self.demo_sleep)
if not tiny:
time.sleep(wait_time)
else:
time.sleep(wait_time / 3.4)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def __slow_mode_pause_if_active(self):
if self.slow_mode:
wait_time = settings.DEFAULT_DEMO_MODE_TIMEOUT
if self.demo_sleep:
wait_time = float(self.demo_sleep)
time.sleep(wait_time)
def __demo_mode_scroll_if_active(self, selector, by):
if self.demo_mode:
self.slow_scroll_to(selector, by=by)
def __demo_mode_highlight_if_active(self, selector, by):
if self.demo_mode:
# Includes self.slow_scroll_to(selector, by=by) by default
self.highlight(selector, by=by)
elif self.slow_mode:
# Just do the slow scroll part of the highlight() method
time.sleep(0.08)
selector, by = self.__recalculate_selector(selector, by)
element = self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT
)
try:
scroll_distance = js_utils.get_scroll_distance_to_element(
self.driver, element
)
if abs(scroll_distance) > constants.Values.SSMD:
self.__jquery_slow_scroll_to(selector, by)
else:
self.__slow_scroll_to_element(element)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.12)
element = self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT
)
self.__slow_scroll_to_element(element)
time.sleep(0.12)
def __scroll_to_element(self, element, selector=None, by=By.CSS_SELECTOR):
success = js_utils.scroll_to_element(self.driver, element)
if not success and selector:
self.wait_for_ready_state_complete()
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout=settings.SMALL_TIMEOUT
)
self.__demo_mode_pause_if_active(tiny=True)
def __slow_scroll_to_element(self, element):
try:
js_utils.slow_scroll_to_element(self.driver, element, self.browser)
except Exception:
# Scroll to the element instantly if the slow scroll fails
js_utils.scroll_to_element(self.driver, element)
    def __highlight_with_assert_success(
        self, message, selector, by=By.CSS_SELECTOR
    ):
        # Scrolls to the element, then highlights it while showing "message".
        # Used for the assert-success visuals (eg. in Demo Mode).
        selector, by = self.__recalculate_selector(selector, by, xp_ok=False)
        element = self.wait_for_element_visible(
            selector, by=by, timeout=settings.SMALL_TIMEOUT
        )
        try:
            scroll_distance = js_utils.get_scroll_distance_to_element(
                self.driver, element
            )
            if abs(scroll_distance) > constants.Values.SSMD:
                self.__jquery_slow_scroll_to(selector, by)
            else:
                self.__slow_scroll_to_element(element)
        except Exception:
            # Scroll failed (element likely stale): re-acquire and retry
            self.wait_for_ready_state_complete()
            time.sleep(0.12)
            element = self.wait_for_element_visible(
                selector, by=by, timeout=settings.SMALL_TIMEOUT
            )
            self.__slow_scroll_to_element(element)
        try:
            selector = self.convert_to_css_selector(selector, by=by)
        except Exception:
            # Don't highlight if can't convert to CSS_SELECTOR
            return
        o_bs = ""  # original_box_shadow
        try:
            style = element.get_attribute("style")
        except Exception:
            # Element went stale while reading its style: re-acquire it
            self.wait_for_ready_state_complete()
            time.sleep(0.12)
            element = self.wait_for_element_visible(
                selector, by=By.CSS_SELECTOR, timeout=settings.SMALL_TIMEOUT
            )
            style = element.get_attribute("style")
        if style:
            if "box-shadow: " in style:
                # Save the original box-shadow so it can be restored later
                box_start = style.find("box-shadow: ")
                box_end = style.find(";", box_start) + 1
                original_box_shadow = style[box_start:box_end]
                o_bs = original_box_shadow
        if ":contains" not in selector and ":first" not in selector:
            selector = re.escape(selector)
            selector = self.__escape_quotes_if_needed(selector)
            self.__highlight_with_js_2(message, selector, o_bs)
        else:
            # jQuery is needed for ":contains()" / ":first" pseudo-selectors
            selector = self.__make_css_match_first_element_only(selector)
            selector = re.escape(selector)
            selector = self.__escape_quotes_if_needed(selector)
            try:
                self.__highlight_with_jquery_2(message, selector, o_bs)
            except Exception:
                pass  # JQuery probably couldn't load. Skip highlighting.
        time.sleep(0.065)
def __highlight_with_js_2(self, message, selector, o_bs):
duration = self.message_duration
if not duration:
duration = settings.DEFAULT_MESSAGE_DURATION
if (self.headless or self.xvfb) and float(duration) > 0.75:
duration = 0.75
js_utils.highlight_with_js_2(
self.driver, message, selector, o_bs, duration
)
def __highlight_with_jquery_2(self, message, selector, o_bs):
duration = self.message_duration
if not duration:
duration = settings.DEFAULT_MESSAGE_DURATION
if (self.headless or self.xvfb) and float(duration) > 0.75:
duration = 0.75
js_utils.highlight_with_jquery_2(
self.driver, message, selector, o_bs, duration
)
############
    from seleniumbase.common import decorators
    @decorators.deprecated("You should use re.escape() instead.")
    def jq_format(self, code):
        # DEPRECATED - re.escape() already performs the intended action.
        # Kept only for backwards-compatibility with older user scripts.
        return js_utils._jq_format(code)
############
    def setUp(self, masterqa_mode=False):
        """
        Be careful if a subclass of BaseCase overrides setUp()
        You'll need to add the following line to the subclass setUp() method:
        super(SubClassOfBaseCase, self).setUp()
        """
        if not hasattr(self, "_using_sb_fixture") and self.__called_setup:
            # This test already called setUp()
            return
        self.__called_setup = True
        self.__called_teardown = False
        self.masterqa_mode = masterqa_mode
        self.is_pytest = None
        try:
            # This raises an exception if the test is not coming from pytest
            self.is_pytest = sb_config.is_pytest
        except Exception:
            # Not using pytest (probably nosetests)
            self.is_pytest = False
        if self.is_pytest:
            # pytest-specific code
            test_id = self.__get_test_id()
            self.test_id = test_id
            if hasattr(self, "_using_sb_fixture"):
                self.test_id = sb_config._test_id
            # Copy the command-line options from sb_config (set by the
            # pytest plugin) onto this test instance.
            self.browser = sb_config.browser
            self.account = sb_config.account
            self.data = sb_config.data
            self.var1 = sb_config.var1
            self.var2 = sb_config.var2
            self.var3 = sb_config.var3
            self.slow_mode = sb_config.slow_mode
            self.demo_mode = sb_config.demo_mode
            self.demo_sleep = sb_config.demo_sleep
            self.highlights = sb_config.highlights
            self.time_limit = sb_config._time_limit
            sb_config.time_limit = sb_config._time_limit  # Reset between tests
            self.environment = sb_config.environment
            self.env = self.environment  # Add a shortened version
            self.with_selenium = sb_config.with_selenium  # Should be True
            self.headless = sb_config.headless
            self.headless_active = False
            self.headed = sb_config.headed
            self.xvfb = sb_config.xvfb
            self.locale_code = sb_config.locale_code
            self.interval = sb_config.interval
            self.start_page = sb_config.start_page
            self.log_path = sb_config.log_path
            self.with_testing_base = sb_config.with_testing_base
            self.with_basic_test_info = sb_config.with_basic_test_info
            self.with_screen_shots = sb_config.with_screen_shots
            self.with_page_source = sb_config.with_page_source
            self.with_db_reporting = sb_config.with_db_reporting
            self.with_s3_logging = sb_config.with_s3_logging
            self.protocol = sb_config.protocol
            self.servername = sb_config.servername
            self.port = sb_config.port
            self.proxy_string = sb_config.proxy_string
            self.proxy_bypass_list = sb_config.proxy_bypass_list
            self.user_agent = sb_config.user_agent
            self.mobile_emulator = sb_config.mobile_emulator
            self.device_metrics = sb_config.device_metrics
            self.cap_file = sb_config.cap_file
            self.cap_string = sb_config.cap_string
            self.settings_file = sb_config.settings_file
            self.database_env = sb_config.database_env
            self.message_duration = sb_config.message_duration
            self.js_checking_on = sb_config.js_checking_on
            self.ad_block_on = sb_config.ad_block_on
            self.block_images = sb_config.block_images
            self.chromium_arg = sb_config.chromium_arg
            self.firefox_arg = sb_config.firefox_arg
            self.firefox_pref = sb_config.firefox_pref
            self.verify_delay = sb_config.verify_delay
            self.recorder_mode = sb_config.recorder_mode
            # The Recorder extension is active whenever Recorder Mode is
            self.recorder_ext = sb_config.recorder_mode
            self.disable_csp = sb_config.disable_csp
            self.disable_ws = sb_config.disable_ws
            self.enable_ws = sb_config.enable_ws
            if not self.disable_ws:
                self.enable_ws = True
            self.enable_sync = sb_config.enable_sync
            self.use_auto_ext = sb_config.use_auto_ext
            self.no_sandbox = sb_config.no_sandbox
            self.disable_gpu = sb_config.disable_gpu
            self.incognito = sb_config.incognito
            self.guest_mode = sb_config.guest_mode
            self.devtools = sb_config.devtools
            self.remote_debug = sb_config.remote_debug
            self._multithreaded = sb_config._multithreaded
            self._reuse_session = sb_config.reuse_session
            self._crumbs = sb_config.crumbs
            self.dashboard = sb_config.dashboard
            self._dash_initialized = sb_config._dashboard_initialized
            if self.dashboard and self._multithreaded:
                # Multiple processes writing the dashboard need a file lock
                import fasteners
                self.dash_lock = fasteners.InterProcessLock(
                    constants.Dashboard.LOCKFILE
                )
            self.swiftshader = sb_config.swiftshader
            self.user_data_dir = sb_config.user_data_dir
            self.extension_zip = sb_config.extension_zip
            self.extension_dir = sb_config.extension_dir
            self.external_pdf = sb_config.external_pdf
            self.maximize_option = sb_config.maximize_option
            self.save_screenshot_after_test = sb_config.save_screenshot
            self.visual_baseline = sb_config.visual_baseline
            self.timeout_multiplier = sb_config.timeout_multiplier
            self.pytest_html_report = sb_config.pytest_html_report
            self.report_on = False
            if self.pytest_html_report:
                self.report_on = True
            self.use_grid = False
            if self.servername != "localhost":
                # Use Selenium Grid (Use --server="127.0.0.1" for a local Grid)
                self.use_grid = True
            if self.with_db_reporting:
                # Record execution/testcase rows in the results database
                import getpass
                import uuid
                from seleniumbase.core.application_manager import (
                    ApplicationManager,
                )
                from seleniumbase.core.testcase_manager import (
                    ExecutionQueryPayload,
                )
                from seleniumbase.core.testcase_manager import (
                    TestcaseDataPayload,
                )
                from seleniumbase.core.testcase_manager import TestcaseManager
                self.execution_guid = str(uuid.uuid4())
                self.testcase_guid = None
                self.execution_start_time = 0
                self.case_start_time = 0
                self.application = None
                self.testcase_manager = None
                self.error_handled = False
                self.testcase_manager = TestcaseManager(self.database_env)
                #
                exec_payload = ExecutionQueryPayload()
                exec_payload.execution_start_time = int(time.time() * 1000)
                self.execution_start_time = exec_payload.execution_start_time
                exec_payload.guid = self.execution_guid
                exec_payload.username = getpass.getuser()
                self.testcase_manager.insert_execution_data(exec_payload)
                #
                data_payload = TestcaseDataPayload()
                self.testcase_guid = str(uuid.uuid4())
                data_payload.guid = self.testcase_guid
                data_payload.execution_guid = self.execution_guid
                if self.with_selenium:
                    data_payload.browser = self.browser
                else:
                    data_payload.browser = "N/A"
                data_payload.test_address = test_id
                application = ApplicationManager.generate_application_string(
                    self._testMethodName
                )
                data_payload.env = application.split(".")[0]
                data_payload.start_time = application.split(".")[1]
                data_payload.state = constants.State.UNTESTED
                self.__skip_reason = None
                self.testcase_manager.insert_testcase_data(data_payload)
                self.case_start_time = int(time.time() * 1000)
            if self.headless or self.xvfb:
                # Start a virtual display for headless runs (if available)
                width = settings.HEADLESS_START_WIDTH
                height = settings.HEADLESS_START_HEIGHT
                try:
                    # from pyvirtualdisplay import Display  # Skip for own lib
                    from sbvirtualdisplay import Display
                    self.display = Display(visible=0, size=(width, height))
                    self.display.start()
                    self.headless_active = True
                except Exception:
                    # pyvirtualdisplay might not be necessary anymore because
                    # Chrome and Firefox now have built-in headless displays
                    pass
        else:
            # (Nosetests / Not Pytest)
            pass  # Setup performed in plugins
        # Verify that SeleniumBase is installed successfully
        if not hasattr(self, "browser"):
            raise Exception(
                'SeleniumBase plugins DID NOT load! * Please REINSTALL!\n'
                '*** Either install SeleniumBase in Dev Mode from a clone:\n'
                '    >>> "pip install -e ."     (Run in DIR with setup.py)\n'
                '*** Or install the latest SeleniumBase version from PyPI:\n'
                '    >>> "pip install -U seleniumbase"    (Run in any DIR)'
            )
        if not hasattr(sb_config, "_is_timeout_changed"):
            # Should only be reachable from pure Python runs
            sb_config._is_timeout_changed = False
            sb_config._SMALL_TIMEOUT = settings.SMALL_TIMEOUT
            sb_config._LARGE_TIMEOUT = settings.LARGE_TIMEOUT
        if sb_config._is_timeout_changed:
            if sb_config._SMALL_TIMEOUT and sb_config._LARGE_TIMEOUT:
                settings.SMALL_TIMEOUT = sb_config._SMALL_TIMEOUT
                settings.LARGE_TIMEOUT = sb_config._LARGE_TIMEOUT
        if not hasattr(sb_config, "_recorded_actions"):
            # Only filled when Recorder Mode is enabled
            sb_config._recorded_actions = {}
        if not hasattr(settings, "SWITCH_TO_NEW_TABS_ON_CLICK"):
            # If using an older settings file, set the new definitions manually
            settings.SWITCH_TO_NEW_TABS_ON_CLICK = True
        # Parse the settings file
        if self.settings_file:
            from seleniumbase.core import settings_parser
            settings_parser.set_settings(self.settings_file)
        # Set variables that may be useful to developers
        self.log_abspath = os.path.abspath(self.log_path)
        self.data_path = os.path.join(self.log_path, self.__get_test_id())
        self.data_abspath = os.path.abspath(self.data_path)
        # Mobile Emulator device metrics: CSS Width, CSS Height, & Pixel-Ratio
        if self.device_metrics:
            metrics_string = self.device_metrics
            metrics_string = metrics_string.replace(" ", "")
            metrics_list = metrics_string.split(",")
            exception_string = (
                "Invalid input for Mobile Emulator device metrics!\n"
                "Expecting a comma-separated string with three\n"
                "integer values for Width, Height, and Pixel-Ratio.\n"
                'Example: --metrics="411,731,3" '
            )
            if len(metrics_list) != 3:
                raise Exception(exception_string)
            try:
                self.__device_width = int(metrics_list[0])
                self.__device_height = int(metrics_list[1])
                self.__device_pixel_ratio = int(metrics_list[2])
                self.mobile_emulator = True
            except Exception:
                raise Exception(exception_string)
        if self.mobile_emulator:
            if not self.user_agent:
                # Use the Pixel 4 user agent by default if not specified
                self.user_agent = (
                    "Mozilla/5.0 (Linux; Android 11; Pixel 4 XL) "
                    "AppleWebKit/537.36 (KHTML, like Gecko) "
                    "Chrome/89.0.4389.105 Mobile Safari/537.36"
                )
        if self.browser in ["firefox", "ie", "safari", "opera"]:
            # The Recorder Mode browser extension is only for Chrome/Edge.
            if self.recorder_mode:
                message = (
                    "Recorder Mode ONLY supports Chrome and Edge!\n"
                    '(Your browser choice was: "%s")' % self.browser)
                raise Exception(message)
        # Dashboard pre-processing:
        if self.dashboard:
            if self._multithreaded:
                with self.dash_lock:
                    sb_config._sbase_detected = True
                    sb_config._only_unittest = False
                    if not self._dash_initialized:
                        sb_config._dashboard_initialized = True
                        self._dash_initialized = True
                        self.__process_dashboard(False, init=True)
            else:
                sb_config._sbase_detected = True
                sb_config._only_unittest = False
                if not self._dash_initialized:
                    sb_config._dashboard_initialized = True
                    self._dash_initialized = True
                    self.__process_dashboard(False, init=True)
        # Set the JS start time for Recorder Mode if reusing the session.
        # Use this to skip saving recorded actions from previous tests.
        if self.recorder_mode and self._reuse_session:
            self.__js_start_time = int(time.time() * 1000.0)
        has_url = False
        if self._reuse_session:
            if not hasattr(sb_config, "shared_driver"):
                sb_config.shared_driver = None
            if sb_config.shared_driver:
                try:
                    # Adopt the shared driver and reset its window state
                    self._default_driver = sb_config.shared_driver
                    self.driver = sb_config.shared_driver
                    self._drivers_list = [sb_config.shared_driver]
                    url = self.get_current_url()
                    if url is not None:
                        has_url = True
                    if len(self.driver.window_handles) > 1:
                        # Close all extra tabs, keeping only the first one
                        while len(self.driver.window_handles) > 1:
                            self.switch_to_window(
                                len(self.driver.window_handles) - 1
                            )
                            self.driver.close()
                        self.switch_to_window(0)
                    if self._crumbs:
                        self.driver.delete_all_cookies()
                except Exception:
                    pass
        if self._reuse_session and sb_config.shared_driver and has_url:
            good_start_page = False
            if self.recorder_ext:
                self.__js_start_time = int(time.time() * 1000.0)
            if self.start_page and len(self.start_page) >= 4:
                if page_utils.is_valid_url(self.start_page):
                    good_start_page = True
                    self.__new_window_on_rec_open = False
                    self.open(self.start_page)
                    self.__new_window_on_rec_open = True
                else:
                    # Retry with an "https://" prefix added to the start page
                    new_start_page = "https://" + self.start_page
                    if page_utils.is_valid_url(new_start_page):
                        good_start_page = True
                        self.__dont_record_open = True
                        self.open(new_start_page)
                        self.__dont_record_open = False
            if self.recorder_ext or (self._crumbs and not good_start_page):
                # Reset to a blank page so prior-page state doesn't leak in
                if self.get_current_url() != "data:,":
                    self.__new_window_on_rec_open = False
                    self.open("data:,")
                    self.__new_window_on_rec_open = True
                if self.recorder_ext:
                    self.__js_start_time = int(time.time() * 1000.0)
        else:
            # Launch WebDriver for both Pytest and Nosetests
            self.driver = self.get_new_driver(
                browser=self.browser,
                headless=self.headless,
                locale_code=self.locale_code,
                protocol=self.protocol,
                servername=self.servername,
                port=self.port,
                proxy=self.proxy_string,
                proxy_bypass_list=self.proxy_bypass_list,
                agent=self.user_agent,
                switch_to=True,
                cap_file=self.cap_file,
                cap_string=self.cap_string,
                recorder_ext=self.recorder_ext,
                disable_csp=self.disable_csp,
                enable_ws=self.enable_ws,
                enable_sync=self.enable_sync,
                use_auto_ext=self.use_auto_ext,
                no_sandbox=self.no_sandbox,
                disable_gpu=self.disable_gpu,
                incognito=self.incognito,
                guest_mode=self.guest_mode,
                devtools=self.devtools,
                remote_debug=self.remote_debug,
                swiftshader=self.swiftshader,
                ad_block_on=self.ad_block_on,
                block_images=self.block_images,
                chromium_arg=self.chromium_arg,
                firefox_arg=self.firefox_arg,
                firefox_pref=self.firefox_pref,
                user_data_dir=self.user_data_dir,
                extension_zip=self.extension_zip,
                extension_dir=self.extension_dir,
                external_pdf=self.external_pdf,
                is_mobile=self.mobile_emulator,
                # NOTE(review): __device_width/height/pixel_ratio appear to be
                # preset elsewhere when --metrics isn't given — confirm.
                d_width=self.__device_width,
                d_height=self.__device_height,
                d_p_r=self.__device_pixel_ratio,
            )
            self._default_driver = self.driver
            if self._reuse_session:
                sb_config.shared_driver = self.driver
        if self.browser in ["firefox", "ie", "safari", "opera"]:
            # Only Chrome and Edge browsers have the mobile emulator.
            # Some actions such as hover-clicking are different on mobile.
            self.mobile_emulator = False
        # Configure the test time limit (if used).
        self.set_time_limit(self.time_limit)
        # Set the start time for the test (in ms).
        # Although the pytest clock starts before setUp() begins,
        # the time-limit clock starts at the end of the setUp() method.
        sb_config.start_time_ms = int(time.time() * 1000.0)
        if not self.__start_time_ms:
            # Call this once in case of multiple setUp() calls in the same test
            self.__start_time_ms = sb_config.start_time_ms
    def __set_last_page_screenshot(self):
        """self.__last_page_screenshot is only for pytest html report logs.
        self.__last_page_screenshot_png is for all screenshot log files."""
        if not self.__last_page_screenshot and (
            not self.__last_page_screenshot_png
        ):
            try:
                element = self.driver.find_element(
                    by=By.TAG_NAME, value="body"
                )
                if self.is_pytest and self.report_on:
                    # Full-window PNG for logs + base64 for the html report
                    self.__last_page_screenshot_png = (
                        self.driver.get_screenshot_as_png()
                    )
                    self.__last_page_screenshot = element.screenshot_as_base64
                else:
                    # Only the PNG is needed when no html report is being made
                    self.__last_page_screenshot_png = element.screenshot_as_png
            except Exception:
                # Element screenshots failed: fall back to driver-level
                # screenshots, then to the "undefined" warning placeholder.
                if not self.__last_page_screenshot:
                    if self.is_pytest and self.report_on:
                        try:
                            self.__last_page_screenshot = (
                                self.driver.get_screenshot_as_base64()
                            )
                        except Exception:
                            self.__last_page_screenshot = (
                                constants.Warnings.SCREENSHOT_UNDEFINED
                            )
                if not self.__last_page_screenshot_png:
                    try:
                        self.__last_page_screenshot_png = (
                            self.driver.get_screenshot_as_png()
                        )
                    except Exception:
                        self.__last_page_screenshot_png = (
                            constants.Warnings.SCREENSHOT_UNDEFINED
                        )
def __set_last_page_url(self):
if not self.__last_page_url:
try:
self.__last_page_url = log_helper.get_last_page(self.driver)
except Exception:
self.__last_page_url = None
def __set_last_page_source(self):
if not self.__last_page_source:
try:
self.__last_page_source = (
log_helper.get_html_source_with_base_href(
self.driver, self.driver.page_source
)
)
except Exception:
self.__last_page_source = (
constants.Warnings.PAGE_SOURCE_UNDEFINED
)
def __get_exception_info(self):
exc_message = None
if (
python3
and hasattr(self, "_outcome")
and (hasattr(self._outcome, "errors") and self._outcome.errors)
):
try:
exc_message = self._outcome.errors[0][1][1]
except Exception:
exc_message = "(Unknown Exception)"
else:
try:
exc_message = sys.last_value
except Exception:
exc_message = "(Unknown Exception)"
return str(exc_message)
    def __insert_test_result(self, state, err):
        """Writes this test's final result (state + message) to the results
        database. Used with --with-db_reporting. "err" is truthy when the
        test raised an exception; otherwise a skip reason may be recorded."""
        from seleniumbase.core.testcase_manager import TestcaseDataPayload
        data_payload = TestcaseDataPayload()
        data_payload.runtime = int(time.time() * 1000) - self.case_start_time
        data_payload.guid = self.testcase_guid
        data_payload.execution_guid = self.execution_guid
        data_payload.state = state
        if err:
            import traceback
            tb_string = traceback.format_exc()
            # Trim the traceback down to its most useful message portion
            if "Message: " in tb_string:
                data_payload.message = (
                    "Message: " + tb_string.split("Message: ")[-1]
                )
            elif "Exception: " in tb_string:
                data_payload.message = tb_string.split("Exception: ")[-1]
            elif "Error: " in tb_string:
                data_payload.message = tb_string.split("Error: ")[-1]
            else:
                data_payload.message = self.__get_exception_info()
        else:
            test_id = self.__get_test_id_2()
            if (
                self.is_pytest
                and test_id in sb_config._results.keys()
                and (sb_config._results[test_id] == "Skipped")
            ):
                # Record why the test was skipped (if a reason was given)
                if self.__skip_reason:
                    data_payload.message = "Skipped: " + self.__skip_reason
                else:
                    data_payload.message = "Skipped: (no reason given)"
        self.testcase_manager.update_testcase_data(data_payload)
    def __add_pytest_html_extra(self):
        """Attach the last URL and screenshot to the pytest-html report.

        Runs at most once per test (guarded by __added_pytest_html_extra).
        Any failure here is swallowed so reporting can't break a test.
        """
        if not self.__added_pytest_html_extra:
            try:
                if self.with_selenium:
                    if not self.__last_page_screenshot:
                        # Capture the page state before it is lost.
                        self.__set_last_page_screenshot()
                        self.__set_last_page_url()
                        self.__set_last_page_source()
                    if self.report_on:
                        extra_url = {}
                        extra_url["name"] = "URL"
                        extra_url["format"] = "url"
                        extra_url["content"] = self.get_current_url()
                        extra_url["mime_type"] = None
                        extra_url["extension"] = None
                        extra_image = {}
                        extra_image["name"] = "Screenshot"
                        extra_image["format"] = "image"
                        extra_image["content"] = self.__last_page_screenshot
                        extra_image["mime_type"] = "image/png"
                        extra_image["extension"] = "png"
                        self.__added_pytest_html_extra = True
                        # Only attach if the screenshot capture succeeded.
                        if self.__last_page_screenshot != (
                            constants.Warnings.SCREENSHOT_UNDEFINED
                        ):
                            self._html_report_extra.append(extra_url)
                            self._html_report_extra.append(extra_image)
            except Exception:
                pass
    def __quit_all_drivers(self):
        """Quit all browser drivers opened by this test.

        When session reuse is enabled, the shared driver is moved to the
        front of the list, made the default, and kept alive instead of
        being quit with the rest.
        """
        if self._reuse_session and sb_config.shared_driver:
            if len(self._drivers_list) > 0:
                # Make sure the shared driver is first in the list.
                if self._drivers_list[0] != sb_config.shared_driver:
                    if sb_config.shared_driver in self._drivers_list:
                        self._drivers_list.remove(sb_config.shared_driver)
                    self._drivers_list.insert(0, sb_config.shared_driver)
                self._default_driver = self._drivers_list[0]
                self.switch_to_default_driver()
            # Keep the shared driver alive; only quit the others.
            if len(self._drivers_list) > 1:
                self._drivers_list = self._drivers_list[1:]
            else:
                self._drivers_list = []
        # Close all open browser windows
        self._drivers_list.reverse()  # Last In, First Out
        for driver in self._drivers_list:
            try:
                driver.quit()
            except AttributeError:
                pass
            except Exception:
                pass
        self.driver = None
        self._default_driver = None
        self._drivers_list = []
    def __has_exception(self):
        """Return True if the currently-running test raised an exception.

        Detection depends on the Python version and on whether the test
        runs via unittest, pytest, or an "sb" fixture.
        """
        has_exception = False
        if hasattr(sys, "last_traceback") and sys.last_traceback is not None:
            has_exception = True
        elif python3 and hasattr(self, "_outcome"):
            # Python 3 unittest stores test errors on self._outcome.
            if hasattr(self._outcome, "errors") and self._outcome.errors:
                has_exception = True
        else:
            if python3:
                has_exception = sys.exc_info()[1] is not None
            else:
                if not hasattr(self, "_using_sb_fixture_class") and (
                    not hasattr(self, "_using_sb_fixture_no_class")
                ):
                    has_exception = sys.exc_info()[1] is not None
                else:
                    has_exception = len(str(sys.exc_info()[1]).strip()) > 0
        if (
            self.__will_be_skipped
            and (hasattr(self, "_using_sb_fixture") or not python3)
        ):
            # A test that is about to be skipped is not a failure.
            has_exception = False
        return has_exception
def __get_test_id(self):
""" The id used in various places such as the test log path. """
test_id = "%s.%s.%s" % (
self.__class__.__module__,
self.__class__.__name__,
self._testMethodName,
)
if self._sb_test_identifier and len(str(self._sb_test_identifier)) > 6:
test_id = self._sb_test_identifier
test_id = test_id.replace(".py::", ".").replace("::", ".")
return test_id
def __get_test_id_2(self):
""" The id for SeleniumBase Dashboard entries. """
if "PYTEST_CURRENT_TEST" in os.environ:
return os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]
test_id = "%s.%s.%s" % (
self.__class__.__module__.split(".")[-1],
self.__class__.__name__,
self._testMethodName,
)
if self._sb_test_identifier and len(str(self._sb_test_identifier)) > 6:
test_id = self._sb_test_identifier
if test_id.count(".") > 1:
test_id = ".".join(test_id.split(".")[1:])
return test_id
def __get_display_id(self):
""" The id for running a test from pytest. (Displayed on Dashboard) """
if "PYTEST_CURRENT_TEST" in os.environ:
return os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]
test_id = "%s.py::%s::%s" % (
self.__class__.__module__.replace(".", "/"),
self.__class__.__name__,
self._testMethodName,
)
if self._sb_test_identifier and len(str(self._sb_test_identifier)) > 6:
test_id = self._sb_test_identifier
if hasattr(self, "_using_sb_fixture_class"):
if test_id.count(".") >= 2:
parts = test_id.split(".")
full = parts[-3] + ".py::" + parts[-2] + "::" + parts[-1]
test_id = full
elif hasattr(self, "_using_sb_fixture_no_class"):
if test_id.count(".") >= 1:
parts = test_id.split(".")
full = parts[-2] + ".py::" + parts[-1]
test_id = full
return test_id
def __get_filename(self):
""" The filename of the current SeleniumBase test. (NOT Path) """
filename = None
if "PYTEST_CURRENT_TEST" in os.environ:
test_id = os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]
filename = test_id.split("::")[0].split("/")[-1]
else:
filename = self.__class__.__module__.split(".")[-1] + ".py"
return filename
def __create_log_path_as_needed(self, test_logpath):
if not os.path.exists(test_logpath):
try:
os.makedirs(test_logpath)
except Exception:
pass # Only reachable during multi-threaded runs
    def __process_dashboard(self, has_exception, init=False):
        """SeleniumBase Dashboard Processing.

        Updates the shared result/duration/log-path bookkeeping in
        sb_config for the current test, then regenerates dashboard.html.

        :param has_exception: whether the current test failed.
        :param init: True only on the first call, which initializes the
                     Dashboard without recording a result.
        """
        if self._multithreaded:
            existing_res = sb_config._results  # For recording "Skipped" tests
            # In multi-threaded mode the dashboard state is shared between
            # processes via a JSON file; reload it before updating.
            abs_path = os.path.abspath(".")
            dash_json_loc = constants.Dashboard.DASH_JSON
            dash_jsonpath = os.path.join(abs_path, dash_json_loc)
            if not init and os.path.exists(dash_jsonpath):
                with open(dash_jsonpath, "r") as f:
                    dash_json = f.read().strip()
                dash_data, d_id, dash_rt, tlp, d_stats = json.loads(dash_json)
                num_passed, num_failed, num_skipped, num_untested = d_stats
                sb_config._results = dash_data
                sb_config._display_id = d_id
                sb_config._duration = dash_rt  # Dashboard Run Time
                sb_config._d_t_log_path = tlp  # Test Log Path
                sb_config.item_count_passed = num_passed
                sb_config.item_count_failed = num_failed
                sb_config.item_count_skipped = num_skipped
                sb_config.item_count_untested = num_untested
        if len(sb_config._extra_dash_entries) > 0:
            # First take care of existing entries from non-SeleniumBase tests
            for test_id in sb_config._extra_dash_entries:
                if test_id in sb_config._results.keys():
                    if sb_config._results[test_id] == "Skipped":
                        sb_config.item_count_skipped += 1
                        sb_config.item_count_untested -= 1
                    elif sb_config._results[test_id] == "Failed":
                        sb_config.item_count_failed += 1
                        sb_config.item_count_untested -= 1
                    elif sb_config._results[test_id] == "Passed":
                        sb_config.item_count_passed += 1
                        sb_config.item_count_untested -= 1
                    else:  # Mark "Skipped" if unknown
                        sb_config.item_count_skipped += 1
                        sb_config.item_count_untested -= 1
            sb_config._extra_dash_entries = []  # Reset the list to empty
        # Process new entries
        log_dir = self.log_path
        ft_id = self.__get_test_id()  # Full test id with path to log files
        test_id = self.__get_test_id_2()  # The test id used by the DashBoard
        # Placeholder ids used by the pytest plugin; never shown on the board.
        dud = "seleniumbase/plugins/pytest_plugin.py::BaseClass::base_method"
        dud2 = "pytest_plugin.BaseClass.base_method"
        if hasattr(self, "_using_sb_fixture") and self.__will_be_skipped:
            test_id = sb_config._test_id
        if not init:
            # Record the test's duration and (if it has artifacts) log path.
            duration_ms = int(time.time() * 1000) - self.__start_time_ms
            duration = float(duration_ms) / 1000.0
            duration = "{:.2f}".format(duration)
            sb_config._duration[test_id] = duration
            if (
                has_exception
                or self.save_screenshot_after_test
                or self.__screenshot_count > 0
                or self.__will_be_skipped
            ):
                sb_config._d_t_log_path[test_id] = os.path.join(log_dir, ft_id)
            else:
                sb_config._d_t_log_path[test_id] = None
            if test_id not in sb_config._display_id.keys():
                sb_config._display_id[test_id] = self.__get_display_id()
            if sb_config._display_id[test_id] == dud:
                return
            if (
                hasattr(self, "_using_sb_fixture")
                and test_id not in sb_config._results.keys()
            ):
                # Remove a stale entry recorded under an alternate id form.
                if test_id.count(".") > 1:
                    alt_test_id = ".".join(test_id.split(".")[1:])
                    if alt_test_id in sb_config._results.keys():
                        sb_config._results.pop(alt_test_id)
                elif test_id.count(".") == 1:
                    alt_test_id = sb_config._display_id[test_id]
                    alt_test_id = alt_test_id.replace(".py::", ".")
                    alt_test_id = alt_test_id.replace("::", ".")
                    if alt_test_id in sb_config._results.keys():
                        sb_config._results.pop(alt_test_id)
            # Update the result for this test and adjust the counters.
            if test_id in sb_config._results.keys() and (
                sb_config._results[test_id] == "Skipped"
            ):
                if self.__passed_then_skipped:
                    # Multiple calls of setUp() and tearDown() in the same test
                    sb_config.item_count_passed -= 1
                    sb_config.item_count_untested += 1
                    self.__passed_then_skipped = False
                sb_config._results[test_id] = "Skipped"
                sb_config.item_count_skipped += 1
                sb_config.item_count_untested -= 1
            elif (
                self._multithreaded
                and test_id in existing_res.keys()
                and existing_res[test_id] == "Skipped"
            ):
                sb_config._results[test_id] = "Skipped"
                sb_config.item_count_skipped += 1
                sb_config.item_count_untested -= 1
            elif has_exception:
                if test_id not in sb_config._results.keys():
                    sb_config._results[test_id] = "Failed"
                    sb_config.item_count_failed += 1
                    sb_config.item_count_untested -= 1
                elif not sb_config._results[test_id] == "Failed":
                    # tearDown() was called more than once in the test
                    if sb_config._results[test_id] == "Passed":
                        # Passed earlier, but last run failed
                        sb_config._results[test_id] = "Failed"
                        sb_config.item_count_failed += 1
                        sb_config.item_count_passed -= 1
                    else:
                        sb_config._results[test_id] = "Failed"
                        sb_config.item_count_failed += 1
                        sb_config.item_count_untested -= 1
                else:
                    # pytest-rerunfailures caused a duplicate failure
                    sb_config._results[test_id] = "Failed"
            else:
                if (
                    test_id in sb_config._results.keys()
                    and sb_config._results[test_id] == "Failed"
                ):
                    # pytest-rerunfailures reran a test that failed
                    sb_config._d_t_log_path[test_id] = os.path.join(
                        log_dir, ft_id
                    )
                    sb_config.item_count_failed -= 1
                    sb_config.item_count_untested += 1
                elif (
                    test_id in sb_config._results.keys()
                    and sb_config._results[test_id] == "Passed"
                ):
                    # tearDown() was called more than once in the test
                    sb_config.item_count_passed -= 1
                    sb_config.item_count_untested += 1
                sb_config._results[test_id] = "Passed"
                sb_config.item_count_passed += 1
                sb_config.item_count_untested -= 1
        else:
            pass  # Only initialize the Dashboard on the first processing
        # Build the pie chart from the current counters.
        num_passed = sb_config.item_count_passed
        num_failed = sb_config.item_count_failed
        num_skipped = sb_config.item_count_skipped
        num_untested = sb_config.item_count_untested
        self.create_pie_chart(title=constants.Dashboard.TITLE)
        self.add_data_point("Passed", num_passed, color="#84d474")
        self.add_data_point("Untested", num_untested, color="#eaeaea")
        self.add_data_point("Skipped", num_skipped, color="#efd8b4")
        self.add_data_point("Failed", num_failed, color="#f17476")
        style = (
            '<link rel="stylesheet" charset="utf-8" '
            'href="%s">' % constants.Dashboard.STYLE_CSS
        )
        auto_refresh_html = ""
        if num_untested > 0:
            # Refresh every X seconds when waiting for more test results
            auto_refresh_html = constants.Dashboard.META_REFRESH_HTML
        else:
            # The tests are complete
            if sb_config._using_html_report:
                # Add the pie chart to the pytest html report
                sb_config._saved_dashboard_pie = self.extract_chart()
                if self._multithreaded:
                    abs_path = os.path.abspath(".")
                    dash_pie = json.dumps(sb_config._saved_dashboard_pie)
                    dash_pie_loc = constants.Dashboard.DASH_PIE
                    pie_path = os.path.join(abs_path, dash_pie_loc)
                    pie_file = codecs.open(pie_path, "w+", encoding="utf-8")
                    pie_file.writelines(dash_pie)
                    pie_file.close()
        # Assemble the dashboard HTML: head, results table, footer text.
        head = (
            '<head><meta charset="utf-8">'
            '<meta name="viewport" content="shrink-to-fit=no">'
            '<link rel="shortcut icon" href="%s">'
            "%s"
            "<title>Dashboard</title>"
            "%s</head>"
            % (constants.Dashboard.DASH_PIE_PNG_1, auto_refresh_html, style)
        )
        table_html = (
            "<div></div>"
            '<table border="1px solid #e6e6e6;" width="100%;" padding: 5px;'
            ' font-size="12px;" text-align="left;" id="results-table">'
            '<thead id="results-table-head">'
            '<tr style="background-color: #F7F7FD;">'
            '<th col="result">Result</th><th col="name">Test</th>'
            '<th col="duration">Duration</th><th col="links">Links</th>'
            "</tr></thead>"
        )
        the_failed = []
        the_skipped = []
        the_passed_hl = []  # Passed and has logs
        the_passed_nl = []  # Passed and no logs
        the_untested = []
        if dud2 in sb_config._results.keys():
            sb_config._results.pop(dud2)
        # Group results so rows appear as: failed, skipped, passed, untested.
        for key in sb_config._results.keys():
            t_res = sb_config._results[key]
            t_dur = sb_config._duration[key]
            t_d_id = sb_config._display_id[key]
            t_l_path = sb_config._d_t_log_path[key]
            res_low = t_res.lower()
            if sb_config._results[key] == "Failed":
                if not sb_config._d_t_log_path[key]:
                    sb_config._d_t_log_path[key] = os.path.join(log_dir, ft_id)
                the_failed.append([res_low, t_res, t_d_id, t_dur, t_l_path])
            elif sb_config._results[key] == "Skipped":
                the_skipped.append([res_low, t_res, t_d_id, t_dur, t_l_path])
            elif sb_config._results[key] == "Passed" and t_l_path:
                the_passed_hl.append([res_low, t_res, t_d_id, t_dur, t_l_path])
            elif sb_config._results[key] == "Passed" and not t_l_path:
                the_passed_nl.append([res_low, t_res, t_d_id, t_dur, t_l_path])
            elif sb_config._results[key] == "Untested":
                the_untested.append([res_low, t_res, t_d_id, t_dur, t_l_path])
        for row in the_failed:
            row = (
                '<tbody class="%s results-table-row">'
                '<tr style="background-color: #FFF8F8;">'
                '<td class="col-result">%s</td><td>%s</td><td>%s</td>'
                '<td><a href="%s">Logs</a> / <a href="%s/">Data</a>'
                "</td></tr></tbody>"
                "" % (row[0], row[1], row[2], row[3], log_dir, row[4])
            )
            table_html += row
        for row in the_skipped:
            if not row[4]:
                row = (
                    '<tbody class="%s results-table-row">'
                    '<tr style="background-color: #FEFEF9;">'
                    '<td class="col-result">%s</td><td>%s</td><td>%s</td>'
                    "<td>-</td></tr></tbody>"
                    % (row[0], row[1], row[2], row[3])
                )
            else:
                row = (
                    '<tbody class="%s results-table-row">'
                    '<tr style="background-color: #FEFEF9;">'
                    '<td class="col-result">%s</td><td>%s</td><td>%s</td>'
                    '<td><a href="%s">Logs</a> / <a href="%s/">Data</a>'
                    "</td></tr></tbody>"
                    "" % (row[0], row[1], row[2], row[3], log_dir, row[4])
                )
            table_html += row
        for row in the_passed_hl:
            # Passed and has logs
            row = (
                '<tbody class="%s results-table-row">'
                '<tr style="background-color: #F8FFF8;">'
                '<td class="col-result">%s</td><td>%s</td><td>%s</td>'
                '<td><a href="%s">Logs</a> / <a href="%s/">Data</a>'
                "</td></tr></tbody>"
                "" % (row[0], row[1], row[2], row[3], log_dir, row[4])
            )
            table_html += row
        for row in the_passed_nl:
            # Passed and no logs
            row = (
                '<tbody class="%s results-table-row">'
                '<tr style="background-color: #F8FFF8;">'
                '<td class="col-result">%s</td><td>%s</td><td>%s</td>'
                "<td>-</td></tr></tbody>" % (row[0], row[1], row[2], row[3])
            )
            table_html += row
        for row in the_untested:
            row = (
                '<tbody class="%s results-table-row"><tr>'
                '<td class="col-result">%s</td><td>%s</td><td>%s</td>'
                "<td>-</td></tr></tbody>" % (row[0], row[1], row[2], row[3])
            )
            table_html += row
        table_html += "</table>"
        add_more = "<br /><b>Last updated:</b> "
        timestamp, the_date, the_time = log_helper.get_master_time()
        last_updated = "%s at %s" % (the_date, the_time)
        add_more = add_more + "%s" % last_updated
        status = "<p></p><div><b>Status:</b> Awaiting results..."
        status += " (Refresh the page for updates)"
        if num_untested == 0:
            status = "<p></p><div><b>Status:</b> Test Run Complete:"
            if num_failed == 0:
                if num_passed > 0:
                    if num_skipped == 0:
                        status += " <b>Success!</b> (All tests passed)"
                    else:
                        status += " <b>Success!</b> (No failing tests)"
                else:
                    status += " All tests were skipped!"
            else:
                latest_logs_dir = "latest_logs/"
                log_msg = "See latest logs for details"
                if num_failed == 1:
                    status += (
                        " <b>1 test failed!</b> --- "
                        '(<b><a href="%s">%s</a></b>)'
                        "" % (latest_logs_dir, log_msg)
                    )
                else:
                    status += (
                        " <b>%s tests failed!</b> --- "
                        '(<b><a href="%s">%s</a></b>)'
                        "" % (num_failed, latest_logs_dir, log_msg)
                    )
        status += "</div><p></p>"
        add_more = add_more + status
        gen_by = (
            '<p><div>Generated by: <b><a href="https://seleniumbase.io/">'
            "SeleniumBase</a></b></div></p><p></p>"
        )
        add_more = add_more + gen_by
        # Have dashboard auto-refresh on updates when using an http server
        refresh_line = (
            '<script type="text/javascript" src="%s">'
            "</script>" % constants.Dashboard.LIVE_JS
        )
        if num_untested == 0 and sb_config._using_html_report:
            sb_config._dash_final_summary = status
        add_more = add_more + refresh_line
        the_html = (
            '<html lang="en">'
            + head
            + self.extract_chart()
            + table_html
            + add_more
        )
        # Write the rendered dashboard to disk.
        abs_path = os.path.abspath(".")
        file_path = os.path.join(abs_path, "dashboard.html")
        out_file = codecs.open(file_path, "w+", encoding="utf-8")
        out_file.writelines(the_html)
        out_file.close()
        sb_config._dash_html = the_html
        if self._multithreaded:
            # Persist the shared state for the other worker processes.
            d_stats = (num_passed, num_failed, num_skipped, num_untested)
            _results = sb_config._results
            _display_id = sb_config._display_id
            _rt = sb_config._duration  # Run Time (RT)
            _tlp = sb_config._d_t_log_path  # Test Log Path (TLP)
            dash_json = json.dumps((_results, _display_id, _rt, _tlp, d_stats))
            dash_json_loc = constants.Dashboard.DASH_JSON
            dash_jsonpath = os.path.join(abs_path, dash_json_loc)
            dash_json_file = codecs.open(dash_jsonpath, "w+", encoding="utf-8")
            dash_json_file.writelines(dash_json)
            dash_json_file.close()
def has_exception(self):
"""(This method should ONLY be used in custom tearDown() methods.)
This method returns True if the test failed or raised an exception.
This is useful for performing additional steps in your tearDown()
method (based on whether or not the test passed or failed).
Example use cases:
* Performing cleanup steps if a test didn't complete.
* Sending test data and/or results to a dashboard service.
"""
return self.__has_exception()
    def save_teardown_screenshot(self):
        """(Should ONLY be used at the start of custom tearDown() methods.)
        This method takes a screenshot of the current web page for a
        failing test (or when running your tests with --save-screenshot).
        That way your tearDown() method can navigate away from the last
        page where the test failed, and still get the correct screenshot
        before performing tearDown() steps on other pages. If this method
        is not included in your custom tearDown() method, a screenshot
        will still be taken after the last step of your tearDown(), where
        you should be calling "super(SubClassOfBaseCase, self).tearDown()"
        """
        try:
            self.__check_scope()
        except Exception:
            # Not inside a valid test scope; nothing to capture.
            return
        if self.__has_exception() or self.save_screenshot_after_test:
            test_logpath = os.path.join(self.log_path, self.__get_test_id())
            self.__create_log_path_as_needed(test_logpath)
            # Cache page state now, before tearDown() navigates away.
            self.__set_last_page_screenshot()
            self.__set_last_page_url()
            self.__set_last_page_source()
            if self.is_pytest:
                self.__add_pytest_html_extra()
    def tearDown(self):
        """
        Be careful if a subclass of BaseCase overrides setUp()
        You'll need to add the following line to the subclass's tearDown():
        super(SubClassOfBaseCase, self).tearDown()

        Performs end-of-test processing: deferred asserts, log/screenshot
        saving, dashboard updates, DB/S3 reporting, and driver shutdown.
        """
        if not hasattr(self, "_using_sb_fixture") and self.__called_teardown:
            # This test already called tearDown()
            return
        if self.recorder_mode:
            self.__process_recorded_actions()
        self.__called_teardown = True
        self.__called_setup = False
        try:
            is_pytest = self.is_pytest  # This fails if overriding setUp()
            if is_pytest:
                with_selenium = self.with_selenium
        except Exception:
            # setUp() was overridden without calling super(): build a
            # helpful error message pointing at the offending class/file.
            sub_class_name = (
                str(self.__class__.__bases__[0]).split(".")[-1].split("'")[0]
            )
            sub_file_name = str(self.__class__.__bases__[0]).split(".")[-2]
            sub_file_name = sub_file_name + ".py"
            class_name = str(self.__class__).split(".")[-1].split("'")[0]
            file_name = str(self.__class__).split(".")[-2] + ".py"
            class_name_used = sub_class_name
            file_name_used = sub_file_name
            if sub_class_name == "BaseCase":
                class_name_used = class_name
                file_name_used = file_name
            fix_setup = "super(%s, self).setUp()" % class_name_used
            fix_teardown = "super(%s, self).tearDown()" % class_name_used
            message = (
                "You're overriding SeleniumBase's BaseCase setUp() "
                "method with your own setUp() method, which breaks "
                "SeleniumBase. You can fix this by going to your "
                "%s class located in your %s file and adding the "
                "following line of code AT THE BEGINNING of your "
                "setUp() method:\n%s\n\nAlso make sure "
                "you have added the following line of code AT THE "
                "END of your tearDown() method:\n%s\n"
                % (class_name_used, file_name_used, fix_setup, fix_teardown)
            )
            raise Exception(message)
        # *** Start tearDown() officially ***
        self.__slow_mode_pause_if_active()
        has_exception = self.__has_exception()
        if self.__overrided_default_timeouts:
            # Reset default timeouts in case there are more tests
            # These were changed in set_default_timeout()
            if sb_config._SMALL_TIMEOUT and sb_config._LARGE_TIMEOUT:
                settings.SMALL_TIMEOUT = sb_config._SMALL_TIMEOUT
                settings.LARGE_TIMEOUT = sb_config._LARGE_TIMEOUT
                sb_config._is_timeout_changed = False
                self.__overrided_default_timeouts = False
        deferred_exception = None
        if self.__deferred_assert_failures:
            print(
                "\nWhen using self.deferred_assert_*() methods in your tests, "
                "remember to call self.process_deferred_asserts() afterwards. "
                "Now calling in tearDown()...\nFailures Detected:"
            )
            if not has_exception:
                # Raise the deferred failure later so cleanup still runs.
                try:
                    self.process_deferred_asserts()
                except Exception as e:
                    deferred_exception = e
            else:
                self.process_deferred_asserts(print_only=True)
        if self.is_pytest:
            # pytest-specific code
            test_id = self.__get_test_id()
            if with_selenium:
                # Save a screenshot if logging is on when an exception occurs
                if has_exception:
                    self.__add_pytest_html_extra()
                    sb_config._has_exception = True
                if (
                    self.with_testing_base
                    and not has_exception
                    and self.save_screenshot_after_test
                ):
                    # Passing test, but --save-screenshot was requested.
                    test_logpath = os.path.join(self.log_path, test_id)
                    self.__create_log_path_as_needed(test_logpath)
                    if not self.__last_page_screenshot_png:
                        self.__set_last_page_screenshot()
                        self.__set_last_page_url()
                        self.__set_last_page_source()
                    log_helper.log_screenshot(
                        test_logpath,
                        self.driver,
                        self.__last_page_screenshot_png,
                    )
                    self.__add_pytest_html_extra()
                if self.with_testing_base and has_exception:
                    test_logpath = os.path.join(self.log_path, test_id)
                    self.__create_log_path_as_needed(test_logpath)
                    if (
                        not self.with_screen_shots
                        and not self.with_basic_test_info
                        and not self.with_page_source
                    ):
                        # Log everything if nothing specified (if testing_base)
                        if not self.__last_page_screenshot_png:
                            self.__set_last_page_screenshot()
                            self.__set_last_page_url()
                            self.__set_last_page_source()
                        log_helper.log_screenshot(
                            test_logpath,
                            self.driver,
                            self.__last_page_screenshot_png,
                        )
                        log_helper.log_test_failure_data(
                            self,
                            test_logpath,
                            self.driver,
                            self.browser,
                            self.__last_page_url,
                        )
                        log_helper.log_page_source(
                            test_logpath, self.driver, self.__last_page_source
                        )
                    else:
                        # Only log the artifact types that were requested.
                        if self.with_screen_shots:
                            if not self.__last_page_screenshot_png:
                                self.__set_last_page_screenshot()
                                self.__set_last_page_url()
                                self.__set_last_page_source()
                            log_helper.log_screenshot(
                                test_logpath,
                                self.driver,
                                self.__last_page_screenshot_png,
                            )
                        if self.with_basic_test_info:
                            log_helper.log_test_failure_data(
                                self,
                                test_logpath,
                                self.driver,
                                self.browser,
                                self.__last_page_url,
                            )
                        if self.with_page_source:
                            log_helper.log_page_source(
                                test_logpath,
                                self.driver,
                                self.__last_page_source,
                            )
                if self.dashboard:
                    if self._multithreaded:
                        # Serialize dashboard writes across workers.
                        with self.dash_lock:
                            self.__process_dashboard(has_exception)
                    else:
                        self.__process_dashboard(has_exception)
                # (Pytest) Finally close all open browser windows
                self.__quit_all_drivers()
            if self.headless or self.xvfb:
                if self.headless_active:
                    try:
                        self.display.stop()
                    except AttributeError:
                        pass
                    except Exception:
                        pass
                    self.display = None
            if self.with_db_reporting:
                # Record this test's result in the testcase database.
                if has_exception:
                    self.__insert_test_result(constants.State.FAILED, True)
                else:
                    test_id = self.__get_test_id_2()
                    if test_id in sb_config._results.keys() and (
                        sb_config._results[test_id] == "Skipped"
                    ):
                        self.__insert_test_result(
                            constants.State.SKIPPED, False
                        )
                    else:
                        self.__insert_test_result(
                            constants.State.PASSED, False
                        )
                runtime = int(time.time() * 1000) - self.execution_start_time
                self.testcase_manager.update_execution_data(
                    self.execution_guid, runtime
                )
            if self.with_s3_logging and has_exception:
                """ If enabled, upload logs to S3 during test exceptions. """
                import uuid
                from seleniumbase.core.s3_manager import S3LoggingBucket
                s3_bucket = S3LoggingBucket()
                guid = str(uuid.uuid4().hex)
                path = "%s/%s" % (self.log_path, test_id)
                uploaded_files = []
                for logfile in os.listdir(path):
                    logfile_name = "%s/%s/%s" % (
                        guid,
                        test_id,
                        logfile.split(path)[-1],
                    )
                    s3_bucket.upload_file(
                        logfile_name, "%s/%s" % (path, logfile)
                    )
                    uploaded_files.append(logfile_name)
                s3_bucket.save_uploaded_file_names(uploaded_files)
                index_file = s3_bucket.upload_index_file(test_id, guid)
                print("\n\n*** Log files uploaded: ***\n%s\n" % index_file)
                logging.info(
                    "\n\n*** Log files uploaded: ***\n%s\n" % index_file
                )
                if self.with_db_reporting:
                    # Store the S3 index URL with the DB test record.
                    from seleniumbase.core.testcase_manager import (
                        TestcaseDataPayload,
                    )
                    from seleniumbase.core.testcase_manager import (
                        TestcaseManager,
                    )
                    self.testcase_manager = TestcaseManager(self.database_env)
                    data_payload = TestcaseDataPayload()
                    data_payload.guid = self.testcase_guid
                    data_payload.logURL = index_file
                    self.testcase_manager.update_testcase_log_url(data_payload)
        else:
            # (Nosetests)
            if has_exception:
                test_id = self.__get_test_id()
                test_logpath = os.path.join(self.log_path, test_id)
                self.__create_log_path_as_needed(test_logpath)
                log_helper.log_test_failure_data(
                    self,
                    test_logpath,
                    self.driver,
                    self.browser,
                    self.__last_page_url,
                )
                if len(self._drivers_list) > 0:
                    if not self.__last_page_screenshot_png:
                        self.__set_last_page_screenshot()
                        self.__set_last_page_url()
                        self.__set_last_page_source()
                    log_helper.log_screenshot(
                        test_logpath,
                        self.driver,
                        self.__last_page_screenshot_png,
                    )
                    log_helper.log_page_source(
                        test_logpath, self.driver, self.__last_page_source
                    )
            elif self.save_screenshot_after_test:
                test_id = self.__get_test_id()
                test_logpath = os.path.join(self.log_path, test_id)
                self.__create_log_path_as_needed(test_logpath)
                if not self.__last_page_screenshot_png:
                    self.__set_last_page_screenshot()
                    self.__set_last_page_url()
                    self.__set_last_page_source()
                log_helper.log_screenshot(
                    test_logpath, self.driver, self.__last_page_screenshot_png
                )
            if self.report_on:
                self._last_page_screenshot = self.__last_page_screenshot_png
                try:
                    self._last_page_url = self.get_current_url()
                except Exception:
                    self._last_page_url = "(Error: Unknown URL)"
            # (Nosetests) Finally close all open browser windows
            self.__quit_all_drivers()
        # Resume tearDown() for both Pytest and Nosetests
        if has_exception and self.__visual_baseline_copies:
            self.__process_visual_baseline_logs()
        if deferred_exception:
            # User forgot to call "self.process_deferred_asserts()" in test
            raise deferred_exception
|
seleniumbase/SeleniumBase
|
seleniumbase/fixtures/base_case.py
|
Python
|
mit
| 524,441
|
[
"VisIt"
] |
e3341ed045e8d73c84c0a4295f08b28d8b58fc727b4f61169c21377807d71e7b
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from hyperspy._signals.eels import EELSSpectrum
from hyperspy._signals.signal1d import Signal1D
from hyperspy.components1d import Gaussian
from hyperspy.decorators import lazifyTestClass
@lazifyTestClass
class TestModelIndexing:
    """Tests for model slicing with .isig (signal) and .inav (navigation)."""

    def setup_method(self, method):
        # Build a 4x3 navigation grid of noisy Gaussian spectra (each with
        # a random axis offset), then fit one Gaussian at every position.
        np.random.seed(1)
        axes = np.array([[100 * np.random.random() + np.arange(0., 600, 1)
                          for i in range(3)] for j in range(4)])
        g = Gaussian()
        g.A.value = 30000.
        g.centre.value = 300.
        g.sigma.value = 150.
        data = g.function(axes)
        s = Signal1D(data)
        s.axes_manager[-1].offset = -150.
        s.axes_manager[-1].scale = 0.5
        s.add_gaussian_noise(2.0)
        m = s.create_model()
        g = Gaussian()
        g.A.ext_force_positive = True
        g.A.ext_bounded = True
        m.append(g)
        g.active_is_multidimensional = True
        # Fit at every navigation position.
        for index in m.axes_manager:
            m.fit()
        self.model = m

    def test_model_signal_indexer_slice(self):
        s = self.model.signal.isig[:300]
        m = self.model.isig[:300]
        m1 = self.model.isig[300:]
        m2 = self.model.isig[:0.]
        # Slicing must preserve parameter flags, data, dof, and maps.
        assert m1[0].A.ext_bounded is m[0].A.ext_bounded
        np.testing.assert_array_almost_equal(s.data, m.signal.data)
        np.testing.assert_array_almost_equal(s.data, m2.signal.data)
        np.testing.assert_array_equal(m.dof.data, self.model.dof.data)
        for ic, c in enumerate(m):
            for p_new, p_old in zip(c.parameters, self.model[ic].parameters):
                np.testing.assert_array_equal(p_old.map, p_new.map)
        # chisq of complementary slices must add up to the full chisq.
        np.testing.assert_array_almost_equal(m.chisq.data + m1.chisq.data,
                                             self.model.chisq.data)
        self.model.channel_switches[0] = False
        m = self.model.isig[:-100.]
        # Disabled channels must stay disabled after slicing.
        assert not m.channel_switches[0]
        assert np.all(m.channel_switches[1:])

    def test_model_navigation_indexer_slice(self):
        self.model.axes_manager.indices = (0, 0)
        self.model[0].active = False
        m = self.model.inav[0::2, :]
        np.testing.assert_array_equal(
            m.chisq.data, self.model.chisq.data[:, 0::2])
        np.testing.assert_array_equal(m.dof.data, self.model.dof.data[:, 0::2])
        assert (m.inav[:2][0].A.ext_force_positive is
                m[0].A.ext_force_positive)
        assert m.chisq.data.shape == (4, 2)
        assert not m[0]._active_array[0, 0]
        for ic, c in enumerate(m):
            np.testing.assert_equal(
                c._active_array,
                self.model[ic]._active_array[:, 0::2])
            for p_new, p_old in zip(c.parameters, self.model[ic].parameters):
                assert (p_old.map[:, 0::2] == p_new.map).all()

    # test that explicitly does the wrong thing by mixing up the order
    def test_component_copying_order(self):
        self.model.axes_manager.indices = (0, 0)
        self.model[0].active = False
        g = self.model[0]
        # Deliberately wrong order: copying _active_array before the
        # 'active' flags leaves the sliced model with a stale array.
        g._slicing_order = ('_active_array', 'active_is_multidimensional',
                            'active')
        assert not g._active_array[0, 0]
        m = self.model.inav[0:2, 0:2]
        assert m[0]._active_array[0, 0]
@lazifyTestClass
class TestModelIndexingClass:
    """Check that slicing an EELS model preserves the model class."""

    def setup_method(self, method):
        # Build a minimal EELS spectrum with the metadata required
        # by create_model().
        spectrum = EELSSpectrum([list(range(10))] * 3)
        spectrum.metadata.set_item(
            'Acquisition_instrument.TEM.Detector.EELS.collection_angle',
            3.0)
        spectrum.metadata.set_item(
            'Acquisition_instrument.TEM.beam_energy', 1.0)
        spectrum.metadata.set_item(
            'Acquisition_instrument.TEM.convergence_angle',
            2.0)
        self.eels_m = spectrum.create_model(auto_background=False)

    def test_model_class(self):
        model = self.eels_m
        # Both signal-axis and navigation-axis slicing must return
        # instances of the same model class as the original.
        assert isinstance(model, type(model.isig[1:]))
        assert isinstance(model, type(model.inav[1:]))
@lazifyTestClass
class TestEELSModelSlicing:
    """Tests that slicing an EELS model also handles its low-loss signal."""

    def setup_method(self, method):
        # Random 10x10 navigation grid of 600-channel EELS spectra.
        data = np.random.random((10, 10, 600))
        s = EELSSpectrum(data)
        s.axes_manager[-1].offset = -150.
        s.axes_manager[-1].scale = 0.5
        # Metadata required by EELSSpectrum.create_model().
        s.metadata.set_item(
            'Acquisition_instrument.TEM.Detector.EELS.collection_angle',
            3.0)
        s.metadata.set_item('Acquisition_instrument.TEM.beam_energy', 1.0)
        s.metadata.set_item(
            'Acquisition_instrument.TEM.convergence_angle',
            2.0)
        # Use a shifted copy of the signal itself as the low-loss spectrum.
        m = s.create_model(
            ll=s + 1,
            auto_background=False,
            auto_add_edges=False)
        g = Gaussian()
        m.append(g)
        self.model = m

    def test_slicing_low_loss_inav(self):
        # Navigation slicing must slice the low-loss signal in step.
        m = self.model
        m1 = m.inav[::2]
        assert m1.signal.data.shape == m1.low_loss.data.shape

    def test_slicing_low_loss_isig(self):
        # Signal-axis slicing leaves the low-loss shape matching the
        # unsliced signal's shape.
        m = self.model
        m1 = m.isig[::2]
        assert m.signal.data.shape == m1.low_loss.data.shape
|
dnjohnstone/hyperspy
|
hyperspy/tests/model/test_fancy_indexing.py
|
Python
|
gpl-3.0
| 5,715
|
[
"Gaussian"
] |
6f40185e327106f6ac4c0238cc961b11773173beb320e0986817d340dac693c9
|
# coding:utf-8
from custor.logger import logger
from db.mysql_model.post import PostCategory, PostTopic, Post
import psutil, datetime, time
# Trick cache
# Simple in-process caches, rebuilt by the update_* functions below.
topic_category_cache = {'categorys': [], 'topics': []}  # categories + one topic list per category
hot_post_cache = {'reply': [], 'visit': []}  # 'reply': most-replied posts; 'visit': never filled here
system_status_cache = [0, 0, 0, 0]  # [cpu %, mem %, connection count, boot date]
def update_topic_category_cache():
    """Rebuild the cached category list and per-category topic lists.

    Fills ``topic_category_cache['categorys']`` with all PostCategory rows
    and ``topic_category_cache['topics']`` with one list of PostTopic rows
    per category, plus a trailing list of topics that have no category.
    """
    categorys = list(PostCategory.select())
    topics_per_category = [
        list(PostTopic.select().where(PostTopic.category == category))
        for category in categorys
    ]
    # Topics without a category go into a final "uncategorized" bucket.
    # ("== None" is required: peewee builds an IS NULL query from it.)
    topics_per_category.append(
        list(PostTopic.select().where(PostTopic.category == None))  # noqa: E711
    )
    topic_category_cache['categorys'] = categorys
    topic_category_cache['topics'] = topics_per_category
def update_hot_post_cache():
    """Rebuild the cached list of the 4 most-replied, non-deleted posts.

    NOTE: ``hot_post_cache['visit']`` is cleared but (as in the original
    code) never repopulated here.
    """
    hot_post_cache['visit'] = []
    # "== False" is required: peewee turns it into a SQL comparison.
    posts = (Post.select()
             .where(Post.is_delete == False)  # noqa: E712
             .order_by(Post.reply_count.desc())
             .limit(4))
    hot_post_cache['reply'] = list(posts)
def update_system_status_cache():
    """Start a background thread that samples system status forever.

    Every 30 seconds the thread stores [cpu %, mem %, connection count,
    boot date] into ``system_status_cache`` and pushes the values to all
    connected status websockets.
    """
    from threading import Thread

    class MonitorWorker(Thread):
        def __init__(self, name, system_status_cache):
            Thread.__init__(self)
            self.name = name
            self.systatus = system_status_cache

        def run(self):
            logger.debug("start monitor system status...")
            while True:
                try:
                    s1 = psutil.cpu_percent()
                    s2 = psutil.virtual_memory()[2]  # memory usage percent
                    try:
                        # net_connections() may require elevated privileges.
                        # (Was a bare "except:", which would also swallow
                        # KeyboardInterrupt/SystemExit raised here.)
                        s3 = len(psutil.net_connections())
                    except Exception:
                        s3 = 'unknown'  # fixed typo: was 'unkown'
                    s4 = datetime.datetime.fromtimestamp(
                        psutil.boot_time()).strftime("%Y-%m-%d")
                    self.systatus[0] = s1
                    self.systatus[1] = s2
                    self.systatus[2] = s3
                    self.systatus[3] = s4
                    # Imported here to avoid a circular import at load time.
                    from app.api.api import SystemStatusWebsocketHandler
                    SystemStatusWebsocketHandler.write2all(self.systatus)
                    time.sleep(30)
                except KeyboardInterrupt:
                    break

    monitor = MonitorWorker('system', system_status_cache)
    monitor.start()
def update_cache():
    """Populate all caches: topics/categories, hot posts, system status."""
    logger.debug('start update cache...')
    for refresh in (update_topic_category_cache,
                    update_hot_post_cache,
                    update_system_status_cache):
        refresh()
|
jmpews/torweb
|
app/cache.py
|
Python
|
mit
| 2,751
|
[
"VisIt"
] |
dbad7ac5ea9b5b143fb871367fdf332d699aa04426daf02473732a27bac62009
|
""" DeadCodeElimination remove useless code. """
from pythran.analyses import PureExpressions, DefUseChains, Ancestors
from pythran.openmp import OMPDirective
from pythran.passmanager import Transformation
import pythran.metadata as metadata
import gast as ast
class ClumsyOpenMPDependencyHandler(ast.NodeVisitor):
    """Collect the names that OpenMP directives depend on.

    Any plain name listed as a dependency of an OMP directive is added to
    ``blacklist`` so that dead-code elimination will not remove it.
    """

    def __init__(self):
        self.blacklist = set()

    def visit_OMPDirective(self, node):
        self.blacklist.update(dep.id for dep in node.deps
                              if isinstance(dep, ast.Name))
        return node
class DeadCodeElimination(Transformation):
    """
    Remove useless statement like:
        - assignment to unused variables
        - remove alone pure statement
        - remove empty if

    >>> import gast as ast
    >>> from pythran import passmanager, backend
    >>> pm = passmanager.PassManager("test")
    >>> node = ast.parse("def foo(): a = [2, 3]; return 1")
    >>> _, node = pm.apply(DeadCodeElimination, node)
    >>> print(pm.dump(backend.Python, node))
    def foo():
        pass
        return 1
    >>> node = ast.parse("def foo(): 'a simple string'; return 1")
    >>> _, node = pm.apply(DeadCodeElimination, node)
    >>> print(pm.dump(backend.Python, node))
    def foo():
        pass
        return 1
    >>> node = ast.parse('''
    ... def bar(a):
    ...     return a
    ... def foo(a):
    ...     bar(a)
    ...     return 1''')
    >>> _, node = pm.apply(DeadCodeElimination, node)
    >>> print(pm.dump(backend.Python, node))
    def bar(a):
        return a
    def foo(a):
        pass
        return 1
    """
    def __init__(self):
        super(DeadCodeElimination, self).__init__(PureExpressions,
                                                  DefUseChains,
                                                  Ancestors)
        # Names that OpenMP directives depend on; these are never eliminated.
        self.blacklist = set()

    def used_target(self, node):
        # An assignment target counts as "used" unless it is a plain Name
        # that is neither OpenMP-blacklisted nor referenced anywhere
        # according to the def-use chains.  Non-Name targets (tuples,
        # attributes, subscripts) are conservatively kept.
        if isinstance(node, ast.Name):
            if node.id in self.blacklist:
                return True
            chain = self.def_use_chains.chains[node]
            return bool(chain.users())
        return True

    def visit_FunctionDef(self, node):
        # Gather OpenMP-referenced names for this function before pruning.
        codh = ClumsyOpenMPDependencyHandler()
        codh.visit(node)
        self.blacklist = codh.blacklist
        return self.generic_visit(node)

    def visit_Pass(self, node):
        # Keep a Pass only when removing it would leave an empty (invalid)
        # body/orelse, or when it carries an OpenMP directive; otherwise
        # drop it (returning None removes the statement).
        ancestor = self.ancestors[node][-1]
        if getattr(ancestor, 'body', ()) == [node]:
            return node
        if getattr(ancestor, 'orelse', ()) == [node]:
            return node
        if metadata.get(node, OMPDirective):
            return node
        return None

    def visit_Assign(self, node):
        # Drop unused targets; if none remain, keep the RHS only when it
        # may have side effects (i.e. it is not a pure expression).
        targets = [target for target in node.targets
                   if self.used_target(target)]
        if len(targets) == len(node.targets):
            return node
        node.targets = targets
        self.update = True
        if targets:
            return node
        if node.value in self.pure_expressions:
            return ast.Pass()
        else:
            return ast.Expr(value=node.value)

    def visit_Expr(self, node):
        # A pure expression statement has no effect and can be replaced by
        # Pass — except for Yield, which affects control flow.
        if (node in self.pure_expressions and
                not isinstance(node.value, ast.Yield)):
            self.update = True
            return ast.Pass()
        self.generic_visit(node)
        return node

    def visit_If(self, node):
        self.generic_visit(node)
        try:
            # Constant test: inline the branch that is taken (unless the
            # node is tagged with an OpenMP directive).
            if ast.literal_eval(node.test):
                if not metadata.get(node, OMPDirective):
                    self.update = True
                    return node.body
            else:
                if not metadata.get(node, OMPDirective):
                    self.update = True
                    return node.orelse
        except ValueError:
            # not a constant expression
            pass
        have_body = any(not isinstance(x, ast.Pass) for x in node.body)
        have_else = any(not isinstance(x, ast.Pass) for x in node.orelse)
        # If the "body" is empty but "else content" is useful, switch branches
        # and remove else content
        if not have_body and have_else:
            test = ast.UnaryOp(op=ast.Not(), operand=node.test)
            self.update = True
            return ast.If(test=test, body=node.orelse, orelse=list())
        # if neither "if" and "else" are useful, keep test if it is not pure
        elif not have_body:
            self.update = True
            if node.test in self.pure_expressions:
                return ast.Pass()
            else:
                node = ast.Expr(value=node.test)
                self.generic_visit(node)
        return node

    def visit(self, node):
        """ Add OMPDirective from the old node to the new one. """
        old_omp = metadata.get(node, OMPDirective)
        node = super(DeadCodeElimination, self).visit(node)
        if not metadata.get(node, OMPDirective):
            for omp_directive in old_omp:
                metadata.add(node, omp_directive)
        return node
|
serge-sans-paille/pythran
|
pythran/optimizations/dead_code_elimination.py
|
Python
|
bsd-3-clause
| 4,981
|
[
"VisIt"
] |
36ff06dc563ae9c7d201c2c7d1568bee8e1b337c57d79b86de3acc755a8f932b
|
# This file is part of the Fluggo Media Library for high-quality
# video and audio processing.
#
# Copyright 2012 Brian J. Crowell <brian@fluggo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, os.path, weakref, sys, traceback
import configparser
from PyQt4 import QtCore
from fluggo import signal, logging
_log = logging.getLogger(__name__)
# TODO: Need redo/undo support
class _AlertTracker(object):
    '''Re-publishes alerts from one publisher (the trackee) onto another
    (the tracker).

    Holds only a weak reference to the trackee so that tracking ends
    automatically when the trackee is garbage-collected.'''
    def __init__(self, trackee, tracker):
        # Weak reference; stop_tracking doubles as the finalize callback.
        self.trackee = weakref.ref(trackee, self.stop_tracking)
        self.tracker = tracker
        # key -> alert map of alerts currently re-published; None until the
        # first alert arrives.
        self.alerts = None
        trackee.alert_added.connect(self.item_added)
        trackee.alert_removed.connect(self.item_removed)
        # Re-publish anything the trackee is already showing.
        for alert in trackee._alerts.values():
            self.item_added(alert)

    def stop_tracking(self, weakref_=None):
        # For this to work via weak references, the alerts themselves
        # must not have strong references to the object in question
        # BJC: The "trackee" attribute seems to be missing sometimes; I
        # suspect this is because I'm being torn down when this is called
        if hasattr(self, 'trackee'):
            trackee = self.trackee()
            if trackee is not None:
                trackee.alert_added.disconnect(self.item_added)
                trackee.alert_removed.disconnect(self.item_removed)
        if hasattr(self, 'alerts') and self.alerts is not None:
            # Withdraw everything we re-published on the tracker.
            for alert in self.alerts.values():
                self.tracker.hide_alert(alert)
            self.alerts = None

    def item_added(self, alert):
        # Mirror a newly published alert onto the tracker.
        if self.alerts is None:
            self.alerts = {}
        self.alerts[alert.key] = alert
        self.tracker.show_alert(alert)

    def item_removed(self, alert):
        # Withdraw a mirrored alert from the tracker.
        del self.alerts[alert.key]
        self.tracker.hide_alert(alert)
class AlertPublisher(object):
    '''Mixin class that reports errors and give the user ways to manage them.'''
    def __init__(self):
        self.alert_added = signal.Signal()
        self.alert_removed = signal.Signal()
        self._alerts = {}
        self._tracked_publishers = None

    def show_alert(self, alert):
        '''Add an alert to the list of alerts shown to the user.'''
        # Re-publishing an alert with the same key replaces and re-announces it.
        self.hide_alert(alert)
        self._alerts[alert.key] = alert
        self.alert_added(alert)

    def hide_alert(self, alert):
        '''Remove *alert* (matched by key) if it is currently shown.'''
        _missing = object()
        if self._alerts.pop(alert.key, _missing) is not _missing:
            self.alert_removed(alert)

    @property
    def alerts(self):
        '''A snapshot list of the currently shown alerts.'''
        return list(self._alerts.values())

    def follow_alerts(self, publisher):
        '''Re-publishes alerts published by *publisher*. The publisher is tracked with
        a weak reference; if the publisher disappears, its alerts will be unpublished.'''
        if self._tracked_publishers is None:
            self._tracked_publishers = weakref.WeakKeyDictionary()
        if publisher in self._tracked_publishers:
            return
        self._tracked_publishers[publisher] = _AlertTracker(publisher, self)

    def unfollow_alerts(self, publisher):
        '''Stops tracking alerts from the given *publisher*.'''
        if self._tracked_publishers is None:
            return
        tracker = self._tracked_publishers.pop(publisher, None)
        if tracker is not None:
            tracker.stop_tracking()
class AlertIcon(object):
    '''Ordinal severity constants for alerts, least to most severe.'''
    NoIcon, Information, Warning, Error = range(4)
class Alert(object):
    '''An alert for use with the AlertPublisher.'''
    def __init__(self, description, icon=AlertIcon.NoIcon, source='', model_obj=None, actions=None, exc_info=False, key=None):
        '''Create an alert. *key* is a way to uniquely identify this alert.
        *description* is the text to show. *icon* is either one of the values from AlertIcon,
        a QIcon, or a path to an image (Qt resource paths allowed). *source* gives the user a way to sort similar alerts together;
        give a name that would be useful for that. *actions* is a list of QActions to show the user for resolving
        the issue. *model_obj* is an object in that could be found in the model for this alert.
        If *exc_info* is true, the current exception info is captured.'''
        # TODO: Add exc_info like on logging to allow capturing tracebacks on exceptions
        self.key = key or self
        self._description = description
        self._source = source
        self._icon = icon
        # Default is None, not []: a mutable default list would be shared by
        # every Alert constructed without explicit actions.
        self._actions = actions if actions is not None else []
        self._model_obj = model_obj
        self._exc_info = None

        if exc_info:
            _log.debug('Alert with error: {0}', description, exc_info=True)
            self._exc_info = sys.exc_info()

    @property
    def description(self):
        '''Localized description of the error or warning.'''
        return self._description

    @property
    def source(self):
        '''Short sort/group label identifying where the alert came from.'''
        return self._source

    @property
    def icon(self):
        '''A value from AlertIcon.'''
        return self._icon

    @property
    def actions(self):
        '''Return a list of QActions the user can choose from to resolve the alert.'''
        # TODO: Add a general hide command?
        return self._actions

    @property
    def model_object(self):
        '''Optional object in the model that is associated with this alert. Having this
        object lets the user navigate from this alert to the object.'''
        return self._model_obj

    @property
    def exc_info(self):
        '''Optional exception info captured at the time of the alert.'''
        return self._exc_info

    def __str__(self):
        # "source: description" plus the formatted traceback, when captured.
        result = str(self.description)

        if self._source:
            result = self._source + ': ' + result

        if self._exc_info:
            result = result + '\r\n' + ''.join(traceback.format_exception(*self._exc_info))

        return result
# TODO: Create standard alerts for things like file/plugin missing;
# the UI can possibly coalesce these and treat them as a group, or
# provide special assistance to the user
# Standard alerts:
#
# Plain warning/error messages (only option is to dismiss)
# Plugin missing
# File missing
# Failure to bring online?
class Plugin:
    """Base class for plugins.

    Subclasses override the class attributes below and the
    activate()/deactivate() hooks.  (The original placed each attribute's
    docstring *before* the attribute, which hijacked the class docstring
    and attached the remaining strings to nothing; they are plain comments
    now.)
    """

    # Alert publisher for this plugin. Create your own alert publisher at the
    # class level.
    alerts = None

    # The name of the plugin.
    name = None

    # A short (one-line) description of the plugin.
    description = None

    # A URN that uniquely identifies all versions of this plugin.
    plugin_urn = None

    @classmethod
    def activate(cls):
        '''Called when the user has activated the plugin. This is usually
        the time to install any hooks into the interface.'''
        pass

    @classmethod
    def deactivate(cls):
        '''Called when the user has deactivated the plugin. This is usually
        the time to remove any hooks from the interface.'''
        pass
# TODO: Dialogs for settings/about, global vs. project settings,
# notifications from these stored objects that settings have changed
# QSettings group prefixes under which per-plugin / per-decoder persisted
# state ('enabled', 'priority') is stored.
PLUGINS_PREFIX = 'plugins/'
DECODERS_PREFIX = 'decoders/'
class PluginManager(object):
    '''Process-wide registry of plugin modules, plugins, and codecs.

    All state and methods are class-level; the class is used as a singleton
    and never instantiated.  Enabled/priority state is persisted through
    QSettings under the PLUGINS_PREFIX/DECODERS_PREFIX groups.'''
    # Lazily populated by load_all(); None means "not yet scanned".
    plugin_modules = None
    plugins = None
    enabled_plugins = None
    codecs = []
    enabled_codecs = {}  # urn -> codec (reset_codecs stores the codec itself; priority lives on the codec)
    codec_priorities = {}
    codecs_by_priority = []  # Enabled codecs in preference order
    alert_manager = AlertPublisher()

    @classmethod
    def load_all(cls):
        '''Discover, import, and activate plugins; idempotent.'''
        if cls.plugin_modules is not None:
            return

        # TODO: For now, this will just load all plugins here in the plugins directory
        # In the future, we need this to search some standard paths
        cls.plugin_modules = list(cls.find_all_modules([os.path.dirname(__file__)]))
        plugin_classes = []

        for module in cls.plugin_modules:
            module.load()

            if not module.module:
                continue

            # Scan through the module's dictionary for plugins we can use
            plugin_classes.extend(plugin for (name, plugin) in module.module.__dict__.items()
                if not name.startswith('_') and issubclass(type(plugin), type) and issubclass(plugin, Plugin))

        plugins = {}

        for plugin_cls in plugin_classes:
            try:
                # setdefault: the first class to claim a URN wins.
                existing_plugin = plugins.setdefault(plugin_cls.plugin_urn, plugin_cls)

                if plugin_cls is not existing_plugin:
                    _log.error('Two plugins tried to claim the URN "{0}"', plugin_cls.plugin_urn)
            except Exception as ex:
                _log.error('Could not add {0} plugin class: {1}', plugin_cls.__name__, ex, exc_info=True)

        cls.plugins = plugins
        cls.enabled_plugins = {}

        # Read config file for enabled plugins
        for (key, plugin) in cls.plugins.items():
            settings = QtCore.QSettings()
            settings.beginGroup(PLUGINS_PREFIX + key)
            enabled = settings.value('enabled', False, type=bool)
            settings.endGroup()

            if enabled:
                try:
                    plugin.activate()

                    if plugin.alerts:
                        cls.alert_manager.follow_alerts(plugin.alerts)

                    cls.enabled_plugins[key] = plugin
                except:
                    _log.error('Failed to activate plugin "{0}"', plugin.name, exc_info=True)

        cls.reset_codecs()

    @classmethod
    def find_plugins(cls, baseclass=Plugin, enabled_only=True):
        '''Return all (enabled) plugin classes that subclass *baseclass*.'''
        cls.load_all()
        plugins = cls.enabled_plugins if enabled_only else cls.plugins
        return [plugin for plugin in plugins.values() if issubclass(plugin, baseclass)]

    @classmethod
    def find_plugin_by_urn(cls, urn):
        '''Return the enabled plugin with the given URN, or None.'''
        return cls.enabled_plugins.get(urn, None)

    @classmethod
    def is_plugin_enabled(cls, plugin):
        return plugin.plugin_urn in cls.enabled_plugins

    @classmethod
    def set_plugin_enabled(cls, plugin, enable):
        '''Activate or deactivate *plugin* and persist the choice in QSettings.'''
        if plugin.plugin_urn not in cls.plugins:
            raise ValueError('Given plugin is not in the list of available plugins.')

        enabled = cls.is_plugin_enabled(plugin)
        settings = QtCore.QSettings()
        settings.beginGroup(PLUGINS_PREFIX + plugin.plugin_urn)

        if enable and not enabled:
            try:
                plugin.activate()

                if plugin.alerts:
                    cls.alert_manager.follow_alerts(plugin.alerts)

                cls.enabled_plugins[plugin.plugin_urn] = plugin
                settings.setValue('enabled', True)
                cls.reset_codecs()
            except Exception as ex:
                _log.error('Failed to activate plugin "{0}"', plugin.name, exc_info=True)
        elif not enable and enabled:
            try:
                plugin.deactivate()

                if plugin.alerts:
                    cls.alert_manager.unfollow_alerts(plugin)

                del cls.enabled_plugins[plugin.plugin_urn]
                settings.setValue('enabled', False)
                cls.reset_codecs()
            except Exception as ex:
                _log.error('Failed to deactivate plugin "{0}"', plugin.name, exc_info=True)

        settings.endGroup()

    @classmethod
    def find_all_modules(cls, paths):
        '''Yield a PluginModule for every section of every *.plugin file in *paths*.'''
        for directory in paths:
            _log.info('Searching {0} for plugins...', directory)

            for filename in os.listdir(directory):
                if not filename.endswith('.plugin'):
                    continue

                _log.info('Found {0}...', filename)

                try:
                    for plugin in PluginModule.from_file(os.path.join(directory, filename)):
                        yield plugin
                except:
                    _log.warning('Could not read the plugin {0}', filename, exc_info=True)

    @classmethod
    def reset_codecs(cls):
        '''Clear out all codecs and start over.'''
        cls.codecs = []
        cls.enabled_codecs = {}

        for plugin in cls.find_plugins(CodecPlugin):
            try:
                cls.codecs.extend(plugin.get_all_codecs())
            except:
                _log.warning('Could not get a list of codecs from a plugin', exc_info=True)

        # Per-codec enabled/priority state comes from QSettings; the codec's
        # default_priority is used when no priority was persisted.
        for codec in cls.codecs:
            settings = QtCore.QSettings()
            settings.beginGroup(DECODERS_PREFIX + codec.urn)
            enabled = settings.value('enabled', True, type=bool)
            priority = settings.value('priority', codec.default_priority, type=int)
            settings.endGroup()

            codec.priority = priority

            if enabled:
                cls.enabled_codecs[codec.urn] = codec

        cls.codecs_by_priority = list(cls.enabled_codecs.values())
        cls.codecs_by_priority.sort(key=lambda i: (i.priority, i.urn), reverse=True)

    @classmethod
    def find_codec_by_urn(cls, urn):
        '''Return the codec with the given URN, or None if it isn't enabled.'''
        return cls.enabled_codecs.get(urn)

    @classmethod
    def find_decoders(cls, format_urn=None, enabled_only=True):
        '''Return a list of codecs supporting the given *format_urn* in descending
        order of preference. If *format_urn* is not given, get all codecs.'''
        # TODO: Make can_decode a method, not a property
        if enabled_only:
            return [codec for codec in cls.codecs_by_priority if
                codec.can_decode and (format_urn is None or format_urn in codec.format_urns)]
        else:
            result = [codec for codec in cls.codecs if
                codec.can_decode and (format_urn is None or format_urn in codec.format_urns)]
            result.sort(key=lambda i: (i.priority, i.urn), reverse=True)
            return result

    @classmethod
    def is_decoder_enabled(cls, codec=None, codec_urn=None):
        # Either the codec object or its URN may be given.
        return (codec_urn or codec.urn) in cls.enabled_codecs

    @classmethod
    def set_decoder_enabled(cls, codec, enable):
        '''Enable or disable *codec* and persist the choice in QSettings.'''
        if codec not in cls.codecs:
            raise ValueError('Given codec is not in the list of available codecs.')

        enabled = cls.is_decoder_enabled(codec=codec)
        settings = QtCore.QSettings()
        settings.beginGroup(DECODERS_PREFIX + codec.urn)

        if enable and not enabled:
            try:
                settings.setValue('enabled', True)
                cls.reset_codecs()
            except Exception as ex:
                _log.error('Failed to enable decoder "{0}"', codec.name, exc_info=True)
        elif not enable and enabled:
            try:
                settings.setValue('enabled', False)
                cls.reset_codecs()
            except Exception as ex:
                _log.error('Failed to disable decoder "{0}"', codec.name, exc_info=True)

        settings.endGroup()

    @classmethod
    def set_decoder_priority(cls, codec, priority):
        '''Persist a new priority for *codec* and rebuild the codec ordering.'''
        if codec not in cls.codecs:
            raise ValueError('Given codec is not in the list of available codecs.')

        settings = QtCore.QSettings()
        settings.beginGroup(DECODERS_PREFIX + codec.urn)

        try:
            settings.setValue('priority', priority)
            cls.reset_codecs()
        except Exception as ex:
            _log.error('Failed to set priority for decoder "{0}"', codec.name, exc_info=True)

        settings.endGroup()
class PluginModule(object):
    '''A plugin module declared in a .plugin config file.'''
    def __init__(self, name, module_name):
        # Human-readable plugin name (section name when no 'name' key given).
        self.name = name
        # Dotted import path, e.g. 'mypkg.myplugin'.
        self.module_name = module_name
        # Set by load(): the imported module, or None plus load_error on failure.
        self.module = None
        self.load_error = None

    @classmethod
    def from_file(cls, path):
        '''Yield a PluginModule for each section of the .plugin file at *path*.'''
        parser = configparser.RawConfigParser()
        parser.read(path)

        for section in parser.sections():
            name = parser.get(section, 'name')
            module = parser.get(section, 'module')

            yield cls(name=name or section, module_name=module)

    def load(self):
        '''Import the module if not already loaded, recording any failure in
        load_error instead of raising.'''
        if self.module:
            return

        import importlib

        try:
            # importlib.import_module handles dotted paths directly,
            # replacing the old __import__(..., fromlist=...).__dict__[...]
            # workaround.
            self.module = importlib.import_module(self.module_name)
            self.load_error = None
        except Exception as ex:
            _log.warning('Plugin "{0}" failed to load: {1}', self.name, ex, exc_info=True)
            self.load_error = ex
from ._source import *
from ._codec import *
|
fluggo/Canvas
|
fluggo/editor/plugins/_base.py
|
Python
|
gpl-3.0
| 17,052
|
[
"Brian"
] |
4e6969b7bbe132b16354115065ac4e401f4ff27cdee545ce8c1ca23cee685fd3
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def correlation_categories(data_retina):
    """Map each stimulus field name to its correlation-category id.

    A stimulus matches a key of ``stim_categories`` when the key is a
    substring of the stimulus name; each match contributes one entry, so a
    name matching two keys yields two (possibly duplicate) category ids.
    """
    stim_categories = {'white': 3, 'fullfield': 0, 'multiscale': 1, 'spatexp': 2, 'spattempexp': 2,
                       'tempexp': 4, 'natmov': 1, 'scramnat': 3, 'lowcont_white': 3, 'lowcont_multiscale': 1}
    stim = data_retina['stimTimes'][0, 0]
    stim_names = [field[0] for field in stim.dtype.descr]
    return [category
            for name in stim_names
            for key, category in stim_categories.items()
            if key in name]
def spikes_to_discrete(spikes, duration, res):
    """
    Binarize spike times into a 0/1 vector of time bins.

    INPUT
    duration and res must be in the same units; spike times are assumed to
    lie in [0, duration).
    """
    n_bins = int(np.floor(duration / res))
    binary = np.zeros(n_bins)
    bin_indices = list(np.floor(np.asarray(spikes) / res).astype(int))
    binary[bin_indices] = 1
    return binary
def sim_spikes(brian_params, discrete_params):
    """ Use brian to simulate spiking output of neural circuits"""
    # NOTE(review): this function appears unfinished. It treats brian_params
    # both as a dict (key lookups below) and as a pandas DataFrame (.loc in
    # the loop); df_spikes.append(...) discards its return value (DataFrame
    # append is not in-place); the trailing nested loops do nothing; and the
    # function returns None. Confirm intent before relying on it.
    epoch_sizes = brian_params['epoch_sizes']
    # neurons in each cluster, in each epoch
    cluster_neurons = brian_params['cluster_neurons']
    jitter_neurons = brian_params['sigma']
    time_constants = brian_params['time_constant']
    num_epochs = len(epoch_sizes)
    spike_trains = []
    df_spikes = pd.DataFrame(columns=['Epoch', 'Spikes', 'Discrete_spikes'])
    for _i in brian_params:
        (epoch_number, epoch_size, num_neurons, jitter, time_constant) = brian_params.loc[
            _i, ['epoch_number', 'epoch_size', 'cluster_neurons', 'jitter', 'time_constant']]
        # One brian2 simulation per row, then binarized at discrete_params['res'].
        spikes = brian_spikes(N=num_neurons, tau=time_constant,
                              duration=epoch_size, sigma=jitter)
        res = discrete_params['res']
        discrete_spikes = brian_spikes_to_discrete_array(
            spikes, epoch_size, res)
        df_spikes.append([epoch_number, spikes, discrete_spikes])
    for i, epoch in enumerate(epoch_sizes):
        for epoch_neurons in cluster_neurons:
            for neurons in epoch_neurons:
                pass
def brian_spikes(N=15, tau=20, v0_max=3.0, duration=7000, sigma=0.01):
    """
    Simulate a noisy leaky-integrate-and-fire population with brian2 and
    return its SpikeMonitor.

    N: number of neurons; tau: membrane time constant (ms);
    duration: simulation length (ms); sigma: noise amplitude.
    v0_max is accepted for interface compatibility but not used.
    """
    import brian2
    from brian2 import start_scope, ms, NeuronGroup, SpikeMonitor

    start_scope()
    # brian2 resolves 'tau' and 'sigma' in the equations from this local
    # namespace, so these names must stay as-is.
    tau = tau * ms
    duration = duration * ms
    membrane_eqs = '''
    dv/dt = (v0-v)/tau+sigma*xi*tau**-0.5 : 1
    v0 : 1
    '''
    group = NeuronGroup(N, membrane_eqs, threshold='v>1', reset='v=0')
    group.v = 'rand()'
    monitor = SpikeMonitor(group)
    group.v0 = 1.1
    brian2.run(duration)
    return monitor
def brian_spikes_to_discrete_array(spikemon, duration, res):
    """
    Convert a brian2 SpikeMonitor into a binary (neurons x bins) int8 array.

    spikemon: brian2 SpikeMonitor; duration, res: plain numbers in ms
    (converted to brian2 units here).
    """
    import brian2
    from brian2 import start_scope, ms, NeuronGroup, SpikeMonitor
    res = res * ms
    duration = duration * ms
    # Cast explicitly to int: duration/res is a dimensionless *float*, and
    # numpy rejects float array shapes and float fancy indices.
    n_bins = int(np.floor(duration / res))
    n_neurons = spikemon.source.N
    array_bins = np.zeros([n_neurons, n_bins], dtype=np.int8)
    neurons_spiketimes = list(spikemon.i)
    bins_with_spikes = list(np.floor(spikemon.t / res).astype(int))
    array_bins[neurons_spiketimes, bins_with_spikes] = 1
    return array_bins
def sim_brian_spikes(n_epoch1=np.array([30, 30, 30]), n_epoch2=np.array([44, 45, 1]), duration_epochs=5400, num_tot_neurons=90, tau_epoch1=np.array([15, 30, 45]), tau_epoch2=np.array([15, 53, 75]), res=2):
    """Simulate two consecutive epochs of clustered spiking with brian2.

    Each epoch contains three clusters; n_epoch*/tau_epoch* give per-cluster
    neuron counts and membrane time constants (ms).  Returns a binary
    (num_tot_neurons x bins) array covering both epochs, binned at *res* ms.
    Also draws a raster plot of all spikes as a side effect.

    NOTE(review): default np.array(...) arguments are shared across calls —
    safe only while callers never mutate them.
    """
    #import brian2
    #from brian2 import start_scope, ms, NeuronGroup, SpikeMonitor
    # res unit: ms
    # unit: ms
    # thresh=0.05
    # window_bins=19
    # conv_filters=[5,10,20,50,100]
    # tau_epoch1=np.array([15,37,53])
    # tau_epoch1=np.array([20,33,47])
    # tau_epoch1=np.array([15,37,53])
    # tau_epoch1=np.array([20,33,47])
    # smooth_bin_width=11
    # block_width=37
    #gauss_width=10 *res
    import brian2
    from brian2 import start_scope, ms, NeuronGroup, SpikeMonitor
    # Per-cluster noise amplitudes; sigma_equal toggles uniform vs distinct.
    sigma_equal = 1
    sigma_all = 0.1
    if sigma_equal == 1:
        sigma_epoch1 = sigma_all * np.ones(n_epoch1.shape)
        sigma_epoch2 = sigma_all * np.ones(n_epoch2.shape)
    else:
        sigma_epoch1 = 0.01 * np.ones(3)
        sigma_epoch2 = 0.08 * np.ones(3)

    def params_3_2_const_coactivations():
        # NOTE(review): defined but never called — appears to be leftover
        # parameter bookkeeping kept for reference.
        res = 2  # unit: ms
        duration_epochs = 1800  # unit: ms
        num_tot_neurons = 90
        thresh = 0.05
        window_bins = 19
        n_epoch1 = np.array([30, 30, 30])
        n_epoch2 = np.array([44, 45, 1])
        smooth_bin_width = 11
        block_width = 50
        gauss_width = 10 * res
        sigma_equal = 1
        sigma_all = 0.1
        if sigma_equal == 1:
            sigma_epoch1 = sigma_all * np.ones(n_epoch1.shape)
            sigma_epoch2 = sigma_all * np.ones(n_epoch2.shape)
        else:
            sigma_epoch1 = 0.01 * np.ones(3)
            sigma_epoch2 = 0.08 * np.ones(3)

    # vectorise the compression distance function
    # vec_ncd=np.vectorize(cp_utils.ncd)
    #%%
    # generate spikes: one brian2 run per (epoch, cluster).
    # spikes=[]
    #spikes_epoch1=cp_utils.brian_spikes(N=num_tot_neurons, duration=duration_epochs)
    spikes_epoch1_cluster1 = brian_spikes(N=n_epoch1[0], tau=tau_epoch1[
        0], duration=duration_epochs, sigma=sigma_epoch1[0])
    spikes_epoch1_cluster2 = brian_spikes(N=n_epoch1[1], tau=tau_epoch1[
        1], duration=duration_epochs, sigma=sigma_epoch1[1])
    spikes_epoch1_cluster3 = brian_spikes(N=n_epoch1[2], tau=tau_epoch1[
        2], duration=duration_epochs, sigma=sigma_epoch1[2])
    # spikes.extend([[spikes_epoch1]])
    spikes_epoch2_cluster1 = brian_spikes(N=n_epoch2[0], tau=tau_epoch2[
        0], duration=duration_epochs, sigma=sigma_epoch2[0])
    spikes_epoch2_cluster2 = brian_spikes(N=n_epoch2[1], tau=tau_epoch2[
        1], duration=duration_epochs, sigma=sigma_epoch2[1])
    spikes_epoch2_cluster3 = brian_spikes(N=n_epoch2[2], tau=tau_epoch2[
        2], duration=duration_epochs, sigma=sigma_epoch2[2])
    duration = duration_epochs
    #discrete_epoch1=cp_utils.brian_spikes_to_discrete_array(spikes_epoch1, duration, res)

    def plot_spikes():
        # Raster plot: epoch 2 times offset by one epoch; cluster rows are
        # stacked via cumulative neuron counts.
        plt.figure(figsize=(10, 5))
        plt.plot((spikes_epoch1_cluster1.t), spikes_epoch1_cluster1.i, '.k')
        plt.plot((spikes_epoch1_cluster2.t),
                 spikes_epoch1_cluster2.i + sum(n_epoch1[:1]), '.k')
        plt.plot((spikes_epoch1_cluster3.t),
                 spikes_epoch1_cluster3.i + sum(n_epoch1[:2]), '.k')
        plt.plot((spikes_epoch2_cluster1.t + duration_epochs * ms),
                 spikes_epoch2_cluster1.i, '.k')
        plt.plot((spikes_epoch2_cluster2.t + duration_epochs * ms),
                 spikes_epoch2_cluster2.i + sum(n_epoch2[:1]), '.k')
        plt.plot((spikes_epoch2_cluster3.t + duration_epochs * ms),
                 spikes_epoch2_cluster3.i + sum(n_epoch2[:2]), '.k')
        plt.xlabel('Time')
        plt.ylabel('Neuron')
    plot_spikes()
    # Binarize each cluster and stack: rows = neurons, columns = time bins.
    discrete_epoch1_cluster1 = brian_spikes_to_discrete_array(
        spikes_epoch1_cluster1, duration, res)
    discrete_epoch1_cluster2 = brian_spikes_to_discrete_array(
        spikes_epoch1_cluster2, duration, res)
    discrete_epoch1_cluster3 = brian_spikes_to_discrete_array(
        spikes_epoch1_cluster3, duration, res)
    discrete_epoch1 = np.concatenate(
        (discrete_epoch1_cluster1, discrete_epoch1_cluster2, discrete_epoch1_cluster3), axis=0)
    discrete_epoch1.shape
    discrete_epoch2_cluster1 = brian_spikes_to_discrete_array(
        spikes_epoch2_cluster1, duration, res)
    discrete_epoch2_cluster2 = brian_spikes_to_discrete_array(
        spikes_epoch2_cluster2, duration, res)
    discrete_epoch2_cluster3 = brian_spikes_to_discrete_array(
        spikes_epoch2_cluster3, duration, res)
    discrete_epoch2 = np.concatenate(
        (discrete_epoch2_cluster1, discrete_epoch2_cluster2, discrete_epoch2_cluster3), axis=0)
    # Epochs are concatenated along time (axis=1).
    circuit_binary_array = np.concatenate(
        (discrete_epoch1, discrete_epoch2), axis=1)
    return circuit_binary_array
|
curious-abhinav/change-point
|
src/features/build_features.py
|
Python
|
mit
| 8,628
|
[
"Brian",
"NEURON"
] |
590c44efceb617d0617282d89d3b0680037870c3542c049c449a1454dc7a8093
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for the Stackoverflow dataset for federated learning simulation."""
import collections
import json
import os.path
from typing import Optional
import tensorflow as tf
from tensorflow_federated.python.simulation.datasets import download
from tensorflow_federated.python.simulation.datasets import from_tensor_slices_client_data
from tensorflow_federated.python.simulation.datasets import sql_client_data
def _add_proto_parsing(dataset: tf.data.Dataset) -> tf.data.Dataset:
  """Add parsing of the tf.Example proto to the dataset pipeline."""

  def parse_proto(tensor_proto):
    feature_spec = collections.OrderedDict(
        creation_date=tf.io.FixedLenFeature(dtype=tf.string, shape=()),
        score=tf.io.FixedLenFeature(dtype=tf.int64, shape=()),
        tags=tf.io.FixedLenFeature(dtype=tf.string, shape=()),
        title=tf.io.FixedLenFeature(dtype=tf.string, shape=()),
        tokens=tf.io.FixedLenFeature(dtype=tf.string, shape=()),
        type=tf.io.FixedLenFeature(dtype=tf.string, shape=()))
    parsed = tf.io.parse_example(tensor_proto, feature_spec)
    # Emit features in the spec's (lexicographic) key order.
    return collections.OrderedDict((key, parsed[key]) for key in feature_spec)

  return dataset.map(parse_proto, num_parallel_calls=tf.data.AUTOTUNE)
def load_data(cache_dir=None):
  """Loads the federated Stack Overflow dataset.

  Downloads and caches the dataset locally; if previously downloaded, loads
  it from cache.  The dataset is derived from the Stack Overflow Data hosted
  by kaggle.com (https://www.kaggle.com/stackoverflow/stackoverflow),
  licensed under CC BY-SA 3.0 (http://creativecommons.org/licenses/by-sa/3.0/,
  or write to Creative Commons, PO Box 1866, Mountain View, CA 94042, USA).

  The data is the body text of all questions and answers, parsed into
  sentences; users with fewer than 100 sentences were removed.  Minimal
  preprocessing: lowercasing, HTML unescaping, non-ascii removal,
  punctuation separated into tokens (except apostrophes and hyphens),
  whitespace normalization, and URLs replaced with a special token.

  Splits:
    - Train: data before 2018-01-01 UTC except held-out users
      (342,477 users / 135,818,730 examples).
    - Held-out: all examples from users with user_id % 10 == 0
      (38,758 users / 16,491,230 examples).
    - Test: data after 2018-01-01 UTC except held-out users
      (204,088 users / 16,586,035 examples).

  Client datasets yield `collections.OrderedDict` objects with keys, in
  lexicographic order: `'creation_date'` (tf.string scalar, UTC date/time),
  `'score'` (tf.int64 scalar), `'tags'` (tf.string scalar, '|'-separated),
  `'title'` (tf.string scalar), `'tokens'` (tf.string scalar,
  space-separated), `'type'` (tf.string scalar, 'question' or 'answer').

  Args:
    cache_dir: (Optional) directory to cache the downloaded file. If `None`,
      caches in Keras' default cache directory.

  Returns:
    Tuple of (train, held_out, test) where the tuple elements are
    `tff.simulation.datasets.ClientData` objects.
  """
  database_path = download.get_compressed_file(
      origin='https://storage.googleapis.com/tff-datasets-public/stackoverflow.sqlite.lzma',
      cache_dir=cache_dir)

  def _split_client_data(split_name):
    # All three splits live in one sqlite database; each gets the same
    # proto-parsing preprocessing attached.
    return sql_client_data.SqlClientData(
        database_path, split_name).preprocess(_add_proto_parsing)

  return (_split_client_data('train'),
          _split_client_data('heldout'),
          _split_client_data('test'))
def load_word_counts(cache_dir=None, vocab_size: Optional[int] = None):
  """Loads the word counts for the Stack Overflow dataset.

  Args:
    cache_dir: (Optional) directory to cache the downloaded file. If `None`,
      caches in Keras' default cache directory.
    vocab_size: (Optional) when specified, only load the first `vocab_size`
      unique words in the vocab file (i.e. the most frequent `vocab_size`
      words).

  Returns:
    A collections.OrderedDict where the keys are string tokens, and the values
    are the counts of unique users who have at least one example in the
    training set containing that token in the body text.

  Raises:
    TypeError: if vocab_size is neither None nor an int.
    ValueError: if vocab_size is an int but not positive.
  """
  # Validate vocab_size up front, before paying for the download.
  if vocab_size is not None:
    if not isinstance(vocab_size, int):
      raise TypeError(
          f'vocab_size should be None or int, got {type(vocab_size)}.')
    if vocab_size <= 0:
      raise ValueError(f'vocab_size must be positive, got {vocab_size}.')
  archive_path = tf.keras.utils.get_file(
      'stackoverflow.word_count.tar.bz2',
      origin='https://storage.googleapis.com/tff-datasets-public/stackoverflow.word_count.tar.bz2',
      file_hash='1dc00256d6e527c54b9756d968118378ae14e6692c0b3b6cad470cdd3f0c519c',
      hash_algorithm='sha256',
      extract=True,
      archive_format='tar',
      cache_dir=cache_dir)
  counts_path = os.path.join(
      os.path.dirname(archive_path), 'stackoverflow.word_count')
  word_counts = collections.OrderedDict()
  # The vocab file lists the most frequent words first, so stopping after
  # vocab_size entries keeps exactly the top-vocab_size words.
  with open(counts_path) as counts_file:
    for line in counts_file:
      token, raw_count = line.split()
      word_counts[token] = int(raw_count)
      if vocab_size is not None and len(word_counts) >= vocab_size:
        break
  return word_counts
def load_tag_counts(cache_dir=None):
  """Loads the tag counts for the Stack Overflow dataset.

  Args:
    cache_dir: (Optional) directory to cache the downloaded file. If `None`,
      caches in Keras' default cache directory.

  Returns:
    A collections.OrderedDict where the keys are string tags, and the values
    are the counts of unique users who have at least one example in the
    training set containing that tag. The dictionary items are in decreasing
    order of tag frequency.
  """
  archive_path = tf.keras.utils.get_file(
      'stackoverflow.tag_count.tar.bz2',
      origin='https://storage.googleapis.com/tff-datasets-public/stackoverflow.tag_count.tar.bz2',
      file_hash='6fe281cec490d9384a290d560072438e7e2b377bbb823876ce7bd6f82696772d',
      hash_algorithm='sha256',
      extract=True,
      archive_format='tar',
      cache_dir=cache_dir)
  counts_path = os.path.join(
      os.path.dirname(archive_path), 'stackoverflow.tag_count')
  with open(counts_path) as counts_file:
    tag_counts = json.load(counts_file)
  # Order the tags from most to least frequent.
  ordered = sorted(tag_counts.items(), key=lambda item: item[1], reverse=True)
  return collections.OrderedDict(ordered)
def get_synthetic():
  """Returns a small synthetic dataset for testing.

  Provides two clients, each client with only 3 examples. The examples are
  derived from a fixed set of examples in the larger dataset, but are not exact
  copies.

  Returns:
    A `tff.simulation.datasets.ClientData` object that matches the
    characteristics (other than size) of those provided by
    `tff.simulation.datasets.stackoverflow.load_data`.
  """
  # NOTE(review): the docstring says two clients with 3 examples each, but
  # create_synthetic_data_dictionary below defines three clients and
  # 'synthetic_3' has only 2 examples — confirm and reconcile.
  return from_tensor_slices_client_data.TestClientData(
      create_synthetic_data_dictionary())
def create_synthetic_data_dictionary():
  """Returns the hard-coded client_id -> feature mapping backing `get_synthetic`.

  Each client maps to a `collections.OrderedDict` of parallel per-example
  feature lists (three examples for 'synthetic_1' and 'synthetic_2', two for
  'synthetic_3'), using the same keys and dtypes as the real dataset.
  """
  return {
      'synthetic_1':
          collections.OrderedDict(
              creation_date=[
                  b'2010-01-08 09:34:05 UTC',
                  b'2008-08-10 08:28:52.1 UTC',
                  b'2008-08-10 08:28:52.1 UTC',
              ],
              score=tf.constant([172, 80, 80], dtype=tf.int64),
              tags=[
                  b'sql|sql-server|aggregate-functions|median',
                  b'css|cross-browser|rounded-corners|css3',
                  b'css|cross-browser|rounded-corners|css3',
              ],
              title=[
                  b'function to calculate median in sql server',
                  b'creating rounded corners using css',
                  b'creating rounded corners using css',
              ],
              tokens=[
                  b"if you're using sql 2005 or better this is a nice , simple-ish median calculation for a single column in a table :",
                  b'css3 does finally define the',
                  b"which is exactly how you'd want it to work .",
              ],
              type=[
                  b'answer',
                  b'question',
                  b'answer',
              ]),
      'synthetic_2':
          collections.OrderedDict(
              creation_date=[
                  b'2008-08-05 19:01:55.2 UTC',
                  b'2010-07-15 18:15:58.5 UTC',
                  b'2010-07-15 18:15:58.5 UTC',
              ],
              score=tf.constant([3, 12, -1], dtype=tf.int64),
              tags=[
                  b'git|svn|version-control|language-agnostic|dvcs',
                  b'android|android-emulator|monkey',
                  b'android|android-emulator|monkey',
              ],
              title=[
                  b'getting started with version control',
                  b'writing to / system / framework in emulator',
                  b'writing to / system / framework in emulator',
              ],
              tokens=[
                  b'if you are on mac osx , i found <URL> " > versions to be an incredible ( free ) gui front-end to svn .',
                  b'edit :',
                  b'thanks .',
              ],
              type=[
                  b'answer',
                  b'question',
                  b'question',
              ],
          ),
      'synthetic_3':
          collections.OrderedDict(
              creation_date=[
                  b'2008-10-30 16:49:26.9 UTC',
                  b'2008-10-30 16:49:26.9 UTC',
              ],
              score=tf.constant([1, 1], dtype=tf.int64),
              tags=[
                  b'vb . net|design-patterns|iterator|yield',
                  b'vb . net|design-patterns|iterator|yield',
              ],
              title=[
                  b'iterator pattern in vb . net ( c # would use yield ! )',
                  b'iterator pattern in vb . net ( c # would use yield ! )',
              ],
              tokens=[
                  b'edit :',
                  b'the spec is available here .',
              ],
              type=[
                  b'answer',
                  b'answer',
              ],
          )
  }
def get_synthetic_word_counts():
  """Returns a small dictionary of word counts.

  Specifically, this provides the aggregate word counts for all sentences in
  `tff.simulation.datasets.stackoverflow.get_synthetic`.

  Returns:
    An ordered dictionary mapping words to their respective count.
  """
  # Pairs are listed in decreasing count, preserving the canonical ordering
  # of the aggregate counts.
  pairs = [('.', 4)]
  pairs += [(word, 3) for word in ['is', 'a', ':', 'to']]
  pairs += [(word, 2) for word in ['if', ',', 'the', 'edit']]
  pairs += [(word, 1) for word in [
      "you're", 'using', 'sql', '2005', 'or', 'better', 'this', 'nice',
      'simple-ish', 'median', 'calculation', 'for', 'single', 'column',
      'in', 'table', 'css3', 'does', 'finally', 'define', 'which',
      'exactly', 'how', "you'd", 'want', 'it', 'work', 'you', 'are', 'on',
      'mac', 'osx', 'i', 'found', '<URL>', '"', '>', 'versions', 'be',
      'an', 'incredible', '(', 'free', ')', 'gui', 'front-end', 'svn',
      'thanks', 'spec', 'available', 'here']]
  return collections.OrderedDict(pairs)
def get_synthetic_tag_counts():
  """Returns a small dictionary of tag counts.

  Specifically, this provides the aggregate tag counts for all tags in
  `tff.simulation.datasets.stackoverflow.get_synthetic`.

  Returns:
    An ordered dictionary mapping tags to their respective count.
  """
  # Pairs are listed in decreasing count, preserving the canonical ordering
  # of the aggregate counts.
  pairs = [(tag, 2) for tag in [
      'css', 'cross-browser', 'rounded-corners', 'css3', 'android',
      'android-emulator', 'monkey', 'vb . net', 'design-patterns',
      'iterator', 'yield']]
  pairs += [(tag, 1) for tag in [
      'sql', 'sql-server', 'aggregate-functions', 'median', 'git', 'svn',
      'version-control', 'language-agnostic', 'dvcs']]
  return collections.OrderedDict(pairs)
|
tensorflow/federated
|
tensorflow_federated/python/simulation/datasets/stackoverflow.py
|
Python
|
apache-2.0
| 14,910
|
[
"VisIt"
] |
e36b692c4df4e0ec44ccb2fc42e2dfe104710e199f50d12d0ec0a6ab562c6ea0
|
#!/usr/bin/env python
import argparse
import os
import subprocess
import sys
import time
from multiprocessing import Pool
# Copyright(C) 2015 David Ream
# Released under GPL version 3 licence. http://www.gnu.org/licenses/lgpl.html
# Do not remove this comment
# This exists to make the main function easier to read. It contains code to run the argument parser, and does nothing else.
def parser_code():
    """Build the command line parser and parse sys.argv.

    Returns:
        argparse.Namespace with fields database_folder, outfolder, filter,
        num_proc, query, eval and quiet.
    """
    parser = argparse.ArgumentParser(description='Run BLAST on a set of searchable database using a query that is specified by the user. Results will be stored by the accession number of the database. Currently this script will only accept protein queries, but I will update to automatically run on all types of genes, as most of the information needed for this behavior exists.')
    parser.add_argument("-d", "--database_folder", dest="database_folder", metavar="DIRECTORY", default='./db/',
                help="Folder containing all BLAST searchable databases to be used by the program.")
    parser.add_argument("-o", "--outfolder", dest="outfolder", metavar="DIRECTORY", default='./blast_result/',
                help="Folder where the BLAST results will be stored.")
    # Typo fix: the help text previously read "restrictiong".
    parser.add_argument("-f", "--filter", dest="filter", metavar="FILE", default='NONE',
                help="File restricting which accession numbers this script will process. If no file is provided, filtering is not performed.")
    parser.add_argument("-n", "--num_proc", dest="num_proc", metavar="INT", default = os.sysconf("SC_NPROCESSORS_CONF"), type=int,
                help="Number of processors that you want this script to run on. The default is every CPU that the system has.")
    # Fix this option, ultimately it should be a folder, that reads in a gene block file with the name of gene block(s) with a list of gene names and types.
    # The program will then take two files, protein and nucleic acid queries and run them. (This would require that there are two seperate, and complementary
    # blast databases within this folder. My desire is that it would be two subfolders, 'protein/' and 'rna/' which house these sets of data.
    parser.add_argument("-u", "--query", dest="query", default='gene_block_query.fa', metavar="FILE",
                help="A file that contains the BLAST query for every gene of interest in the dataset.")
    parser.add_argument("-e", "--eval", dest="eval", default='1e-6', metavar="FLOAT", type=float,
                help="eval for the BLAST search.")
    parser.add_argument("-q", "--quiet", dest="quiet", action="store_true", default=False,
                help="Suppresses most program text outputs.")
    return parser.parse_args()
def check_options(parsed_args):
    """Validate the parsed command line arguments.

    Exits the program (after printing a message) when a required path is
    missing; creates the output folder when it does not exist yet.

    Returns:
        (database_folder, outfolder, filter_file, num_proc, query_file,
         e_val, quiet)
    """
    # The database directory must already exist.
    if not os.path.isdir(parsed_args.database_folder):
        print("The database directory %s does not exist." % parsed_args.database_folder)
        sys.exit()
    database_folder = parsed_args.database_folder
    # Create the output folder on demand.
    if not os.path.isdir(parsed_args.outfolder):
        os.makedirs(parsed_args.outfolder)
    outfolder = parsed_args.outfolder
    # 'NONE' disables filtering; any other value must name an existing file.
    if parsed_args.filter != 'NONE' and not os.path.exists(parsed_args.filter):
        print("The filter file %s does not exist." % parsed_args.filter)
        sys.exit()
    filter_file = parsed_args.filter
    # Clamp the requested CPU count into [1, number of cores on this host].
    max_cores = os.sysconf("SC_NPROCESSORS_CONF")
    num_proc = int(min(max(parsed_args.num_proc, 1), max_cores))
    # The BLAST query file must exist.
    if not os.path.exists(parsed_args.query):
        print("The query file %s does not exist." % parsed_args.query)
        sys.exit()
    query_file = parsed_args.query
    return (database_folder, outfolder, filter_file, num_proc, query_file,
            parsed_args.eval, parsed_args.quiet)
def returnRecursiveDirFiles(root_dir):
    """Return every regular file found under root_dir (recursive os.walk)."""
    return [os.path.join(dirpath, name)
            for dirpath, _, filenames in os.walk(root_dir)
            for name in filenames
            if os.path.isfile(os.path.join(dirpath, name))]
# This code right now only deals with protein, but I will add functionality later for nucleotides.
# Just moving the project along here, but this is a critical flaw moving forward.
def do_parallel_blast(arg_tuple):
    """Run one blastp search of query_file against a single database.

    arg_tuple: (db, query_file, blast_result_folder, num_processors,
    eval_threshold). The result is written in tabular format (-outfmt 6) to
    <blast_result_folder><accession>.txt, where the accession is the database
    file name minus its extension. num_processors is accepted for
    compatibility but blastp is intentionally run single-threaded here; the
    parallelism comes from the multiprocessing Pool in parallel_blast.
    """
    db, query_file, blast_result_folder, num_processors, eval_threshold = arg_tuple
    out_file = "%s%s.txt" % (blast_result_folder, db.split('/')[-1].split('.')[0])
    # Build the command as an argument list and run it without a shell, so
    # paths containing shell metacharacters cannot be misinterpreted
    # (the previous os.system() call interpolated paths into a shell string).
    cmd = ['blastp', '-num_threads', '1',
           '-query', query_file, '-db', db,
           '-evalue', str(eval_threshold), '-out', out_file,
           '-outfmt', '6', '-seg', 'yes']
    subprocess.call(cmd)
def parallel_blast(database_folder, outfolder, filter_file, num_proc, query_file, e_val):
    """Run blastp for query_file against every database under database_folder.

    Only files with the '.ffc' extension are treated as databases. A filter
    file (one accession per line) restricts which databases are searched;
    '' / 'NONE' (or any value shorter than 5 characters) disables filtering.
    Results land in outfolder, one file per database accession.
    """
    unfiltered_db_list = [i for i in returnRecursiveDirFiles(database_folder)
                          if i.split('/')[-1].split('.')[-1] == 'ffc']
    if filter_file == '' or filter_file == 'NONE' or len(filter_file) < 5:
        db_list = unfiltered_db_list
    else:
        filter_list = [line.strip() for line in open(filter_file).readlines()]
        db_list = [i for i in unfiltered_db_list
                   if i.split('/')[-1].split('.')[0] in filter_list]
    # Each worker runs blastp single-threaded; the Pool supplies parallelism.
    blast_arg_list = [(db, query_file, outfolder, 1, e_val) for db in db_list]
    # Context manager closes and joins the Pool (the original leaked the
    # worker processes: no close()/join()).
    with Pool(processes=num_proc) as pool:
        pool.map(do_parallel_blast, blast_arg_list)
def main():
    """Entry point: parse arguments, validate them, run the BLAST searches."""
    start = time.time()
    parsed_args = parser_code()
    (database_folder, outfolder, filter_file, num_proc,
     query_file, e_val, quiet) = check_options(parsed_args)
    if not quiet:
        # Echo the validated settings as a single tuple.
        print((database_folder, outfolder, filter_file, num_proc, query_file, e_val, quiet))
    parallel_blast(database_folder, outfolder, filter_file, num_proc, query_file, e_val)
    if not quiet:
        # Report elapsed wall-clock seconds.
        print(time.time() - start)
# Example invocation (note: -u supplies the query file; -q toggles quiet mode):
# ./blast_script.py -d ./db/ -o ./blast_result/ -u ./gene_block_query.fa
if __name__ == '__main__':
    main()
|
nguyenngochuy91/Ancestral-Blocks-Reconstruction
|
blast_script.py
|
Python
|
gpl-3.0
| 7,346
|
[
"BLAST"
] |
58edb7ccc2788081d305e1164445c0e3c33a6e510ecb2f0eba202dfd45a846cc
|
from os.path import join
import os
import numpy as n
import glob
import sys
import time
import astropy.io.fits as fits
from os.path import join
import astropy.cosmology as co
cosmo = co.Planck13
import astropy.io.fits as fits
# for one galaxy spectrum
import GalaxySpectrumFIREFLY as gs
import StellarPopulationModel as spm
# Input DEEP2 catalogue and the output summary catalogue written by this script.
init_cat=join(os.environ['DEEP2_DIR'], "catalogs", "zcat.deep2.dr4.v4.LFcatalogTC.Planck15.fits")
summary_catalog = join(os.environ['DEEP2_DIR'], "catalogs", "zcat.deep2.dr4.v4.LFcatalogTC.Planck15.spm.fits")
hdu_orig_table = fits.open(init_cat)
orig_table = hdu_orig_table[1].data
orig_cols = orig_table.columns
# Sentinel value flagging missing / failed fits throughout this script.
dV=-9999.99
def get_table_entry_full(hduSPM, dV=-9999.99):
	"""Flatten one firefly SPM FITS extension header into a table row.

	Parameters
	----------
	hduSPM : FITS HDU whose .header holds the stellar population model
		results (light-/mass-weighted ages and metallicities, stellar mass,
		E(B-V), chi2 and the per-SSP component quantities).
	dV : float, sentinel used to pad the per-SSP columns up to 8 components
		(default matches the module-level dV; made a parameter so the
		function no longer depends on a global).

	Returns
	-------
	(row, header) : row is a flat numpy array of 67 values, header the
		matching space-separated column names, each prefixed with
		"<IMF>_<library>_".
	"""
	prefix = hduSPM.header['IMF'] + "_" + hduSPM.header['library'] + "_"
	headerA =" "+prefix+"age_lightW "+prefix+"age_lightW_err_plus "+prefix+"age_lightW_err_minus "+prefix+"metallicity_lightW "+prefix+"metallicity_lightW_err_plus "+prefix+"metallicity_lightW_err_minus "+prefix+"age_massW "+prefix+"age_massW_err_plus "+prefix+"age_massW_err_minus "+prefix+"metallicity_massW "+prefix+"metallicity_massW_err_plus "+prefix+"metallicity_massW_err_minus "+prefix+"stellar_mass "+prefix+"stellar_mass_err_plus "+prefix+"stellar_mass_err_minus "+prefix+"spm_EBV "+prefix+"nComponentsSSP "+prefix+"chi2 "+prefix+"ndof "
	# Ages are stored as log10 in the header; export them linear.
	table_entry = [10**hduSPM.header['age_lightW']
		, 10**hduSPM.header['age_lightW_up']
		, 10**hduSPM.header['age_lightW_low']
		, hduSPM.header['metallicity_lightW']
		, hduSPM.header['metallicity_lightW_up']
		, hduSPM.header['metallicity_lightW_low']
		, 10**hduSPM.header['age_massW']
		, 10**hduSPM.header['age_massW_up']
		, 10**hduSPM.header['age_massW_low']
		, hduSPM.header['metallicity_massW']
		, hduSPM.header['metallicity_massW_up']
		, hduSPM.header['metallicity_massW_low']
		, hduSPM.header['stellar_mass']
		, hduSPM.header['stellar_mass_up']
		, hduSPM.header['stellar_mass_low']
		, hduSPM.header['EBV']
		, hduSPM.header['ssp_number']
		, hduSPM.header['chi2']
		, hduSPM.header['ndof']
		]
	# One block of six columns per fitted SSP component.
	for iii in n.arange(hduSPM.header['ssp_number']):
		table_entry.append( hduSPM.header['stellar_mass_ssp_'+str(iii)] )
		table_entry.append( hduSPM.header['age_ssp_'+str(iii)] )
		table_entry.append( hduSPM.header['metal_ssp_'+str(iii)] )
		table_entry.append( hduSPM.header['SFR_ssp_'+str(iii)] )
		table_entry.append( hduSPM.header['weightMass_ssp_'+str(iii)] )
		table_entry.append( hduSPM.header['weightLight_ssp_'+str(iii)] )
		# BUG FIX: the original wrote 'ndofage_ssp_' here, mislabelling the
		# age column of every fitted component; 'age_ssp_' matches the value
		# actually stored and the padded branch below.
		headerA += ' '+prefix+'stellar_mass_ssp_'+str(iii) + ' '+prefix+'age_ssp_'+str(iii) + ' '+prefix+'metal_ssp_'+str(iii) + ' '+prefix+'SFR_ssp_'+str(iii) + ' '+prefix+'weightMass_ssp_'+str(iii) + ' '+prefix+'weightLight_ssp_'+str(iii)
	# Pad with sentinel values up to 8 SSP components so every row has the
	# same 67 columns.
	if hduSPM.header['ssp_number']<8 :
		for iii in n.arange(hduSPM.header['ssp_number'], 8, 1):
			table_entry.append([dV, dV, dV, dV, dV, dV])
			headerA += ' '+prefix+'stellar_mass_ssp_'+str(iii) + ' '+prefix+'age_ssp_'+str(iii) + ' '+prefix+'metal_ssp_'+str(iii) + ' '+prefix+'SFR_ssp_'+str(iii) + ' '+prefix+'weightMass_ssp_'+str(iii) + ' '+prefix+'weightLight_ssp_'+str(iii)
	# hstack flattens the mix of scalars and padding lists into one 1-d array.
	return n.hstack(table_entry), headerA
# Assemble one 603-wide row per catalogue object:
# 9 SPM extensions x 67 columns each = 603.
N_cols = 603
table_all = []
for mask, objno in zip(orig_table['MASK'], orig_table['OBJNO']):
	headers = ""
	fitFile = join( os.environ['DEEP2_DIR'], 'stellarpop', "spFly-deep2-"+str(mask)+"-"+str(objno)+".fits")
	if os.path.isfile(fitFile):
		hdus = fits.open(fitFile)
		if len(hdus)==10:
			table_entry = []
			for ii in range(1,len(hdus)):
				table_entry_i, headers_i = get_table_entry_full( hdus[ii] )
				table_entry.append(table_entry_i)
				headers += headers_i
			header_out = headers
			# NOTE(review): this success branch never appends table_entry to
			# table_all, so successfully fitted objects appear to be dropped
			# and the column arrays below may not match the catalogue length
			# — confirm against the upstream script before relying on output.
		else:
			print fitFile
			table_all.append(n.ones(N_cols)*dV)
	else:
		#print fitFile
		table_all.append(n.ones(N_cols)*dV)
# Transpose rows -> per-column arrays, then append them to the original
# catalogue columns (column names come from the last successful header_out).
newDat = n.transpose(table_all)
c0 = fits.Column(name="MASK", format='I', array=orig_table['MASK'])
c1 = fits.Column(name="OBJNO", format='I', array=orig_table['OBJNO'])
all_cols = [c0, c1]
for data_array, head in zip(newDat, header_out.split()):
	all_cols.append(fits.Column(name=head, format='D', array=data_array))
new_cols = fits.ColDefs(all_cols)
hdu = fits.BinTableHDU.from_columns(orig_cols + new_cols)
# Overwrite any previous summary catalogue.
if os.path.isfile(summary_catalog):
	os.remove(summary_catalog)
hdu.writeto(summary_catalog)
|
JohanComparat/pySU
|
spm/bin_deep_surveys/create_summary_table_DEEP2.py
|
Python
|
cc0-1.0
| 4,443
|
[
"Galaxy"
] |
3dee5f01d26e2e72c3c28b1ed88a083de48faa39a0e6b8b19934a5941bbe6da1
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import unittest
import ctypes
import numpy
import numpy as np
from pyscf.pbc import gto as pgto
L = 1.5
n = 41
cl = pgto.Cell()
cl.build(
a = [[L,0,0], [0,L,0], [0,0,L]],
mesh = [n,n,n],
atom = 'He %f %f %f' % ((L/2.,)*3),
basis = 'ccpvdz')
numpy.random.seed(1)
cl1 = pgto.Cell()
cl1.build(a = numpy.random.random((3,3)).T,
precision = 1e-9,
mesh = [n,n,n],
atom ='''He .1 .0 .0
He .5 .1 .0
He .0 .5 .0
He .1 .3 .2''',
basis = 'ccpvdz')
def finger(a):
    """Deterministic scalar fingerprint of an array.

    Dots the flattened values with cos(0), cos(1), ..., cos(size-1); used to
    compare whole arrays against single reference numbers in the tests.
    """
    weights = numpy.cos(numpy.arange(a.size))
    return a.ravel().dot(weights)
class KnownValues(unittest.TestCase):
    """Regression tests for pbc.gto.Cell: periodic-image counts, structure
    factors, lattice sums, Ewald energies, integral drivers and input
    handling, pinned to previously validated reference values."""
    def test_nimgs(self):
        # NOTE(review): assertTrue(x, y) treats y as the failure *message*,
        # so these lines only check truthiness of the list — assertEqual was
        # probably intended. Confirm before changing, since assertEqual may
        # start failing.
        self.assertTrue(list(cl.get_nimgs(9e-1)), [1,1,1])
        self.assertTrue(list(cl.get_nimgs(1e-2)), [2,2,2])
        self.assertTrue(list(cl.get_nimgs(1e-4)), [3,3,3])
        self.assertTrue(list(cl.get_nimgs(1e-6)), [4,4,4])
        self.assertTrue(list(cl.get_nimgs(1e-9)), [5,5,5])
    def test_Gv(self):
        # Fingerprint of the G vectors of the random triclinic cell.
        a = cl1.get_Gv()
        self.assertAlmostEqual(finger(a), -99.791927068519939, 10)
    def test_SI(self):
        # Structure factor on the cell's own G-grid ...
        a = cl1.get_SI()
        self.assertAlmostEqual(finger(a), (16.506917823339265+1.6393578329869585j), 10)
        # ... and on an arbitrary user-supplied set of G vectors.
        np.random.seed(2)
        Gv = np.random.random((5,3))
        a = cl1.get_SI(Gv)
        self.assertAlmostEqual(finger(a), (0.65237631847195221-1.5736011413431059j), 10)
    def test_mixed_basis(self):
        # build() must accept a different basis per labelled element.
        cl = pgto.Cell()
        cl.build(
            a = [[L,0,0], [0,L,0], [0,0,L]],
            mesh = [n,n,n],
            atom = 'C1 %f %f %f; C2 %f %f %f' % ((L/2.,)*6),
            basis = {'C1':'ccpvdz', 'C2':'gthdzv'})
    def test_dumps_loads(self):
        # Serialisation round trip of the cell object.
        cl1.loads(cl1.dumps())
    def test_get_lattice_Ls(self):
        #self.assertEqual(cl1.get_lattice_Ls([0,0,0]).shape, (1  , 3))
        #self.assertEqual(cl1.get_lattice_Ls([1,1,1]).shape, (13 , 3))
        #self.assertEqual(cl1.get_lattice_Ls([2,2,2]).shape, (57 , 3))
        #self.assertEqual(cl1.get_lattice_Ls([3,3,3]).shape, (137, 3))
        #self.assertEqual(cl1.get_lattice_Ls([4,4,4]).shape, (281, 3))
        #self.assertEqual(cl1.get_lattice_Ls([5,5,5]).shape, (493, 3))
        # Diamond-structure carbon: the number of lattice translation vectors
        # within rcut must match the reference at two precision targets.
        cell = pgto.M(atom = '''
        C 0.000000000000 0.000000000000 0.000000000000
        C 1.685068664391 1.685068664391 1.685068664391''',
                      unit='B',
                      basis = 'gth-dzvp',
                      pseudo = 'gth-pade',
                      a = '''
                      0.000000000 3.370137329 3.370137329
                      3.370137329 0.000000000 3.370137329
                      3.370137329 3.370137329 0.000000000''',
                      mesh = [15]*3)
        rcut = max([cell.bas_rcut(ib, 1e-8) for ib in range(cell.nbas)])
        self.assertEqual(cell.get_lattice_Ls(rcut=rcut).shape, (1097, 3))
        rcut = max([cell.bas_rcut(ib, 1e-9) for ib in range(cell.nbas)])
        self.assertEqual(cell.get_lattice_Ls(rcut=rcut).shape, (1241, 3))
    def test_ewald(self):
        cell = pgto.Cell()
        cell.unit = 'B'
        Lx = Ly = Lz = 5.
        cell.a = numpy.diag([Lx,Ly,Lz])
        cell.mesh = numpy.array([41]*3)
        cell.atom = [['He', (2, 0.5*Ly, 0.5*Lz)],
                     ['He', (3, 0.5*Ly, 0.5*Lz)]]
        cell.basis = {'He': [[0, (1.0, 1.0)]]}
        cell.verbose = 5
        cell.output = '/dev/null'
        cell.build()
        # NOTE(review): ew_cut is assigned but never used below.
        ew_cut = (20,20,20)
        # The converged Ewald energy must not depend on the eta splitting
        # parameter.
        self.assertAlmostEqual(cell.ewald(.05, 100), -0.468640671931, 9)
        self.assertAlmostEqual(cell.ewald(0.1, 100), -0.468640671931, 9)
        self.assertAlmostEqual(cell.ewald(0.2, 100), -0.468640671931, 9)
        self.assertAlmostEqual(cell.ewald(1  , 100), -0.468640671931, 9)
        def check(precision, eta_ref, ewald_ref):
            # get_ewald_params should reproduce the reference eta for this
            # precision, and the resulting energy the reference value.
            ew_eta0, ew_cut0 = cell.get_ewald_params(precision, mesh=[41]*3)
            self.assertAlmostEqual(ew_eta0, eta_ref)
            self.assertAlmostEqual(cell.ewald(ew_eta0, ew_cut0), ewald_ref, 9)
        check(0.001, 3.15273336976, -0.468640679947)
        check(1e-05, 2.77596886114, -0.468640671968)
        check(1e-07, 2.50838938833, -0.468640671931)
        check(1e-09, 2.30575091612, -0.468640671931)
        # Same eta-independence check for a random triclinic cell.
        cell = pgto.Cell()
        numpy.random.seed(10)
        cell.a = numpy.random.random((3,3))*2 + numpy.eye(3) * 2
        cell.mesh = [41]*3
        cell.atom = [['He', (1, 1, 2)],
                     ['He', (3, 2, 1)]]
        cell.basis = {'He': [[0, (1.0, 1.0)]]}
        cell.verbose = 5
        cell.output = '/dev/null'
        cell.build()
        self.assertAlmostEqual(cell.ewald(1, 20), -2.3711356723457615, 9)
        self.assertAlmostEqual(cell.ewald(2, 10), -2.3711356723457615, 9)
        self.assertAlmostEqual(cell.ewald(2,  5), -2.3711356723457615, 9)
    def test_ewald_2d_inf_vacuum(self):
        # 2D-periodic Ewald with the infinite-vacuum treatment.
        cell = pgto.Cell()
        cell.a = numpy.eye(3) * 4
        cell.atom = 'He 0 0 0; He 0 1 1'
        cell.unit = 'B'
        cell.mesh = [9,9,60]
        cell.verbose = 0
        cell.dimension = 2
        cell.low_dim_ft_type = 'inf_vacuum'
        cell.rcut = 3.6
        cell.build()
        self.assertAlmostEqual(cell.ewald(), 3898143.7149599474, 4)
        # Non-orthogonal in-plane lattice.
        a = numpy.eye(3) * 3
        a[0,1] = .2
        c = pgto.M(atom='H 0 0.1 0; H 1.1 2.0 0; He 1.2 .3 0.2',
                   a=a, dimension=2, verbose=0)
        self.assertAlmostEqual(c.ewald(), -3.0902098018260418, 9)
    def test_ewald_1d_inf_vacuum(self):
        # 1D-periodic Ewald with the infinite-vacuum treatment.
        cell = pgto.Cell()
        cell.a = numpy.eye(3) * 4
        cell.atom = 'He 0 0 0; He 0 1 1'
        cell.unit = 'B'
        cell.mesh = [9,60,60]
        cell.verbose = 0
        cell.dimension = 1
        cell.low_dim_ft_type = 'inf_vacuum'
        cell.rcut = 3.6
        cell.build()
        self.assertAlmostEqual(cell.ewald(), 70.875156940393225, 8)
    def test_ewald_0d_inf_vacuum(self):
        # 0D (molecular limit): Ewald sum must approach the plain nuclear
        # repulsion energy of the equivalent molecule.
        cell = pgto.Cell()
        cell.a = numpy.eye(3)
        cell.atom = 'He 0 0 0; He 0 1 1'
        cell.unit = 'B'
        cell.mesh = [60] * 3
        cell.verbose = 0
        cell.dimension = 0
        cell.low_dim_ft_type = 'inf_vacuum'
        cell.build()
        eref = cell.to_mol().energy_nuc()
        self.assertAlmostEqual(cell.ewald(), eref, 2)
    def test_ewald_2d(self):
        # 2D-periodic Ewald with the default (analytic) low-dimension scheme.
        cell = pgto.Cell()
        cell.a = numpy.eye(3) * 4
        cell.atom = 'He 0 0 0; He 0 1 1'
        cell.unit = 'B'
        cell.mesh = [9,9,60]
        cell.verbose = 0
        cell.dimension = 2
        cell.rcut = 3.6
        cell.build()
        self.assertAlmostEqual(cell.ewald(), -5.1194779101355596, 9)
#    def test_ewald_1d(self):
#        cell = pgto.Cell()
#        cell.a = numpy.eye(3) * 4
#        cell.atom = 'He 0 0 0; He 0 1 1'
#        cell.unit = 'B'
#        cell.mesh = [9,60,60]
#        cell.verbose = 0
#        cell.dimension = 1
#        cell.rcut = 3.6
#        cell.build()
#        self.assertAlmostEqual(cell.ewald(), 70.875156940393225, 8)
#
#    def test_ewald_0d(self):
#        cell = pgto.Cell()
#        cell.a = numpy.eye(3)
#        cell.atom = 'He 0 0 0; He 0 1 1'
#        cell.unit = 'B'
#        cell.mesh = [60] * 3
#        cell.verbose = 0
#        cell.dimension = 0
#        cell.build()
#        eref = cell.to_mol().energy_nuc()
#        self.assertAlmostEqual(cell.ewald(), eref, 2)
    def test_pbc_intor(self):
        # Periodic overlap integrals at gamma plus three random k-points.
        numpy.random.seed(12)
        kpts = numpy.random.random((4,3))
        kpts[0] = 0
        self.assertEqual(list(cl1.nimgs), [32,21,19])
        s0 = cl1.pbc_intor('int1e_ovlp_sph', hermi=0, kpts=kpts)
        self.assertAlmostEqual(finger(s0[0]), 492.30658304804126, 4)
        self.assertAlmostEqual(finger(s0[1]), 37.812956255000756-28.972806230140314j, 4)
        self.assertAlmostEqual(finger(s0[2]),-26.113285893260819-34.448501789693566j, 4)
        self.assertAlmostEqual(finger(s0[3]), 186.58921213429491+123.90133823378201j, 4)
        # hermi=1 at gamma must agree with the hermi=0 gamma result.
        s1 = cl1.pbc_intor('int1e_ovlp_sph', hermi=1, kpts=kpts[0])
        self.assertAlmostEqual(finger(s1), 492.30658304804126, 4)
    def test_ecp_pseudo(self):
        # Mixing an ECP on one atom with a pseudopotential on the other.
        from pyscf.pbc.gto import ecp
        cell = pgto.M(
            a = np.eye(3)*5,
            mesh = [9]*3,
            atom = 'Cu 0 0 1; Na 0 1 0',
            ecp = {'Na':'lanl2dz'},
            pseudo = {'Cu': 'gthbp'})
        self.assertTrue(all(cell._ecpbas[:,0] == 1))
        # Periodic ECP integrals must match the molecular ECP integrals for a
        # cell large enough to isolate the images.
        cell = pgto.Cell()
        cell.a = numpy.eye(3) * 8
        cell.mesh = [11] * 3
        cell.atom='''Na 0. 0. 0.
                     H  0.  0.  1.'''
        cell.basis={'Na':'lanl2dz', 'H':'sto3g'}
        cell.ecp = {'Na':'lanl2dz'}
        cell.build()
        v1 = ecp.ecp_int(cell)
        mol = cell.to_mol()
        v0 = mol.intor('ECPscalar_sph')
        self.assertAlmostEqual(abs(v0 - v1).sum(), 0.029005926114411891, 8)
    def test_ecp_keyword_in_pseudo(self):
        # Elements given in `pseudo` but recognised as ECPs must migrate to
        # the ecp dict (and vice versa), dropping entries for absent elements.
        cell = pgto.M(
            a = np.eye(3)*5,
            mesh = [9]*3,
            atom = 'S 0 0 1',
            ecp = 'lanl2dz',
            pseudo = {'O': 'gthbp', 'Cu': 'stuttgartrsc'})
        self.assertEqual(cell.ecp, 'lanl2dz')
        self.assertEqual(cell.pseudo, {'O': 'gthbp'})
        cell = pgto.M(
            a = np.eye(3)*5,
            mesh = [9]*3,
            atom = 'S 0 0 1',
            ecp = {'na': 'lanl2dz'},
            pseudo = {'O': 'gthbp', 'Cu': 'stuttgartrsc'})
        self.assertEqual(cell.ecp, {'na': 'lanl2dz', 'Cu': 'stuttgartrsc'})
        self.assertEqual(cell.pseudo, {'O': 'gthbp'})
        cell = pgto.M(
            a = np.eye(3)*5,
            mesh = [9]*3,
            atom = 'S 0 0 1',
            pseudo = {'O': 'gthbp', 'Cu': 'stuttgartrsc'})
        self.assertEqual(cell.ecp, {'Cu': 'stuttgartrsc'})
        self.assertEqual(cell.pseudo, {'O': 'gthbp'})
        cell = pgto.M(
            a = np.eye(3)*5,
            mesh = [9]*3,
            atom = 'S 0 0 1',
            ecp = {'S': 'gthbp', 'na': 'lanl2dz'},
            pseudo = {'O': 'gthbp', 'Cu': 'stuttgartrsc'})
        self.assertEqual(cell.ecp, {'na': 'lanl2dz', 'Cu': 'stuttgartrsc'})
        self.assertEqual(cell.pseudo, {'S': 'gthbp', 'O': 'gthbp'})
    def test_pseudo_suffix(self):
        # A 'qN' suffix selects the variant with N valence electrons.
        cell = pgto.M(
            a = np.eye(3)*5,
            mesh = [9]*3,
            atom = 'Mg 0 0 1',
            pseudo = {'Mg': 'gth-lda'})
        self.assertEqual(cell.atom_nelec_core(0), 2)
        cell = pgto.M(
            a = np.eye(3)*5,
            mesh = [9]*3,
            atom = 'Mg 0 0 1',
            pseudo = {'Mg': 'gth-lda q2'})
        self.assertEqual(cell.atom_nelec_core(0), 10)
    def pbc_intor_symmetry(self):
        # NOTE(review): name lacks the 'test_' prefix, so unittest discovery
        # never runs this method — confirm whether that is intentional.
        # Overlap at k and at symmetry-related k-points must be related by
        # complex conjugation / equality.
        a = cl1.lattice_vectors()
        b = numpy.linalg.inv(a).T * (numpy.pi*2)
        kpts = numpy.random.random((4,3))
        kpts[1] = b[0]+b[1]+b[2]-kpts[0]
        kpts[2] = b[0]-b[1]-b[2]-kpts[0]
        kpts[3] = b[0]-b[1]+b[2]+kpts[0]
        s = cl1.pbc_intor('int1e_ovlp', kpts=kpts)
        self.assertAlmostEqual(abs(s[0]-s[1].conj()).max(), 0, 12)
        self.assertAlmostEqual(abs(s[0]-s[2].conj()).max(), 0, 12)
        self.assertAlmostEqual(abs(s[0]-s[3]       ).max(), 0, 12)
    def test_basis_truncation(self):
        # The '@3s1p' suffix keeps only 3 s and 1 p contractions.
        b = pgto.basis.load('gthtzvp@3s1p', 'C')
        self.assertEqual(len(b), 2)
        self.assertEqual(len(b[0][1]), 4)
        self.assertEqual(len(b[1][1]), 2)
    def test_getattr(self):
        # Method-style shortcuts on Cell must build the matching scf/dft/cc/
        # tdscf objects; unknown attributes must still raise AttributeError.
        from pyscf.pbc import scf, dft, cc, tdscf
        cell = pgto.M(atom='He', a=np.eye(3)*4, basis={'He': [[0, (1, 1)]]})
        self.assertEqual(cell.HF().__class__, scf.HF(cell).__class__)
        self.assertEqual(cell.KS().__class__, dft.KS(cell).__class__)
        self.assertEqual(cell.UKS().__class__, dft.UKS(cell).__class__)
        self.assertEqual(cell.KROHF().__class__, scf.KROHF(cell).__class__)
        self.assertEqual(cell.KKS().__class__, dft.KKS(cell).__class__)
        self.assertEqual(cell.CCSD().__class__, cc.ccsd.RCCSD)
        self.assertEqual(cell.TDA().__class__, tdscf.rhf.TDA)
        self.assertEqual(cell.TDBP86().__class__, tdscf.rks.TDDFTNoHybrid)
        self.assertEqual(cell.TDB3LYP().__class__, tdscf.rks.TDDFT)
        self.assertEqual(cell.KCCSD().__class__, cc.kccsd_rhf.KRCCSD)
        self.assertEqual(cell.KTDA().__class__, tdscf.krhf.TDA)
        self.assertEqual(cell.KTDBP86().__class__, tdscf.krks.TDDFTNoHybrid)
        self.assertRaises(AttributeError, lambda: cell.xyz)
        self.assertRaises(AttributeError, lambda: cell.TDxyz)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    print("Full Tests for pbc.gto.cell")
    unittest.main()
|
gkc1000/pyscf
|
pyscf/pbc/gto/test/test_cell.py
|
Python
|
apache-2.0
| 13,097
|
[
"PySCF"
] |
9ad7678721ee715842235d24151dd3e755b5c44f9f93c13be3b9f7bcc5a34249
|
# (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
This system test module is useful to identify if some of the key components required for Iris are available.
The system tests can be run with ``python setup.py test --system-tests``.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before importing anything else
import cf_units
import numpy as np
import iris
import iris.fileformats.netcdf as netcdf
import iris.fileformats.pp as pp
import iris.tests as tests
class SystemInitialTest(tests.IrisTest):
    """Smoke tests checking that key components Iris relies on (file-format
    save/load backends, the GRIB API, matplotlib, netCDF4) are importable and
    minimally functional."""
    # NOTE(review): methods use the 'system_test_' prefix rather than 'test_',
    # so plain unittest discovery skips them; the module docstring points to
    # ``python setup.py test --system-tests`` as the intended runner — confirm.
    def system_test_supported_filetypes(self):
        # Build a small synthetic 'wind_speed' cube on a regular lat/lon grid.
        nx, ny = 60, 60
        data = np.arange(nx * ny, dtype='>f4').reshape(nx, ny)
        laty = np.linspace(0, 59, ny).astype('f8')
        lonx = np.linspace(30, 89, nx).astype('f8')
        # Factory so each coordinate gets its own GeogCS instance.
        horiz_cs = lambda : iris.coord_systems.GeogCS(6371229)
        cm = iris.cube.Cube(data, 'wind_speed', units='m s-1')
        cm.add_dim_coord(
            iris.coords.DimCoord(laty, 'latitude', units='degrees',
                                 coord_system=horiz_cs()),
            0)
        cm.add_dim_coord(
            iris.coords.DimCoord(lonx, 'longitude', units='degrees',
                                 coord_system=horiz_cs()),
            1)
        cm.add_aux_coord(iris.coords.AuxCoord(np.array([9], 'i8'),
                                              'forecast_period', units='hours'))
        hours_since_epoch = cf_units.Unit('hours since epoch',
                                          cf_units.CALENDAR_GREGORIAN)
        cm.add_aux_coord(iris.coords.AuxCoord(np.array([3], 'i8'),
                                              'time', units=hours_since_epoch))
        cm.add_aux_coord(iris.coords.AuxCoord(np.array([99], 'i8'),
                                              long_name='pressure', units='Pa'))
        # Round-trip the cube through each supported format and compare the
        # reloaded cube against the stored CML reference.
        filetypes = ('.nc', '.pp')
        if tests.GRIB_AVAILABLE:
            filetypes += ('.grib2',)
        for filetype in filetypes:
            saved_tmpfile = iris.util.create_temp_filename(suffix=filetype)
            iris.save(cm, saved_tmpfile)
            new_cube = iris.load_cube(saved_tmpfile)
            self.assertCML(new_cube,
                           ('system',
                            'supported_filetype_%s.cml' % filetype))
    @tests.skip_grib
    def system_test_grib_patch(self):
        # Round-trip a get/set of the 'missingValue' key through the GRIB API.
        import gribapi
        gm = gribapi.grib_new_from_samples("GRIB2")
        result = gribapi.grib_get_double(gm, "missingValue")
        new_missing_value = 123456.0
        gribapi.grib_set_double(gm, "missingValue", new_missing_value)
        new_result = gribapi.grib_get_double(gm, "missingValue")
        self.assertEqual(new_result, new_missing_value)
    def system_test_imports_general(self):
        # Optional plotting backend plus the required netCDF binding.
        if tests.MPL_AVAILABLE:
            import matplotlib
        import netCDF4
# Allow this test module to be run directly as a script.
if __name__ == '__main__':
    tests.main()
|
QuLogic/iris
|
lib/iris/tests/system_test.py
|
Python
|
gpl-3.0
| 3,658
|
[
"NetCDF"
] |
d23ea6591ef5a1c65f489bddba54b8ae7fdd4a99c94d5ca7998c6c4aaba3fb51
|
# Copyright (C) 2016 Henrique Pereira Coutada Miranda
# All rights reserved.
#
# This file is part of yamboparser
#
#
import os
import re
import numpy as np
# Probe for the optional netCDF4 dependency once at import time; parsing
# of yambo binary databases is disabled when the package is missing.
try:
    from netCDF4 import Dataset
except ImportError:
    _has_netcdf = False
else:
    _has_netcdf = True
class YamboFile():
    """
    This is the Yambo file class.
    It takes as input a filename produced by yambo.
    Can be a netcdf or a text file
    List of supported NETCDF files:
    -> ndb.QP
    List of supported text files:
    -> r-*_em?1_*_gw0
    -> o-*.qp
    """
    # Filename prefixes/sufixes used to classify the type of yambo file.
    _output_prefixes = ['o-']
    _report_prefixes = ['r-','r.']
    _log_prefixes = ['l-','l.']
    _netcdf_prefixes = ['ns','ndb']
    _netcdf_sufixes = {'QP':'gw','HF_and_locXC':'hf'}

    def __init__(self,filename,folder='.'):
        """Classify *filename* by its prefix/sufix and parse its contents.

        Parameters:
            filename : name of the file produced by yambo
            folder   : directory containing the file (default '.')
        """
        self.filename = filename
        self.folder = folder
        self.type = None       # output_gw / report / log / netcdf_* / unknown
        self.errors = [] #list of errors
        self.warnings = [] #list of warnings
        self.memstats = [] #list of memory allocation statistics
        self.data = {} #dictionary containing all the important data from the file
        self.kpoints = {}
        self.timing = []
        if any(filename.startswith(prefix) for prefix in self._output_prefixes):
            #read lines from file
            with open("%s/%s"%(folder,filename),'r') as f:
                self.lines = f.readlines()
            #get the line with the title
            try:
                title = self.lines[14]
            except IndexError:
                # file too short to contain the title line
                self.errors.append('error reading title')
                return
            if 'GW' in title:
                self.type = 'output_gw'
        elif any(filename.startswith(prefix) for prefix in self._report_prefixes):
            self.type = 'report'
        elif any(filename.startswith(prefix) for prefix in self._log_prefixes):
            self.type = 'log'
        elif any(filename.startswith(prefix) for prefix in self._netcdf_prefixes) and _has_netcdf:
            for sufix in self._netcdf_sufixes:
                if filename.endswith(sufix):
                    self.type = 'netcdf_%s'%self._netcdf_sufixes[sufix]
                    break
        if self.type is None: self.type = 'unknown'
        #parse the file
        self.parse()

    def parse(self):
        """ Parse the file
        Add here things to read log and report files...
        """
        if self.type == 'netcdf_gw': self.parse_netcdf_gw()
        elif self.type == 'netcdf_hf': self.parse_netcdf_hf()
        elif self.type == 'output_gw': self.parse_output()
        elif self.type == 'log': self.parse_log()
        elif self.type == 'report' : self.parse_report()

    def parse_output(self):
        """ Parse an output file from yambo,
        filling ``self.data`` with one dictionary of column lists per k-point.
        """
        #get the tags of the columns
        # NOTE(review): 'output_absorption' is never assigned by __init__,
        # so this branch currently appears unreachable -- confirm intent.
        if self.type == "output_absorption":
            tags = [tag.strip() for tag in re.findall('([ `0-9a-zA-Z\-\/]+)\[[0-9]\]',''.join(self.lines))]
        if self.type == "output_gw":
            tags = [line.replace('(meV)','').replace('Sc(Eo)','Sc|Eo') for line in self.lines if all(tag in line for tag in ['K-point','Band','Eo'])][0]
            tags = tags[2:].strip().split()
        table = np.genfromtxt(self.lines)
        _kdata ={}
        k_index =[ str(int(i)) for i in table[:,0]] # first column has kpoints
        for ind in range(len(k_index)):
            for itag in range(len(tags)):
                if k_index[ind] not in _kdata.keys():
                    _kdata[k_index[ind]] = {}
                try:
                    _kdata[k_index[ind]][tags[itag]].append(table[ind,itag])
                except KeyError:
                    _kdata[k_index[ind]][tags[itag]] = [ table[ind,itag] ]
        self.data = _kdata
        #self.data = dict(zip(tags,table.T))

    def parse_netcdf_gw(self):
        """ Parse the netcdf gw file (ndb.QP), filling ``self.data``.
        """
        if _has_netcdf:
            data = {}
            f = Dataset('%s/%s'%(self.folder,self.filename))
            #quasiparticles table
            qp_table = f.variables['QP_table'][:]
            data['Kpoint_index'] = qp_table[2]
            data['Band'] = qp_table[0]
            if qp_table.shape[1] == 4: # spin polarized
                data['Spin_pol'] = qp_table[:,3]
            data['qp_table'] = qp_table[:,1:] # ib, ik, ,(isp if spin polarized)
            #qpoints
            data['Kpoint'] = f.variables['QP_kpts'][:].T
            #quasi-particles
            #old format: complex E and Z stored as a single (2,...) variable
            if 'QP_E_Eo_Z' in f.variables:
                qp = f.variables['QP_E_Eo_Z'][:]
                qp = qp[0]+qp[1]*1j
                data['E'], data['Eo'], data['Z'] = qp.T
                data['E-Eo'] = data['E'] - data['Eo']
                self.data=data
            #new format: separate QP_E / QP_Eo / QP_Z variables
            else:
                E = f.variables['QP_E'][:]
                data['E'] = E[:,0] + E[:,1]*1j
                Eo = f.variables['QP_Eo'][:]
                data['Eo']= Eo
                Z = f.variables['QP_Z'][:]
                data['Z'] = Z[:,0] + Z[:,1]*1j
                data['E-Eo'] = data['E'] - data['Eo']
                self.data=data
            f.close()

    def parse_netcdf_hf(self):
        """ Parse the netcdf hf file (ndb.HF_and_locXC)
        """
        if _has_netcdf:
            data = {}
            f = Dataset('%s/%s'%(self.folder,self.filename))
            hf = f.variables['Sx_Vxc'][:]
            # Rows may carry an extra spin column (8 values) or not (7).
            if hf.shape[0]%8 ==0 :
                qp = hf.reshape(-1,8)
                ib, ibp, ik, isp, rsx, isx, revx, imvx = qp.T
            else:
                qp = hf.reshape(-1,7)
                ib, ibp, ik, rsx, isx, revx, imvx = qp.T
            data['Sx'] = rsx + isx*1j
            data['Vxc'] = revx + imvx*1j
            self.data=data
            f.close()

    def parse_report(self):
        """ Parse the report files.
        produces output of this nature:
            { k-index1  : { 'dft_enrgy':[...], 'qp_energy':[...] },
              k-index2  :{...}
            }
        k-index is the kpoint at which the yambo calculation was
        done.
        """
        if not hasattr(self, 'lines'):
            with open('%s/%s'%(self.folder,self.filename)) as fl:
                self.lines = fl.readlines()
        # start with check for failure due to error:
        err = re.compile('^\s+?\[ERROR\]\s+?(.*)$')
        kpoints = re.compile('^ [A-X*]+\sK\s\[([0-9]+)\]\s[:](?:\s+)?([0-9.E-]+\s+[0-9.E-]+\s+[0-9.E-]+)\s[A-Za-z()\s*.]+[0-9]+[A-Za-z()\s*.]+([0-9.]+)')
        memory = re.compile('^\s+?<([0-9a-z-]+)> ([A-Z0-9]+)[:] \[M ([0-9.]+) Gb\]? ([a-zA-Z0-9\s.()\[\]]+)?')
        timing = re.compile('\s+?[A-Za-z]+iming\s+?[A-Za-z/\[\]]+[:]\s+?([a-z0-9-]+)[/]([a-z0-9-]+)[/]([a-z0-9-]+)')
        self.memstats.extend([ line for line in self.lines if memory.match(line)])
        for line in self.lines:
            if err.match(line):
                if 'STOP' in err.match(line).groups()[0]:
                    # stop parsing, this is a failed calc.
                    self.errors.append(err.match(line).groups()[0])
                    return
            if timing.match(line):
                self.timing.append(timing.match(line).groups()[0] )
            if kpoints.match(line):
                kindx, kpt, wgt = kpoints.match(line).groups()
                self.kpoints[str(int(kindx))] =  [ float(i.strip()) for i in kpt.split()]
        full_lines = '\n'.join(self.lines)
        # Quasiparticle blocks: one group per k-point, terminated by a blank line.
        qp_regx = re.compile('(^\s+?QP\s\[eV\]\s@\sK\s\[\d+\][a-z0-9E:()\s.-]+)(.*?)(?=^$)',re.M|re.DOTALL)
        kp_regex = re.compile('^\s+?QP\s\[eV\]\s@\sK\s\[(\d+)\][a-z0-9E:()\s.-]+$')
        spliter = re.compile('^(B[=]\d+\sEo[=]\s+?[E0-9.-]+\sE[=]\s+?[E0-9.-]+\sE[-]Eo[=]\s+?[E0-9.-]+\sRe[(]Z[)][=]\s+?[E0-9.-]+\sIm[(]Z[)][=]\s?[E0-9.-]+\snlXC[=]\s+?[E0-9.-]+\slXC[=]\s+?[E0-9.-]+\sSo[=]\s+?[E0-9.-]+)')
        extract = re.compile('B[=](\d+)\sEo[=](?:\s+)?([E0-9.-]+)\sE[=](?:\s+)?([E0-9.-]+)\sE[-]Eo[=](?:\s+)?([E0-9.-]+)\sRe[(]Z[)][=](?:\s+)?([E0-9.-]+)\sIm[(]Z[)][=](?:\s+)?[E0-9.-]+\snlXC[=](?:\s+)?([E0-9.-]+)\slXC[=](?:\s+)?([E0-9.-]+)\sSo[=](?:\s+)?([E0-9.-]+)')
        qp_lines = qp_regx.findall(full_lines)
        qp_results ={}
        for each in qp_lines: # first group of qp data, shares k-point index
            kp_index = None
            kp_results={'bindex':[],'dft_energy':[],'qp_energy':[],'qp_correction':[],
                        'z_factor':[],'non_local_xc':[],'local_xc':[],'selfenergy_c':[]}
            for line in each: # different band indexes =B
                if kp_regex.match(line):
                    kp_index = str(int(kp_regex.match(line).groups()[0]))
                else: # data line B=x Eo = y ..
                    data_lines = [ i for i in spliter.split(line) if i.strip()]
                    for qp_data in data_lines:
                        bindex, dft_energy, qp_energy, qp_correction, z_factor, \
                            non_local_xc, local_xc, selfenergy_c  = [float (i) for i in extract.match(qp_data).groups()]
                        kp_results['bindex'].append(bindex)
                        kp_results['dft_energy'].append(dft_energy)
                        kp_results['qp_energy'].append(qp_energy)
                        kp_results['qp_correction'].append(qp_correction)
                        kp_results['z_factor'].append(z_factor)
                        kp_results['non_local_xc'].append(non_local_xc)
                        kp_results['local_xc'].append(local_xc)
                        kp_results['selfenergy_c'].append(selfenergy_c)
            qp_results[kp_index] = kp_results
        self.data = qp_results

    def get_type(self):
        """ Get the type of file
        """
        return self.type

    def has_errors(self):
        """ Return True if any errors were recorded during parsing. """
        return bool(self.errors)

    def get_errors(self):
        """ Check if this is a report file and if it contains errors
        """
        if self.type == 'report':
            return self.errors
        return False

    def get_data(self):
        """ Get the data from this file as a dictionary
        """
        # Previously this method was a bare 'pass' (returning None),
        # contradicting its docstring.
        return self.data

    def parse_log(self):
        """ Get ERRORS and WARNINGS from l-* file, useful for debugging
        """
        if not hasattr(self, 'lines'):
            with open('%s/%s'%(self.folder,self.filename)) as fl:
                self.lines = fl.readlines()
        warning = re.compile('^\s+?<([0-9a-z-]+)> ([A-Z0-9]+)[:] \[(WARNING)\]? ([a-zA-Z0-9\s.()\[\]]+)?')
        error = re.compile('^\s+?<([0-9a-z-]+)> ([A-Z0-9]+)[:] \[(ERROR)\]? ([a-zA-Z0-9\s.()\[\]]+)?')
        self.warnings.extend([ line for line in self.lines if warning.match(line)])
        self.errors.extend([ line for line in self.lines if error.match(line)])

    def __bool__(self):
        # A file whose type could not be determined is falsy.
        if self.type == 'unknown':
            return False
        else:
            return True
    __nonzero__=__bool__

    def __str__(self):
        return "type: %9s   file: %s/%s"%(self.type, self.folder, self.filename)
|
henriquemiranda/yambopy
|
yamboparser/yambofile.py
|
Python
|
bsd-3-clause
| 11,075
|
[
"NetCDF",
"Yambo"
] |
8bacba086cb99afd2a45d481223231f5847cd52bbb3f69bd7bf9fe54991b4a48
|
#!/usr/bin/env python
""" Where you at? """
import sys,os
import logging
from collections import OrderedDict as odict
from datetime import datetime,timedelta,tzinfo
import dateutil.parser
import mpl_toolkits.basemap as basemap
from matplotlib.patches import Ellipse, Circle
import matplotlib.patheffects as patheffects
from _tkinter import TclError
import numpy as np
import pylab as plt
import ephem
__author__ = "Alex Drlica-Wagner"
__email__ = "kadrlica@fnal.gov"
__version__ = "2.1.3"
MAXREF=5000 # Maximum number of refreshes
DECAM=1.1 # DECam radius (deg)
# Accurate DECam marker size depends on figsize and DPI
# This is a mess...
FIGSIZE=(10.5,8.5)
SCALE=np.sqrt((8.0*6.0)/(FIGSIZE[0]*FIGSIZE[1]))
DPI=80;
# DECam filters; 'all' disables per-band selection
FILTERS = ['u','g','r','i','z','Y','VR']
BANDS = FILTERS + ['all']
# Marker color per filter; 'none' is the fallback when coloring is off
COLORS = odict([
    ('none','black'),
    ('u','blue'),
    ('g','green'),
    ('r','red'),
    ('i','gold'),
    ('z','magenta'),
    ('Y','black'),
    ('VR','gray'),
])
# Allowed map projections
PROJ = odict([
    ('ortho'  , dict(projection='ortho',celestial=True)),
    ('moll'   , dict(projection='moll',celestial=True)),
    ('mol'    , dict(projection='moll',celestial=True)),
    ('ait'    , dict(projection='hammer',celestial=True)),
    ('mbt'    , dict(projection='mbtfpq',celestial=True)),
    ('mbtfpq' , dict(projection='mbtfpq',celestial=True)),
    ('mcbryde', dict(projection='mbtfpq',celestial=True)),
])
# DES supernova field centers (deg); derived from telra,teldec of 10000 exposures
SN = odict([
    ('E1',(7.874, -43.010)),
    ('E2',(9.500, -43.999)),
    ('X1',(34.476, -4.931)),
    ('X2',(35.664,-6.413)),
    ('X3',(36.449, -4.601)),
    ('S1',(42.818, 0.000)),
    ('S2',(41.193, -0.991)),
    ('C1',(54.274, -27.113)),
    ('C2',(54.274, -29.090)),
    ('C3',(52.647, -28.101)),
])
# Label anchor positions (deg) for the supernova field groups
SN_LABELS = odict([
    ('SN-E',(8,-41)),
    ('SN-X',(35,-12)),
    ('SN-S',(45,1)),
    ('SN-C',(55,-35)),
])
# The allowed footprint outlines
FOOTPRINTS = ['none','des','des-sn','smash','maglites','bliss','decals','delve']
# CTIO location taken from:
#http://www.ctio.noao.edu/noao/content/Coordinates-Observatories-Cerro-Tololo-and-Cerro-Pachon
#http://arxiv.org/pdf/1210.1616v3.pdf
#(-30h 10m 10.73s, -70h 48m 23.52s, 2213m)
TEL_LON = -70.80653
TEL_LAT = -30.169647
TEL_HEIGHT = 2213
# Create the observatory object (pyephem wants lon/lat as strings)
CTIO = ephem.Observer()
CTIO.lon,CTIO.lat = str(TEL_LON),str(TEL_LAT)
CTIO.elevation = TEL_HEIGHT
def get_datadir():
    """Return the absolute path of the package's ``data`` directory."""
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(here, 'data')
def setdefaults(kwargs, defaults):
    """Fill *kwargs* in place with any entries missing from *defaults*.

    Returns the (mutated) *kwargs* dictionary for convenience.
    """
    for key, value in defaults.items():
        if key not in kwargs:
            kwargs[key] = value
    return kwargs
def gal2cel(glon, glat):
    """
    Converts Galactic (deg) to Celestial J2000 (deg) coordinates
    """
    lon = np.radians(glon)
    lat = np.radians(glat)
    # J2000 north galactic pole and longitude of the celestial pole.
    ra_pole = np.radians(192.85948)
    dec_pole = np.radians(27.12825)
    lon_cp = np.radians(122.932)
    delta = lon_cp - lon
    sin_dec = (np.sin(dec_pole) * np.sin(lat)
               + np.cos(dec_pole) * np.cos(lat) * np.cos(delta))
    ra_offset = np.arctan2(np.cos(lat) * np.sin(delta),
                           (np.cos(dec_pole) * np.sin(lat))
                           - (np.sin(dec_pole) * np.cos(lat) * np.cos(delta)))
    dec = np.arcsin(sin_dec)
    ra = (ra_offset + ra_pole + (2. * np.pi)) % (2. * np.pi)
    return np.degrees(ra), np.degrees(dec)
def cel2gal(ra, dec):
    """
    Converts Celestial J2000 (deg) to Galactic (deg) coordinates
    """
    ra_r = np.radians(ra)
    dec_r = np.radians(dec)
    # J2000 north galactic pole and longitude of the celestial pole.
    ra_pole = np.radians(192.85948)
    dec_pole = np.radians(27.12825)
    lon_cp = np.radians(122.932)
    dra = ra_r - ra_pole
    sin_b = (np.sin(dec_pole) * np.sin(dec_r)
             + np.cos(dec_pole) * np.cos(dec_r) * np.cos(dra))
    lon_offset = np.arctan2(np.cos(dec_r) * np.sin(dra),
                            (np.cos(dec_pole) * np.sin(dec_r))
                            - (np.sin(dec_pole) * np.cos(dec_r) * np.cos(dra)))
    glat = np.arcsin(sin_b)
    glon = (lon_cp - lon_offset + (2. * np.pi)) % (2. * np.pi)
    return np.degrees(glon), np.degrees(glat)
# Minimal fixed-offset tzinfo implementation (stdlib pre-3.2 has none).
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
class UTC(tzinfo):
    """UTC"""
    def utcoffset(self, dt):
        # UTC never deviates from itself.
        return ZERO
    def tzname(self, dt):
        return "UTC"
    def dst(self, dt):
        # UTC has no daylight-saving shift.
        return ZERO
def safe_proj(bmap, lon, lat, inverse=False):
    """Project coordinates, masking points outside the projection.

    Parameters:
    -----------
    bmap : basemap
    lon : longitude
    lat : latitude
    inverse : inverse projection

    Returns:
    --------
    x,y : projected coordinates (out-of-range entries set to NaN)
    """
    xs, ys = bmap(np.atleast_1d(lon), np.atleast_1d(lat), inverse=inverse)
    # basemap flags unprojectable points with huge magnitudes; blank them.
    for arr in (xs, ys):
        arr[np.abs(arr) > 1e29] = None
    return xs, ys
def get_boundary(bmap, projection, fact=0.99):
    """Return an Ellipse bounding the drawable area of *bmap*.

    Pseudocylindrical projections use hard-coded radii (the check does not
    work for 'ait' and 'moll' via the map object); other projections use the
    map's own major/minor radii. *fact* shrinks the ellipse slightly.
    """
    if projection in basemap._pseudocyl:
        # Radii below were estimated by eye for pseudocylindrical maps.
        semi_minor = 9.00995e6
        semi_major = 2 * semi_minor
    else:
        semi_major, semi_minor = bmap.rmajor, bmap.rminor
    return Ellipse((semi_major, semi_minor),
                   2 * (fact * semi_major), 2 * (fact * semi_minor))
def airmass_angle(x=1.4):
    """ Zenith angle (deg) for a given airmass limit """
    altitude = np.degrees(np.arcsin(1. / x))
    return 90. - altitude
def load_data(opts):
    """ Load the data (either from DB of file).
    Parameters:
    -----------
    opts : command line options
    Returns:
    --------
    data : numpy recarray
    """
    since = parse_since(opts.since)
    # '%' is the SQL LIKE wildcard: match any propid when none is given.
    propid = '%' if opts.propid is None else opts.propid
    dtype=[('expnum',int),('telra',float),('teldec',float),('filter',object)]
    if opts.infile is None:
        selection = ['id','telra','teldec','filter']
        #filter = "exposed = TRUE AND flavor LIKE '%s' AND date > '%s' AND propid LIKE '%s' ORDER BY id DESC"%(opts.flavor,since.isoformat(),propid)
        # NOTE(review): 'filter' shadows the builtin of the same name
        # (local scope only, but worth renaming eventually).
        filter = "exposed = TRUE AND flavor SIMILAR TO '%s' AND date > '%s' AND propid LIKE '%s' ORDER BY id DESC"%(opts.flavor,since.isoformat(),propid)
        # Use the FNAL mirror to avoid overloading CTIO
        try: from database import Database
        except ImportError: from pointing.database import Database
        db = Database(dbname='db-'+opts.db)
        db.connect()
        query = "SELECT %s FROM exposure WHERE %s"%(','.join(selection),filter)
        #query = "SELECT id as expnum,telra as ra,teldec as dec,filter as band FROM exposure WHERE exposed = TRUE AND flavor LIKE 'object' and telra between 80 and 82 AND teldec between -71 and -69"
        data = db.execute(query)
        # An empty result still yields a correctly-typed zero-length recarray.
        if len(data): ret = np.rec.array(data,dtype=dtype)
        else: ret = np.rec.recarray(0,dtype=dtype)
        return ret
    else:
        return np.loadtxt(opts.infile,dtype=dtype)
def mjd(datetime):
    """ Modified Julian Date (MJD) of an aware datetime.

    Parameters:
    -----------
    datetime : timezone-aware datetime instance
    Returns:
    --------
    mjd_date : float days since the MJD epoch (1858-11-17T00:00:00Z)
    """
    # The parameter shadows the datetime class, so re-import it locally.
    # Constructing the epoch directly avoids re-parsing a string with
    # dateutil on every call.
    from datetime import datetime as _datetime, timezone
    mjd_epoch = _datetime(1858, 11, 17, tzinfo=timezone.utc)
    mjd_date = (datetime - mjd_epoch).total_seconds()/float(24*60*60)
    return mjd_date
def lmst(observatory):
    """ Calculate Local Mean Sidereal Time (LMST) in degrees. """
    sidereal_deg = np.degrees(observatory.sidereal_time())
    logging.debug('Using pyephem for LMST: %.3f' % sidereal_deg)
    return sidereal_deg
def moon(datetime):
    """ Moon location
    Parameters:
    -----------
    datetime : the datetime of moon location request
    Returns:
    --------
    (ra, dec), phase : moon parameters [(deg, deg), %]
    """
    # NOTE(review): the 'datetime' argument is currently unused -- the
    # position is computed for CTIO.date, which callers (e.g. plot) set
    # beforehand. Confirm before relying on the parameter.
    moon = ephem.Moon()
    moon.compute(CTIO)
    moon_phase = moon.moon_phase * 100
    moon_ra,moon_dec = np.degrees([moon.ra,moon.dec])
    return (moon_ra, moon_dec),moon_phase
def boolean(string):
    """ Convert strings to booleans for argparse.

    Accepts (case-insensitively) 0/f/false/no/off and 1/t/true/yes/on;
    raises ValueError with an informative message otherwise.
    """
    string = string.lower()
    if string in ['0', 'f', 'false', 'no', 'off']:
        return False
    elif string in ['1', 't', 'true', 'yes', 'on']:
        return True
    else:
        # Previously raised a bare ValueError() with no message.
        raise ValueError("invalid boolean string: %r" % string)
def splash_screen():
    """ Splash text to print """
    logging.info("""Running Alex Drlica-Wagner's DECam pointing script...""")
def parse_utc(value):
    """Parse the 'utc' option into an aware datetime.

    A datetime passes straight through; None means 'now' (UTC); any other
    value is parsed as an isoformat string.
    """
    if isinstance(value, datetime):
        utc = value
    elif value is None:
        utc = datetime.now(tz=UTC())
    else:
        utc = dateutil.parser.parse(value, tzinfos={'UTC': UTC})
    logging.debug("UTC: %s" % utc.strftime('%Y-%m-%d %H:%M:%S'))
    return utc
def parse_since(value):
    """Parse the 'since' option into an aware datetime.

    A datetime passes straight through; None means 12 hours ago;
    'all'/'none'/'forever' means 2012-01-01 12:00; anything else is
    parsed as an isoformat string.
    """
    if isinstance(value, datetime):
        since = value
    elif value is None:
        since = datetime.now(tz=UTC()) - timedelta(hours=12)
    elif value.lower() in ('all', 'none', 'forever'):
        since = dateutil.parser.parse('2012-01-01 12:00', tzinfos={'UTC': UTC})
    else:
        since = dateutil.parser.parse(value, tzinfos={'UTC': UTC})
    logging.debug("Since: %s" % since.strftime('%Y-%m-%d %H:%M:%S'))
    return since
def draw_constellation(bmap, name):
    """ Draw a map of the constellations (work in progress).

    Returns the projected vertices and their path codes; previously this
    function raised NameError ('x'/'y' were never defined) and zipped the
    (x, y) tuple itself instead of pairing the coordinate arrays.
    """
    from constellations import CONSTELLATIONS
    points = np.array(CONSTELLATIONS[name])
    drawtype = points[:, 0]
    # XEphem units: RA stored in 1/1800 hour, Dec in arcmin; convert to deg.
    radeg = points[:, 1] * 1.0 / 1800 * 15
    decdeg = points[:, 2] * 1.0 / 60
    x, y = safe_proj(bmap, radeg, decdeg)
    verts = list(zip(x, y))
    # NOTE(review): XEPHEM2PATH must be defined/imported elsewhere -- confirm.
    codes = [XEPHEM2PATH[c] for c in drawtype]
    return verts, codes
def draw_milky_way(bmap, width=10, **kwargs):
    """Draw the Milky Way plane, plus dashed outlines at +/- *width* deg."""
    setdefaults(kwargs, dict(color='k', lw=1.5, ls='-'))
    logging.debug("Plotting the Milky Way")
    glon = np.linspace(0, 360, 500)
    glat = np.zeros_like(glon)
    ra, dec = gal2cel(glon, glat)
    # Wrap RA into [-180, 180] for plotting.
    ra -= 360 * (ra > 180)
    bmap.plot(*safe_proj(bmap, ra, dec), **kwargs)
    if width:
        kwargs.update(dict(ls='--', lw=1))
        for offset in (+width, -width):
            ra, dec = gal2cel(glon, glat + offset)
            bmap.plot(*safe_proj(bmap, ra, dec), **kwargs)
def draw_des(bmap,**kwargs):
    """
    Plot the DES wide-field footprint.
    Parameters:
    -----------
    bmap   : The basemap object
    kwargs : Various plotting arguments
    Returns:
    --------
    None
    """
    # Previously logged opts.footprint, but 'opts' is not defined at module
    # scope when imported as a library (NameError); log a fixed message.
    logging.debug("Plotting DES footprint")
    #basedir = os.path.dirname(os.path.abspath(__file__))
    infile = os.path.join(get_datadir(),'des-round19-poly.txt')
    perim = np.loadtxt(infile,dtype=[('ra',float),('dec',float)])
    proj = safe_proj(bmap,perim['ra'],perim['dec'])
    bmap.plot(*proj,**kwargs)
def draw_des_sn(bmap, **kwargs):
    """Plot the DES supernova fields and their group labels.

    Parameters:
    -----------
    bmap   : The basemap object
    kwargs : Various plotting arguments
    Returns:
    --------
    None
    """
    logging.debug("Plotting DES supernova fields.")
    boundary = get_boundary(bmap, kwargs.pop('projection', None), fact=0.99)
    for field in SN.values():
        # Skip fields that project outside the visible map area.
        if not boundary.contains_point(bmap(*field)):
            continue
        # tissot projects the field outline correctly (but fails at boundary)
        bmap.tissot(field[0], field[1], DECAM, 100, **kwargs)
    # The SN labels
    label_kwargs = dict(zorder=kwargs['zorder'], fontsize=12,
                        bbox=dict(boxstyle='round,pad=0', fc='w', ec='none',
                                  alpha=0.25))
    for name, position in SN_LABELS.items():
        plt.gca().annotate(name, bmap(*position), **label_kwargs)
def draw_smash(bmap, **kwargs):
    """Draw the SMASH field centers as open markers.

    Parameters:
    -----------
    bmap   : The basemap object
    kwargs : Various plotting arguments
    Returns:
    --------
    None
    """
    filename = os.path.join(get_datadir(), 'smash_fields_final.txt')
    fields = np.genfromtxt(filename, dtype=[('ra', float), ('dec', float)],
                           usecols=[4, 5])
    xs, ys = safe_proj(bmap, fields['ra'], fields['dec'])
    kwargs.update(dict(facecolor='none'))
    bmap.scatter(xs, ys, color='k', **kwargs)
def draw_maglites(bmap, **kwargs):
    """Plot the MagLiteS Phase-I footprint outline.

    Parameters:
    -----------
    bmap   : The basemap object
    kwargs : Various plotting arguments
    Returns:
    --------
    None
    """
    logging.debug("Plotting MagLiteS footprint")
    infile = os.path.join(get_datadir(), 'maglites-poly.txt')
    outline = np.loadtxt(infile, dtype=[('ra', float), ('dec', float)])
    bmap.plot(*safe_proj(bmap, outline['ra'], outline['dec']), **kwargs)
def draw_maglites2(bmap,**kwargs):
    """
    Plot the MagLiteS Phase-II footprint (multiple polygons).
    Parameters:
    -----------
    bmap   : The basemap object
    kwargs : Various plotting arguments
    Returns:
    --------
    None
    """
    # Previously logged opts.footprint, but 'opts' is not defined at module
    # scope when imported as a library (NameError); log a fixed message.
    logging.debug("Plotting MagLiteS-II footprint")
    infile = os.path.join(get_datadir(),'maglitesII-poly.txt')
    perim = np.loadtxt(infile,dtype=[('ra',float),('dec',float),('poly',int)])
    # Draw each polygon separately so they are not connected.
    for p in np.unique(perim['poly']):
        sel = (perim['poly'] == p)
        proj = safe_proj(bmap,perim[sel]['ra'],perim[sel]['dec'])
        bmap.plot(*proj,**kwargs)
def draw_bliss(bmap,**kwargs):
    """
    Plot the BLISS wide-field footprint (multiple polygons).
    Parameters:
    -----------
    bmap   : The basemap object
    kwargs : Various plotting arguments
    Returns:
    --------
    None
    """
    # Previously logged opts.footprint, but 'opts' is not defined at module
    # scope when imported as a library (NameError); log a fixed message.
    logging.debug("Plotting BLISS footprint")
    infile = os.path.join(get_datadir(),'bliss-poly.txt')
    perim = np.loadtxt(infile,dtype=[('ra',float),('dec',float),('poly',int)])
    # Draw each polygon separately so they are not connected.
    for p in np.unique(perim['poly']):
        sel = (perim['poly'] == p)
        proj = safe_proj(bmap,perim[sel]['ra'],perim[sel]['dec'])
        bmap.plot(*proj,**kwargs)
def draw_decals(bmap,**kwargs):
    """
    Plot the DECaLS wide-field footprint (multiple polygons).
    Parameters:
    -----------
    bmap   : The basemap object
    kwargs : Various plotting arguments
    Returns:
    --------
    None
    """
    # Previously logged opts.footprint, but 'opts' is not defined at module
    # scope when imported as a library (NameError); log a fixed message.
    logging.debug("Plotting DECaLS footprint")
    infile = os.path.join(get_datadir(),'decals-poly.txt')
    perim = np.loadtxt(infile,dtype=[('ra',float),('dec',float),('poly',int)])
    # Draw each polygon separately so they are not connected.
    for p in np.unique(perim['poly']):
        sel = (perim['poly'] == p)
        proj = safe_proj(bmap,perim[sel]['ra'],perim[sel]['dec'])
        bmap.plot(*proj,**kwargs)
def draw_delve(bmap,**kwargs):
    """ Draw the DELVE footprint: deep-field circles plus wide-area polygons.

    Parameters:
    -----------
    bmap   : The basemap object
    kwargs : Various plotting arguments
    Returns:
    --------
    None
    """
    defaults=dict(color='red', lw=2)
    setdefaults(kwargs,defaults)
    # Previously logged opts.footprint, but 'opts' is not defined at module
    # scope when imported as a library (NameError); log a fixed message.
    logging.debug("Plotting DELVE footprint")
    # Deep-field pointings: name -> (ra, dec, radius [deg])
    deep = odict([
        ('SextansB', (150.00, 5.33, 3.0)),
        ('IC5152', (330.67, -51.30, 3.0)),
        ('NGC300', ( 13.72, -37.68, 3.0)),
        ('NGC55', ( 3.79, -39.22, 3.0)),
    ])
    boundary = get_boundary(bmap,kwargs.pop('projection',None),fact=0.98)
    for ra,dec,radius in deep.values():
        # Skip fields that project outside the visible map area.
        if not boundary.contains_point(bmap(ra,dec)): continue
        # This does the projection correctly, but fails at boundary
        bmap.tissot(ra,dec,radius,100,fc='none',edgecolor=kwargs['color'],lw=kwargs['lw'])
    # Wide-area survey outline (one or more polygons).
    filename = os.path.join(get_datadir(),'delve-poly.txt')
    perim = np.loadtxt(filename,dtype=[('ra',float),('dec',float),('poly',int)])
    for p in np.unique(perim['poly']):
        sel = (perim['poly'] == p)
        proj = safe_proj(bmap,perim[sel]['ra'],perim[sel]['dec'])
        bmap.plot(*proj,**kwargs)
def plot(opts):
    """
    Core plotting function. Creates the basemap, overplots all of the
    requested features, and returns the map object.
    Parameters:
    -----------
    opts : command line options
    Returns:
    --------
    m : the basemap object
    """
    utc = parse_utc(opts.utc)
    # Set the observatory epoch so lmst()/moon() refer to the requested time.
    CTIO.date = utc
    # NOTE(review): 'since' is recomputed inside load_data; this local is unused.
    since = parse_since(opts.since)
    # Grab the data
    data = load_data(opts)
    # Subselect the data (keep recognized filters / the requested band only)
    sel = np.in1d(data['filter'],FILTERS)
    if opts.band in FILTERS:
        sel &= (data['filter'] == opts.band)
    data = data[sel]
    expnum,telra,teldec,band = data['expnum'],data['telra'],data['teldec'],data['filter']
    # Set the colors
    if opts.color:
        nexp = len(expnum)
        ncolors = len(COLORS)
        # NOTE(review): COLORS.keys()/.values() are fed directly to numpy;
        # this assumes Python 2 list semantics -- on Python 3 they would
        # need wrapping in list(...). Confirm the target interpreter.
        color_repeat = np.repeat(COLORS.keys(),nexp).reshape(ncolors,nexp)
        color_idx = np.argmax(band==color_repeat,axis=0)
        color = np.array(COLORS.values())[color_idx]
    else:
        color = COLORS['none']
    # Select the exposure of interest (suffix match on the exposure number)
    if opts.expnum:
        match = np.char.array(expnum).endswith(str(opts.expnum))
        if not match.any():
            msg = "Exposure matching %s not found"%opts.expnum
            raise ValueError(msg)
        idx = np.nonzero(match)[0][0]
    elif len(data)==0:
        idx = slice(None)
    else:
        idx = 0
    # Create the figure (reuse the current one when refreshing)
    if plt.get_fignums():
        fig,ax = plt.gcf(),plt.gca()
    else:
        fig,ax = plt.subplots(figsize=FIGSIZE,dpi=DPI)
        # NOTE(review): set_window_title moved to fig.canvas.manager in
        # newer matplotlib -- confirm the supported version.
        fig.canvas.set_window_title("DECam Pointings")
    #fig,ax = plt.subplots()
    # Zenith position
    lon_zen=lmst(CTIO); lat_zen = TEL_LAT
    # Create the Basemap
    proj_kwargs = PROJ[opts.proj]
    # Centering position
    if proj_kwargs['projection'] in basemap._pseudocyl:
        ### This should work, but doesn't.
        ### Compare lon_0=-80.58345277606 to lon_0=-80.6 or lon_0=-80.5
        #lon_0=lon_zen-360*(lon_zen>180),lat_zen=0
        lon_0,lat_0 = 0,0
    else:
        lon_0,lat_0 = -lon_zen, lat_zen # Center position
    proj_kwargs.update(lon_0=lon_0,lat_0=lat_0)
    bmap = basemap.Basemap(**proj_kwargs)
    def format_coord(x,y):
        #Format matplotlib cursor to display RA, Dec
        lon,lat = safe_proj(bmap,x,y,inverse=True)
        lon += 360*(lon < 0)
        return 'ra=%1.3f, dec=%1.3f'%(lon,lat)
    plt.gca().format_coord = format_coord
    # Draw the graticule and cardinal-direction labels
    parallels = np.arange(-90.,120.,30.)
    bmap.drawparallels(parallels)
    meridians = np.arange(0.,420.,60.)
    bmap.drawmeridians(meridians)
    for mer in meridians[:-1]:
        plt.annotate(r'$%i^{\circ}$'%mer,bmap(mer,5),ha='center')
    plt.annotate('West',xy=(1.0,0.5),ha='left',xycoords='axes fraction')
    plt.annotate('East',xy=(0.0,0.5),ha='right',xycoords='axes fraction')
    # markersize defined at minimum distortion point
    if proj_kwargs['projection'] in basemap._pseudocyl:
        x1,y1=ax.transData.transform(bmap(lon_0,lat_0+DECAM))
        x2,y2=ax.transData.transform(bmap(lon_0,lat_0-DECAM))
    else:
        x1,y1=ax.transData.transform(bmap(lon_zen,lat_zen+DECAM))
        x2,y2=ax.transData.transform(bmap(lon_zen,lat_zen-DECAM))
    # Since markersize defined in "points" in scales with figsize/dpi
    size = SCALE * (y1-y2)**2
    # Scale the marker size to the size of an exposure
    exp_zorder = 10
    exp_kwargs = dict(s=size,marker='H',zorder=exp_zorder,edgecolor='k',lw=1)
    # Projected exposure locations
    x,y = safe_proj(bmap,telra,teldec)
    # Plot exposure of interest
    if len(data):
        logging.debug("Plotting exposure: %i (%3.2f,%3.2f)"%(expnum[idx],telra[idx],teldec[idx]))
        # Hacked path effect (fix if matplotlib is updated)
        bmap.scatter(x[idx],y[idx],color='w',**dict(exp_kwargs,edgecolor='w',s=70,lw=2))
        bmap.scatter(x[idx],y[idx],color=color,**dict(exp_kwargs,alpha=1.0,linewidth=2))
        # Once matplotlib is updated
        #x = bmap.scatter(x[idx],y[idx],color=color,**exp_kwargs)
        #ef = patheffects.withStroke(foreground="w", linewidth=3)
        #x.set_path_effects([ef])
    # Plot previous exposures
    nexp_kwargs = dict(exp_kwargs)
    nexp_kwargs.update(zorder=exp_zorder-1,alpha=0.2,edgecolor='none')#,lw=0)
    exp_slice = slice(None,opts.numexp)
    numexp = len(x[exp_slice])
    logging.debug("Plotting last %s exposures"%(numexp))
    bmap.scatter(x[exp_slice],y[exp_slice],color=color[exp_slice],**nexp_kwargs)
    # Plot zenith position & focal plane scale
    zen_x,zen_y = bmap(lon_zen,lat_zen)
    #zen_kwargs = dict(color='green',alpha=0.75,lw=1,zorder=0)
    zen_kwargs = dict(color='green',alpha=0.75,lw=1,zorder=1000)
    if opts.zenith:
        logging.debug("Plotting zenith: (%.2f,%.2f)"%(lon_zen,lat_zen))
        bmap.plot(zen_x,zen_y,'+',ms=10,**zen_kwargs)
        logging.debug("Plotting focal plane scale.")
        bmap.tissot(lon_zen, lat_zen, DECAM, 100, fc='none', **zen_kwargs)
    # To test exposure size
    #bmap.tissot(lon_zen, lat_zen, DECAM, 100, fc='none', **zen_kwargs)
    #bmap.scatter(*bmap(lon_zen,lat_zen),**nexp_kwargs)
    #bmap.tissot(0, 0, DECAM, 100, fc='none', **zen_kwargs)
    #bmap.scatter(*bmap(0,0),**nexp_kwargs)
    # Plot airmass circle
    if opts.airmass < 1:
        logging.warning("Airmass must be greater than one.")
        opts.airmass = np.nan
    else:
        logging.debug("Plotting airmass: %s"%opts.airmass)
        angle = airmass_angle(opts.airmass)
        bmap.tissot(lon_zen, lat_zen, angle, 100, fc='none',**zen_kwargs)
    # Moon location and phase (moon() reads CTIO.date set above)
    (moon_ra,moon_dec),moon_phase = moon(utc)
    if opts.moon:
        logging.debug("Plotting moon: %i%%,(%.1f,%.1f)"%(moon_phase,moon_ra,moon_dec))
        moon_txt = '%i%%'%moon_phase
        #bbox = dict(boxstyle='circle,pad=0.4',fc='k',ec='k',alpha=0.25,lw=2)
        moon_kwargs = dict(zorder=exp_zorder-1,fontsize=11,va='center',ha='center',weight='bold')
        ax.annotate(moon_txt,bmap(moon_ra,moon_dec),**moon_kwargs)
        # Again old matplotlib making things difficult
        moon_kwargs2 = dict(facecolor='k',alpha=0.25,lw=2,s=2000)
        ax.scatter(*bmap(moon_ra,moon_dec),**moon_kwargs2)
    # Optionally overplot the Milky Way plane
    if opts.mw:
        mw_kwargs = dict(color='k')
        draw_milky_way(bmap,**mw_kwargs)
    # Plot footprint(s)
    fp_zorder=exp_zorder-1
    fp_kwargs=dict(marker='o',mew=0,mfc='none',color='k',lw=2,zorder=fp_zorder)
    if 'none' in opts.footprint:
        opts.footprint = ['none']
    if 'des' in opts.footprint:
        des_kwargs = dict(fp_kwargs,color='b')
        draw_des(bmap,**des_kwargs)
    if 'des' in opts.footprint or 'des-sn' in opts.footprint:
        sn_kwargs = dict(facecolor='none',edgecolor='b',projection=proj_kwargs['projection'],zorder=fp_zorder)
        draw_des_sn(bmap,**sn_kwargs)
    if 'smash' in opts.footprint:
        smash_kwargs = dict(facecolor='none',**exp_kwargs)
        smash_kwargs.update(zorder=exp_zorder+1)
        draw_smash(bmap,**smash_kwargs)
    if 'maglites' in opts.footprint:
        maglites_kwargs = dict(fp_kwargs,color='r')
        draw_maglites(bmap,**maglites_kwargs)
        draw_maglites2(bmap,**maglites_kwargs)
    if 'bliss' in opts.footprint:
        bliss_kwargs = dict(fp_kwargs,color='r')
        draw_bliss(bmap,**bliss_kwargs)
    if 'decals' in opts.footprint:
        decals_kwargs = dict(fp_kwargs,color='m')
        draw_decals(bmap,**decals_kwargs)
    if 'delve' in opts.footprint:
        delve_kwargs = dict(fp_kwargs,color='r')
        draw_delve(bmap,**delve_kwargs)
    # Annotate with some information
    if opts.legend:
        logging.debug("Adding info text.")
        bbox_props = dict(boxstyle='round', facecolor='white')
        textstr= "%s %s\n"%("UTC:",utc.strftime('%Y-%m-%d %H:%M:%S'))
        if len(data):
            textstr+="%s %i (%s)\n"%("Exposure:",expnum[idx],band[idx])
        textstr+="%s %i\n"%("Num. Exp.:",numexp)
        textstr+="%s (%.1f$^{\circ}$, %.1f$^{\circ}$)\n"%("Zenith:",lon_zen,lat_zen)
        textstr+="%s %s\n"%("Airmass:",np.nan_to_num(opts.airmass))
        textstr+="%s %i%% (%.1f$^{\circ}$, %.1f$^{\circ}$)\n"%("Moon:",moon_phase,moon_ra,moon_dec)
        textstr+="%s %s"%("Footprint:",', '.join(opts.footprint))
        ax.annotate(textstr, xy=(0.90,1.05), xycoords='axes fraction',
                    fontsize=10,ha='left',va='top', bbox=bbox_props)
    # Plot filter legend
    if opts.color:
        logging.debug("Adding filter legend.")
        leg_kwargs = dict(scatterpoints=1,fontsize=10,bbox_to_anchor=(0.08,0.20))
        handles, labels = [],[]
        for k in FILTERS:
            # Only show VR when it actually appears in the data.
            if k == 'VR' and not (band=='VR').any(): continue
            labels.append(k)
            handles.append(plt.scatter(None,None,color=COLORS[k],**exp_kwargs))
        plt.legend(handles,labels,**leg_kwargs)
    # Plot the version number
    vers_kwargs = dict(xy=(0.985,0.015),ha='right',va='bottom',
                       xycoords='figure fraction',size=8)
    plt.annotate('pointing v.%s'%__version__,**vers_kwargs)
    # Plot the author's name
    auth_kwargs = dict(xy=(0.015,0.015),ha='left',va='bottom',
                       xycoords='figure fraction',size=8)
    plt.annotate(u'\u00a9'+' %s'%__author__,**auth_kwargs)
    return bmap
if __name__ == "__main__":
    import argparse
    description = __doc__
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('expnum', nargs='?', type=int, default=None,
                        help="exposure number to plot")
    parser.add_argument('-a', '--airmass', default=1.4, type=float,
                        help='draw airmass limit')
    parser.add_argument('-b', '--band', default='all', choices=BANDS,
                        help='draw exposures in specific band')
    parser.add_argument('-c', '--color', default=True, type=boolean,
                        help='color corresponding to filter')
    parser.add_argument('--db', default='fnal', choices=['ctio', 'fnal'],
                        help='database to query for exposures')
    parser.add_argument('-f', '--footprint', action='append', choices=FOOTPRINTS,
                        help='footprint to draw')
    parser.add_argument('--flavor', default='object|standard', type=str,
                        help='exposure type [object,zero,dome flat,etc.]')
    parser.add_argument('-i', '--infile', default=None,
                        help='list of exposures to draw')
    parser.add_argument('--legend', default=True, type=boolean,
                        help='draw figure legend')
    parser.add_argument('-m', '--moon', default=True, type=boolean,
                        help='draw moon location and phase')
    parser.add_argument('--mw', action='store_true',
                        help='draw the Milky Way plane')
    parser.add_argument('-n', '--numexp', default=None, type=int,
                        help='number of most recent exposures to plot')
    parser.add_argument('-o', '--outfile', default=None,
                        help='output file for saving figure')
    parser.add_argument('--propid', default=None,
                        help='draw exposures from specific propid')
    parser.add_argument('--proj', default='ortho', choices=PROJ.keys(),
                        help='projection for plot')
    parser.add_argument('--refresh', nargs='?', default=None, const=60, type=int,
                        help="refresh interval for figure (seconds).")
    parser.add_argument('--since', default=None,
                        help="UTC for first exposure (defaults to 12 hours)")
    parser.add_argument('--utc', default=None,
                        help="UTC for zenith position (defaults to 'now')")
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='output verbosity')
    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-z', '--zenith', default=True, type=boolean,
                        help="draw zenith position")
    opts = parser.parse_args()

    # Set logging level
    logging.basicConfig(level=logging.DEBUG if opts.verbose else logging.INFO,
                        format='%(message)s', stream=sys.stdout)

    # Default to the DES footprint when none was requested on the command line.
    if not opts.footprint: opts.footprint = ['des']

    # Do the plotting
    m = plot(opts)

    # In interactive session
    if sys.flags.interactive: plt.ion()

    if opts.outfile:
        # Save the figure
        logging.debug("Saving figure to: %s" % opts.outfile)
        plt.savefig(opts.outfile, dpi=250)
    elif not opts.refresh:
        # Show plot
        plt.show()
    else:
        # Refresh the plot
        plt.show(block=False)
        for i in range(MAXREF):  # safer than while loop
            try:
                plt.pause(opts.refresh)
            except TclError:
                # Catch the TclError thrown when window closed
                break
            logging.debug("Refreshing plot...")
            plt.cla()
            m = plot(opts)
        else:
            # BUGFIX: the original check `if i == MAXREF` could never be true,
            # since `range(MAXREF)` stops at MAXREF-1. The for/else clause runs
            # exactly when the loop exhausts without hitting `break`.
            logging.info("Reached max refresh number.")
|
kadrlica/pointing
|
pointing/pointing.py
|
Python
|
mit
| 29,180
|
[
"Galaxy"
] |
57175f7c96750874b1188048bffa941a31462810d6a0a1bdb387015299a3e32d
|
"""
Assume the production of Individual instances.
"""
import random
from functools import partial
import neural_world.default as default
from neural_world.commons import NeuronType
from neural_world.individual import Individual
from neural_world.commons import Configurable
from neural_world.neural_network import NeuralNetwork
class Incubator(Configurable):
    """Spawn Individual instances.

    A simple way to create a new Incubator behavior is to subclass Incubator,
    and redefine the following methods: memory_size, nb_inter_neuron,
    nb_edges and random_neuron_type.
    """

    def __init__(self, config):
        """Pull the spawning parameters from the given configuration."""
        super().__init__(config=config, config_fields=[
            'memory_min_size', 'memory_max_size',
            'neuron_inter_mincount', 'neuron_inter_maxcount',
            'neuron_edges_mincount', 'neuron_edges_maxcount',
            'mutator',
        ])
        self.neuron_types = NeuronType.xano()

    def memory_size(self):
        """Return a random memory size within the configured bounds."""
        return random.randint(self.memory_min_size, self.memory_max_size)

    def nb_inter_neuron(self):
        """Return a random number of intermediate neurons within the configured bounds."""
        return random.randint(self.neuron_inter_mincount, self.neuron_inter_maxcount)

    def nb_edges(self):
        """Return a random number of edges within the configured bounds."""
        return random.randint(self.neuron_edges_mincount, self.neuron_edges_maxcount)

    def random_neuron_type(self):
        """Return a random neuron type"""
        return random.choice(self.neuron_types)

    def spawn(self):
        """Spawn and return a new Individual instance."""
        # Construction of the neural network
        neural_network = NeuralNetwork(
            nb_inter_neuron=self.nb_inter_neuron(),
            memory_size=self.memory_size(),
        )
        nb_neuron_type = neural_network.nb_neuron_type

        # BUGFIX: use local helpers instead of monkey-patching the stdlib
        # `random` module with ad-hoc `random.nid`/`random.eid` attributes.
        def random_nid():
            return random.randint(NeuralNetwork.MINIMAL_NEURON_ID,
                                  neural_network.nb_neuron)

        def random_edge():
            return (random_nid(), random_nid())

        neural_network.build(
            edges=(random_edge() for _ in range(self.nb_edges())),
            neuron_types=(self.random_neuron_type() for _ in range(nb_neuron_type))
        )
        # Spawn of the individual itself
        return Individual(
            neural_network=neural_network,
            energy=10,
        )

    def clone(self, indiv, energy=None):
        """Clone given individual, and apply self mutator on clone, which will
        get the given energy.

        If energy is None, half of the energy kept by self will be given
        to the new clone.
        """
        return indiv.clone(self.mutator, energy)
|
Aluriak/neural_world
|
neural_world/incubator.py
|
Python
|
gpl-2.0
| 2,689
|
[
"NEURON"
] |
ced6217cf7f11096bc4aeb602e421038cfb578be7e7f340517826a3ed745a02b
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
The initial version of this module was based on a similar implementation
present in FireWorks (https://pypi.python.org/pypi/FireWorks).
Work done by D. Waroquiers, A. Jain, and M. Kocher.
The main difference wrt the Fireworks implementation is that the QueueAdapter
objects provide a programmatic interface for setting important attributes
such as the number of MPI nodes, the number of OMP threads and the memory requirements.
This programmatic interface is used by the `TaskManager` for optimizing the parameters
of the run before submitting the job (Abinit provides the autoparal option that
allows one to get a list of parallel configuration and their expected efficiency).
"""
from __future__ import print_function, division, unicode_literals
import sys
import os
import abc
import string
import copy
import getpass
import six
import json
import math
from . import qutils as qu
from collections import namedtuple
from subprocess import Popen, PIPE
from pymatgen.util.io_utils import AtomicFile
from monty.string import is_string, list_strings
from monty.collections import AttrDict
from monty.functools import lazy_property
from monty.inspect import all_subclasses
from monty.io import FileLock
from monty.json import MSONable
from pymatgen.core.units import Memory
from .utils import Condition
from .launcher import ScriptEditor
from .qjobs import QueueJob
import logging
logger = logging.getLogger(__name__)
__all__ = [
"make_qadapter",
]
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
# NOTE(review): the underlying namedtuple is named "SubmitResult" (singular);
# kept as-is for backward compatibility with pickled/serialized instances.
class SubmitResults(namedtuple("SubmitResult", "qid, out, err, process")):
    """
    Named tuple created by the concrete implementations of _submit_to_queue
    to pass back the results of submitting the job file to the queue.

    qid: queue id of the submission
    out: stdout of the submission
    err: stderr of the submission
    process: process object of the submission
    """
class MpiRunner(object):
    """
    This object provides an abstraction for the mpirunner provided
    by the different MPI libraries. Its main task is handling the
    different syntax and options supported by the different mpirunners.
    """
    def __init__(self, name, type=None, options=""):
        """
        Args:
            name: Name of (or path to) the runner, e.g. "mpirun", "mpiexec".
            type: Optional flavor of the runner. Only None is supported at present.
            options: Extra options (currently unused by string_to_run).
        """
        self.name = name
        # BUGFIX: the original code assigned None unconditionally,
        # silently discarding the caller's `type` argument.
        self.type = type
        self.options = options

    def string_to_run(self, qad, executable, stdin=None, stdout=None, stderr=None, exec_args=None):
        """
        Build and return the shell command line used to launch `executable`.

        Args:
            qad: QueueAdapter-like object (only `mpi_procs` is read here;
                `bgsize_rankspernode()` for the "runjob" runner).
            executable: Path of the executable to run.
            stdin, stdout, stderr: Optional file names for I/O redirection.
            exec_args: Optional list of arguments passed to the executable.

        Raises:
            NotImplementedError: if self.type is not None.
            ValueError: if an unknown runner is used with mpi_procs != 1.
        """
        stdin = "< " + stdin if stdin is not None else ""
        stdout = "> " + stdout if stdout is not None else ""
        stderr = "2> " + stderr if stderr is not None else ""
        if exec_args:
            executable = executable + " " + " ".join(list_strings(exec_args))
        basename = os.path.basename(self.name)
        if basename in ["mpirun", "mpiexec", "srun"]:
            if self.type is None:
                # $MPIRUN -n $MPI_PROCS $EXECUTABLE < $STDIN > $STDOUT 2> $STDERR
                num_opt = "-n " + str(qad.mpi_procs)
                cmd = " ".join([self.name, num_opt, executable, stdin, stdout, stderr])
            else:
                raise NotImplementedError("type %s is not supported!" % self.type)
        elif basename == "runjob":
            #runjob --ranks-per-node 2 --exp-env OMP_NUM_THREADS --exe $ABINIT < $STDIN > $STDOUT 2> $STDERR
            #runjob -n 2 --exp-env=OMP_NUM_THREADS --exe $ABINIT < $STDIN > $STDOUT 2> $STDERR
            # exe must be absolute path or relative to cwd.
            bg_size, rpn = qad.bgsize_rankspernode()
            num_opt = "--ranks-per-node " + str(rpn)
            cmd = " ".join([self.name, num_opt, "--exp-env OMP_NUM_THREADS",
                            "--exe `which " + executable + "` ", stdin, stdout, stderr])
        else:
            if qad.mpi_procs != 1:
                # BUGFIX: the original format string was malformed
                # ("... mpi_procs > when ... basename=%"), which itself raised
                # "incomplete format" instead of the intended message.
                raise ValueError("Cannot use mpi_procs > 1 when mpi_runner basename=%s" % basename)
            cmd = " ".join([executable, stdin, stdout, stderr])
        return cmd
class OmpEnv(AttrDict):
    """
    Dictionary with the OpenMP environment variables
    see https://computing.llnl.gov/tutorials/openMP/#EnvironmentVariables
    """
    # Recognized OpenMP environment variables.
    # BUGFIX: removed the duplicated OMP_STACKSIZE and OMP_PROC_BIND entries.
    _KEYS = [
        "OMP_SCHEDULE",
        "OMP_NUM_THREADS",
        "OMP_DYNAMIC",
        "OMP_PROC_BIND",
        "OMP_NESTED",
        "OMP_STACKSIZE",
        "OMP_WAIT_POLICY",
        "OMP_MAX_ACTIVE_LEVELS",
        "OMP_THREAD_LIMIT",
    ]

    @classmethod
    def as_ompenv(cls, obj):
        """Convert an object into a OmpEnv"""
        if isinstance(obj, cls): return obj
        if obj is None: return cls()
        return cls(**obj)

    def __init__(self, *args, **kwargs):
        """
        Constructor method inherited from dictionary.
        Note that values are normalized to strings, since environment
        variables are strings:

        >>> assert OmpEnv(OMP_NUM_THREADS=1).OMP_NUM_THREADS == "1"

        To create an instance from an INI file, use:
        OmpEnv.from_file(filename)

        Raises:
            ValueError: if a key is not a recognized OpenMP variable.
        """
        super(OmpEnv, self).__init__(*args, **kwargs)
        err_msg = ""
        for key, value in self.items():
            self[key] = str(value)
            if key not in self._KEYS:
                err_msg += "unknown option %s\n" % key
        if err_msg:
            raise ValueError(err_msg)

    def export_str(self):
        """Return a string with the bash statements needed to setup the OMP env."""
        return "\n".join("export %s=%s" % (k, v) for k, v in self.items())
class Hardware(object):
    """
    This object collects information on the hardware available in a given queue.

    Basic definitions:

    - A node refers to the physical box, i.e. cpu sockets with north/south switches connecting memory systems
      and extension cards, e.g. disks, nics, and accelerators

    - A cpu socket is the connector to these systems and the cpu cores

    - A cpu core is an independent computing with its own computing pipeline, logical units, and memory controller.
      Each cpu core will be able to service a number of cpu threads, each having an independent instruction stream
      but sharing the cores memory controller and other logical units.
    """
    def __init__(self, **kwargs):
        """Build from keyword arguments: num_nodes, sockets_per_node, cores_per_socket, mem_per_node."""
        self.num_nodes = int(kwargs.pop("num_nodes"))
        self.sockets_per_node = int(kwargs.pop("sockets_per_node"))
        self.cores_per_socket = int(kwargs.pop("cores_per_socket"))
        # Convert memory to megabytes.
        m = str(kwargs.pop("mem_per_node"))
        self.mem_per_node = int(Memory.from_string(m).to("Mb"))
        if self.mem_per_node <= 0 or self.sockets_per_node <= 0 or self.cores_per_socket <= 0:
            raise ValueError("invalid parameters: %s" % kwargs)
        # Any leftover keyword is a configuration error.
        if kwargs:
            raise ValueError("Found invalid keywords in the partition section:\n %s" % list(kwargs.keys()))

    def __str__(self):
        """String representation."""
        lines = []
        app = lines.append
        app("   num_nodes: %d, sockets_per_node: %d, cores_per_socket: %d, mem_per_node %s," %
            (self.num_nodes, self.sockets_per_node, self.cores_per_socket, self.mem_per_node))
        return "\n".join(lines)

    @property
    def num_cores(self):
        """Total number of cores available"""
        return self.cores_per_socket * self.sockets_per_node * self.num_nodes

    @property
    def cores_per_node(self):
        """Number of cores per node."""
        return self.cores_per_socket * self.sockets_per_node

    @property
    def mem_per_core(self):
        """Memory available per core (in megabytes); the original docstring wrongly said per node."""
        return self.mem_per_node / self.cores_per_node

    def can_use_omp_threads(self, omp_threads):
        """True if omp_threads fit in a node."""
        return self.cores_per_node >= omp_threads

    def divmod_node(self, mpi_procs, omp_threads):
        """Use divmod to compute (num_nodes, rest_cores)"""
        return divmod(mpi_procs * omp_threads, self.cores_per_node)

    def as_dict(self):
        """Return a JSON-serializable dict; inverse of from_dict."""
        return {'num_nodes': self.num_nodes,
                'sockets_per_node': self.sockets_per_node,
                'cores_per_socket': self.cores_per_socket,
                'mem_per_node': str(Memory(val=self.mem_per_node, unit='Mb'))}

    @classmethod
    def from_dict(cls, dd):
        """Reconstruct a Hardware instance from the dict produced by as_dict."""
        return cls(num_nodes=dd['num_nodes'],
                   sockets_per_node=dd['sockets_per_node'],
                   cores_per_socket=dd['cores_per_socket'],
                   mem_per_node=dd['mem_per_node'])
class _ExcludeNodesFile(object):
    """
    This file contains the list of nodes to be excluded.
    Nodes are indexed by queue name.
    """
    DIRPATH = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")
    FILEPATH = os.path.join(DIRPATH, "exclude_nodes.json")

    def __init__(self):
        # Create an empty JSON file on first use.
        if not os.path.exists(self.FILEPATH):
            if not os.path.exists(self.DIRPATH): os.makedirs(self.DIRPATH)
            with FileLock(self.FILEPATH):
                with open(self.FILEPATH, "w") as fh:
                    json.dump({}, fh)

    def read_nodes(self, qname):
        """Return the list of excluded nodes for queue `qname` (empty list if none)."""
        # BUGFIX: open in read mode; the original "w" mode truncated
        # the file before trying to read it.
        with open(self.FILEPATH, "r") as fh:
            return json.load(fh).get(qname, [])

    def add_nodes(self, qname, nodes):
        """Add `nodes` (a single name or a list of names) to the excluded nodes of queue `qname`."""
        nodes = (nodes,) if not isinstance(nodes, (tuple, list)) else nodes
        with FileLock(self.FILEPATH):
            # BUGFIX: read the current content *before* rewriting the file;
            # the original called json.load on a file opened in "w+" mode,
            # which is always empty and therefore always fails.
            try:
                with open(self.FILEPATH, "r") as fh:
                    d = json.load(fh)
            except (IOError, ValueError):
                d = {}
            with AtomicFile(self.FILEPATH, mode="w") as fh:
                # BUGFIX: the original code used the literal key "qname"
                # instead of the value of the `qname` argument.
                if qname in d:
                    d[qname] = list(set(d[qname]) | set(nodes))
                else:
                    d[qname] = list(nodes)
                json.dump(d, fh)
# Module-level singleton used to record/query the nodes to be excluded.
_EXCL_NODES_FILE = _ExcludeNodesFile()
def show_qparams(qtype, stream=sys.stdout):
    """Print to the given stream the template of the :class:`QueueAdapter` of type `qtype`."""
    match = next((sub for sub in all_subclasses(QueueAdapter) if sub.QTYPE == qtype), None)
    if match is None:
        raise ValueError("Cannot find class associated to qtype %s" % qtype)
    return stream.write(match.QTEMPLATE)
def all_qtypes():
    """Return a sorted list with all the qtype strings supported by the QueueAdapter subclasses."""
    return sorted(sub.QTYPE for sub in all_subclasses(QueueAdapter))
def make_qadapter(**kwargs):
    """
    Return the concrete :class:`QueueAdapter` instance selected by the
    "qtype" entry of the "queue" sub-dictionary of kwargs.

    Note that one can register a customized version with:

    .. example::

        from qadapters import SlurmAdapter

        class MyAdapter(SlurmAdapter):
            QTYPE = "myslurm"
            # Add your customized code here

        # Register your class.
        SlurmAdapter.register(MyAdapter)

        make_qadapter(qtype="myslurm", **kwargs)

    .. warning::

        MyAdapter should be pickleable, hence one should declare it
        at the module level so that pickle can import it at run-time.
    """
    # Map each registered qtype string to its QueueAdapter subclass.
    qad_classes = {sub.QTYPE: sub for sub in all_subclasses(QueueAdapter)}
    # Preventive copy before pop
    kwargs = copy.deepcopy(kwargs)
    qtype = kwargs["queue"].pop("qtype")
    return qad_classes[qtype](**kwargs)
class QScriptTemplate(string.Template):
    """string.Template subclass using `$$` as the placeholder delimiter (so that plain `$` survives)."""
    delimiter = "$$"
class QueueAdapterError(Exception):
    """Base Error class for exceptions raised by QueueAdapter."""


class MaxNumLaunchesError(QueueAdapterError):
    """Raised by `submit_to_queue` if we try to submit more than `max_num_launches` times."""
class QueueAdapter(six.with_metaclass(abc.ABCMeta, MSONable)):
"""
The `QueueAdapter` is responsible for all interactions with a specific queue management system.
This includes handling all details of queue script format as well as queue submission and management.
This is the **abstract** base class defining the methods that must be implemented by the concrete classes.
Concrete classes should extend this class with implementations that work on specific queue systems.
.. note::
A `QueueAdapter` has a handler (:class:`QueueJob`) defined in qjobs.py that allows one
to contact the resource manager to get info about the status of the job.
Each concrete implementation of `QueueAdapter` should have a corresponding `QueueJob`.
"""
Error = QueueAdapterError
MaxNumLaunchesError = MaxNumLaunchesError
@classmethod
def all_qtypes(cls):
"""Return sorted list with all qtypes supported."""
return sorted([subcls.QTYPE for subcls in all_subclasses(cls)])
    @classmethod
    def autodoc(cls):
        """Return a YAML-style string documenting the configuration options supported by the qadapter."""
        return """
# Dictionary with info on the hardware available on this queue.
hardware:
    num_nodes:        # Number of nodes available on this queue (integer, MANDATORY).
    sockets_per_node: # Number of sockets per node (integer, MANDATORY).
    cores_per_socket: # Number of cores per socket (integer, MANDATORY).
                      # The total number of cores available on this queue is
                      # `num_nodes * sockets_per_node * cores_per_socket`.

# Dictionary with the options used to prepare the enviroment before submitting the job
job:
    setup:        # List of commands (strings) executed before running (DEFAULT: empty)
    omp_env:      # Dictionary with OpenMP environment variables (DEFAULT: empty i.e. no OpenMP)
    modules:      # List of modules to be imported before running the code (DEFAULT: empty).
                  # NB: Error messages produced by module load are redirected to mods.err
    shell_env:    # Dictionary with shell environment variables.
    mpi_runner:   # MPI runner. Possible values in [mpirun, mpiexec, None]
                  # DEFAULT: None i.e. no mpirunner is used.
    shell_runner: # Used for running small sequential jobs on the front-end. Set it to None
                  # if mpirun or mpiexec are not available on the fron-end. If not
                  # given, small sequential jobs are executed with `mpi_runner`.
    pre_run:      # List of commands (strings) executed before the run (DEFAULT: empty)
    post_run:     # List of commands (strings) executed after the run (DEFAULT: empty)

# dictionary with the name of the queue and optional parameters
# used to build/customize the header of the submission script.
queue:
    qname:   # Name of the queue (string, MANDATORY)
    qparams: # Dictionary with values used to generate the header of the job script
             # See pymatgen.io.abinit.qadapters.py for the list of supported values.

# dictionary with the constraints that must be fulfilled in order to run on this queue.
limits:
    min_cores:        # Minimum number of cores (integer, DEFAULT: 1)
    max_cores:        # Maximum number of cores (integer, MANDATORY). Hard limit to hint_cores:
                      # it's the limit beyond which the scheduler will not accept the job (MANDATORY).
    hint_cores:       # The limit used in the initial setup of jobs.
                      # Fix_Critical method may increase this number until max_cores is reached
    min_mem_per_proc: # Minimum memory per MPI process in Mb, units can be specified e.g. 1.4 Gb
                      # (DEFAULT: hardware.mem_per_core)
    max_mem_per_proc: # Maximum memory per MPI process in Mb, units can be specified e.g. `1.4Gb`
                      # (DEFAULT: hardware.mem_per_node)
    timelimit:        # Initial time-limit. Accepts time according to slurm-syntax i.e:
                      # "days-hours" or "days-hours:minutes" or "days-hours:minutes:seconds" or
                      # "minutes" or "minutes:seconds" or "hours:minutes:seconds",
    timelimit_hard:   # The hard time-limit for this queue. Same format as timelimit.
                      # Error handlers could try to submit jobs with increased timelimit
                      # up to timelimit_hard. If not specified, timelimit_hard == timelimit
    condition:        # MongoDB-like condition (DEFAULT: empty, i.e. not used)
    allocation:       # String defining the policy used to select the optimal number of CPUs.
                      # possible values are in ["nodes", "force_nodes", "shared"]
                      # "nodes" means that we should try to allocate entire nodes if possible.
                      # This is a soft limit, in the sense that the qadapter may use a configuration
                      # that does not fulfill this requirement. In case of failure, it will try to use the
                      # smallest number of nodes compatible with the optimal configuration.
                      # Use `force_nodes` to enfore entire nodes allocation.
                      # `shared` mode does not enforce any constraint (DEFAULT: shared).
    max_num_launches: # Limit to the number of times a specific task can be restarted (integer, DEFAULT: 5)
"""
    def __init__(self, **kwargs):
        """
        Args:
            qname: Name of the queue.
            qparams: Dictionary with the parameters used in the template.
            setup: String or list of commands to execute during the initial setup.
            modules: String or list of modules to load before running the application.
            shell_env: Dictionary with the environment variables to export before running the application.
            omp_env: Dictionary with the OpenMP variables.
            pre_run: String or list of commands to execute before launching the calculation.
            post_run: String or list of commands to execute once the calculation is completed.
            mpi_runner: Path to the MPI runner or :class:`MpiRunner` instance. None if not used
            max_num_launches: Maximum number of submissions that can be done for a specific task. Defaults to 5
            qverbatim:
            min_cores, max_cores, hint_cores: Minimum, maximum, and hint limits of number of cores that can be used
            min_mem_per_proc: Minimum memory per process in megabytes.
            max_mem_per_proc: Maximum memory per process in megabytes.
            timelimit: initial time limit in seconds
            timelimit_hard: hard timelimit for this queue
            priority: Priority level, integer number > 0
            condition: Condition object (dictionary)

        .. note::

            priority is a non-negative integer used to order the qadapters. The :class:`TaskManager` will
            try to run jobs on the qadapter with the highest priority if possible
        """
        # TODO
        #task_classes

        # Make defensive copies so that we can change the values at runtime.
        kwargs = copy.deepcopy(kwargs)
        self.priority = int(kwargs.pop("priority"))

        # Each section of the configuration is consumed by its own parser.
        self.hw = Hardware(**kwargs.pop("hardware"))
        self._parse_queue(kwargs.pop("queue"))
        self._parse_limits(kwargs.pop("limits"))
        self._parse_job(kwargs.pop("job"))

        self.set_master_mem_overhead(kwargs.pop("master_mem_overhead", 0))

        # List of dictionaries with the parameters used to submit jobs
        # The launcher will use this information to increase the resources
        self.launches = []

        # Anything left in kwargs at this point is an unknown option.
        if kwargs:
            raise ValueError("Found unknown keywords:\n%s" % list(kwargs.keys()))

        self.validate_qparams()

        # Initialize some values from the info reported in the partition.
        self.set_mpi_procs(self.min_cores)
        self.set_mem_per_proc(self.min_mem_per_proc)

        # Final consistency check.
        self.validate_qparams()
    def as_dict(self):
        """
        Provides a simple though not complete dict serialization of the object (OMP missing, not all limits are
        kept in the dictionary, ... other things to be checked)

        The layout of the returned dict mirrors the kwargs accepted by `make_qadapter`
        and is consumed by `from_dict`.

        Raise:
            `ValueError` if errors.
        """
        if self.has_omp:
            raise NotImplementedError('as_dict method of QueueAdapter not yet implemented when OpenMP is activated')
        return {'@module': self.__class__.__module__,
                '@class': self.__class__.__name__,
                'priority': self.priority,
                'hardware': self.hw.as_dict(),
                'queue': {'qtype': self.QTYPE,
                          'qname': self._qname,
                          'qnodes': self.qnodes,
                          'qparams': self._qparams},
                'limits': {'timelimit_hard': self._timelimit_hard,
                           'timelimit': self._timelimit,
                           'min_cores': self.min_cores,
                           'max_cores': self.max_cores,
                           'min_mem_per_proc': self.min_mem_per_proc,
                           'max_mem_per_proc': self.max_mem_per_proc,
                           },
                'job': {},
                'mpi_procs': self._mpi_procs,
                'mem_per_proc': self._mem_per_proc,
                'master_mem_overhead': self._master_mem_overhead
                }
    @classmethod
    def from_dict(cls, dd):
        """
        Reconstruct a QueueAdapter (via `make_qadapter`) from the dict produced by `as_dict`.

        Raise:
            `ValueError` if `dd` contains unknown keywords.
        """
        priority = dd.pop('priority')
        hardware = dd.pop('hardware')
        queue = dd.pop('queue')
        limits = dd.pop('limits')
        job = dd.pop('job')
        qa = make_qadapter(priority=priority, hardware=hardware, queue=queue, limits=limits, job=job)
        qa.set_mpi_procs(dd.pop('mpi_procs'))
        qa.set_mem_per_proc(dd.pop('mem_per_proc'))
        # master_mem_overhead is optional for backward compatibility.
        qa.set_master_mem_overhead(dd.pop('master_mem_overhead', 0))
        timelimit = dd.pop('timelimit', None)
        if timelimit is not None:
            qa.set_timelimit(timelimit=timelimit)
        # Discard the JSON-serialization metadata added by as_dict.
        dd.pop('@module', None)
        dd.pop('@class', None)
        if dd:
            raise ValueError("Found unknown keywords:\n%s" % list(dd.keys()))
        return qa
    def validate_qparams(self):
        """
        Check if the keys specified by the user in qparams are supported.

        Raise:
            `ValueError` if errors.
        """
        # No validation for ShellAdapter.
        if isinstance(self, ShellAdapter): return

        # Parse the template so that we know the list of supported options.
        err_msg = ""
        for param in self.qparams:
            if param not in self.supported_qparams:
                err_msg += "Unsupported QUEUE parameter name %s\n" % param
                err_msg += "Supported parameters:\n"
                for param_sup in self.supported_qparams:
                    err_msg += " %s \n" % param_sup

        if err_msg:
            raise ValueError(err_msg)
    def _parse_limits(self, d):
        """Parse the `limits` section of the configuration dictionary (consumed in place)."""
        # Time limits.
        self.set_timelimit(qu.timelimit_parser(d.pop("timelimit")))
        tl_hard = d.pop("timelimit_hard", None)
        # If no hard limit is given, it defaults to the soft one.
        tl_hard = qu.timelimit_parser(tl_hard) if tl_hard is not None else self.timelimit
        self.set_timelimit_hard(tl_hard)

        # Cores
        self.min_cores = int(d.pop("min_cores", 1))
        self.max_cores = int(d.pop("max_cores"))
        self.hint_cores = int(d.pop("hint_cores", self.max_cores))
        if self.min_cores > self.max_cores:
            raise ValueError("min_cores %s cannot be greater than max_cores %s" % (self.min_cores, self.max_cores))

        # Memory
        # FIXME: Needed because autoparal 1 with paral_kgb 1 is not able to estimate memory
        self.min_mem_per_proc = qu.any2mb(d.pop("min_mem_per_proc", self.hw.mem_per_core))
        self.max_mem_per_proc = qu.any2mb(d.pop("max_mem_per_proc", self.hw.mem_per_node))

        # Misc
        self.max_num_launches = int(d.pop("max_num_launches", 5))
        self.condition = Condition(d.pop("condition", {}))
        self.allocation = d.pop("allocation", "shared")
        if self.allocation not in ("nodes", "force_nodes", "shared"):
            raise ValueError("Wrong value for `allocation` option")

        if d:
            raise ValueError("Found unknown keyword(s) in limits section:\n %s" % list(d.keys()))
def _parse_job(self, d):
setup = d.pop("setup", None)
if is_string(setup): setup = [setup]
self.setup = setup[:] if setup is not None else []
omp_env = d.pop("omp_env", None)
self.omp_env = omp_env.copy() if omp_env is not None else {}
modules = d.pop("modules", None)
if is_string(modules): modules = [modules]
self.modules = modules[:] if modules is not None else []
shell_env = d.pop("shell_env", None)
self.shell_env = shell_env.copy() if shell_env is not None else {}
self.mpi_runner = d.pop("mpi_runner", None)
if not isinstance(self.mpi_runner, MpiRunner):
self.mpi_runner = MpiRunner(self.mpi_runner)
self.shell_runner = d.pop("shell_runner", None)
if self.shell_runner is not None:
self.shell_runner = MpiRunner(self.shell_runner)
pre_run = d.pop("pre_run", None)
if is_string(pre_run): pre_run = [pre_run]
self.pre_run = pre_run[:] if pre_run is not None else []
post_run = d.pop("post_run", None)
if is_string(post_run): post_run = [post_run]
self.post_run = post_run[:] if post_run is not None else []
if d:
raise ValueError("Found unknown keyword(s) in job section:\n %s" % list(d.keys()))
    def _parse_queue(self, d):
        """Parse the `queue` section of the configuration dictionary (consumed in place)."""
        # Init params
        qparams = d.pop("qparams", None)
        self._qparams = copy.deepcopy(qparams) if qparams is not None else {}

        self.set_qname(d.pop("qname", ""))
        # Node allocation mode requested from the resource manager.
        self.qnodes = d.pop("qnodes", "standard")
        if self.qnodes not in ["standard", "shared", "exclusive"]:
            raise ValueError("Nodes must be either in standard, shared or exclusive mode "
                             "while qnodes parameter was {}".format(self.qnodes))
        if d:
            raise ValueError("Found unknown keyword(s) in queue section:\n %s" % list(d.keys()))
    def __str__(self):
        """String representation: class name, queue name, hardware info and (if any) OpenMP env."""
        lines = ["%s:%s" % (self.__class__.__name__, self.qname)]
        app = lines.append
        app("Hardware:\n" + str(self.hw))
        #lines.extend(["qparams:\n", str(self.qparams)])
        if self.has_omp: app(str(self.omp_env))
        return "\n".join(lines)
    @property
    def qparams(self):
        """Dictionary with the parameters used to construct the header of the submission script."""
        return self._qparams
@lazy_property
def supported_qparams(self):
"""
Dictionary with the supported parameters that can be passed to the
queue manager (obtained by parsing QTEMPLATE).
"""
import re
return re.findall("\$\$\{(\w+)\}", self.QTEMPLATE)
    @property
    def has_mpi(self):
        """True if we are using MPI (i.e. an mpi_runner has been configured)."""
        return bool(self.mpi_runner)

    @property
    def has_omp(self):
        """True if we are using OpenMP threads (i.e. omp_env is set and non-empty)."""
        return hasattr(self, "omp_env") and bool(getattr(self, "omp_env"))

    @property
    def num_cores(self):
        """Total number of cores employed (MPI processes x OpenMP threads)."""
        return self.mpi_procs * self.omp_threads
@property
def omp_threads(self):
"""Number of OpenMP threads."""
if self.has_omp:
return self.omp_env["OMP_NUM_THREADS"]
else:
return 1
    @property
    def pure_mpi(self):
        """True if only MPI is used."""
        return self.has_mpi and not self.has_omp

    @property
    def pure_omp(self):
        """True if only OpenMP is used."""
        return self.has_omp and not self.has_mpi

    @property
    def hybrid_mpi_omp(self):
        """True if we are running in MPI+OpenMP mode."""
        return self.has_omp and self.has_mpi

    @property
    def run_info(self):
        """String with info on the run (MPI processes and OpenMP threads)."""
        return "MPI: %d, OMP: %d" % (self.mpi_procs, self.omp_threads)
    def deepcopy(self):
        """Deep copy of the object."""
        return copy.deepcopy(self)

    def record_launch(self, queue_id): # retcode):
        """Record the submission parameters in self.launches and return the total number of launches."""
        self.launches.append(
            AttrDict(queue_id=queue_id, mpi_procs=self.mpi_procs, omp_threads=self.omp_threads,
                     mem_per_proc=self.mem_per_proc, timelimit=self.timelimit))
        return len(self.launches)
def remove_launch(self, index):
"""Remove launch with the given index."""
self.launches.pop(index)
@property
def num_launches(self):
"""Number of submission tried with this adapter so far."""
return len(self.launches)
@property
def last_launch(self):
"""Return the last launch."""
if len(self.launches) > 0:
return self.launches[-1]
else:
return None
    def validate(self):
        """Validate the parameters of the run. Raises self.Error if invalid parameters."""
        # Collect all violations so the exception reports every problem at once.
        errors = []
        app = errors.append

        if not self.hint_cores >= self.mpi_procs * self.omp_threads >= self.min_cores:
            app("self.hint_cores >= mpi_procs * omp_threads >= self.min_cores not satisfied")

        if self.omp_threads > self.hw.cores_per_node:
            app("omp_threads > hw.cores_per_node")

        if self.mem_per_proc > self.hw.mem_per_node:
            app("mem_mb >= self.hw.mem_per_node")

        if not self.max_mem_per_proc >= self.mem_per_proc >= self.min_mem_per_proc:
            app("self.max_mem_per_proc >= mem_mb >= self.min_mem_per_proc not satisfied")

        if self.priority <= 0:
            app("priority must be > 0")

        if not (1 <= self.min_cores <= self.hw.num_cores >= self.hint_cores):
            app("1 <= min_cores <= hardware num_cores >= hint_cores not satisfied")

        if errors:
            raise self.Error(str(self) + "\n".join(errors))
    def set_omp_threads(self, omp_threads):
        """Set the number of OpenMP threads (stored in the OMP_NUM_THREADS entry of omp_env)."""
        self.omp_env["OMP_NUM_THREADS"] = omp_threads

    @property
    def mpi_procs(self):
        """Number of CPUs used for MPI."""
        return self._mpi_procs

    def set_mpi_procs(self, mpi_procs):
        """Set the number of MPI processes to mpi_procs"""
        self._mpi_procs = mpi_procs

    @property
    def qname(self):
        """The name of the queue."""
        return self._qname

    def set_qname(self, qname):
        """Set the name of the queue."""
        self._qname = qname
    # TODO: this assumes only one wall time, i.e. the one in the manager file is the one always used.
    # We should use the standard walltime to start with but also allow to increase the walltime
    @property
    def timelimit(self):
        """Returns the walltime in seconds."""
        return self._timelimit

    @property
    def timelimit_hard(self):
        """Returns the hard walltime limit in seconds."""
        return self._timelimit_hard

    def set_timelimit(self, timelimit):
        """Set the start walltime in seconds, fix method may increase this one until timelimit_hard is reached."""
        self._timelimit = timelimit

    def set_timelimit_hard(self, timelimit_hard):
        """Set the maximal possible walltime in seconds."""
        self._timelimit_hard = timelimit_hard
@property
def mem_per_proc(self):
"""The memory per process in megabytes."""
return self._mem_per_proc
@property
def master_mem_overhead(self):
"""The memory overhead for the master process in megabytes."""
return self._master_mem_overhead
    def set_mem_per_proc(self, mem_mb):
        """
        Set the memory per process in megabytes (truncated to int).
        NOTE: the clamp to min_mem_per_proc documented previously is disabled
        (see the commented line below), so mem_mb is stored as given.
        """
        # Hack needed because abinit is still not able to estimate memory.
        # COMMENTED by David.
        # This is not needed anymore here because the "hack" is performed directly in select_qadapter/_use_qadpos_pconf
        # methods of TaskManager. Moreover, this hack should be performed somewhere else (this part should be
        # independent of abinit ... and if we want to have less memory than the average memory available per node, we
        # have to allow it!)
        #if mem_mb <= self.min_mem_per_proc: mem_mb = self.min_mem_per_proc
        self._mem_per_proc = int(mem_mb)
    def set_master_mem_overhead(self, mem_mb):
        """
        Set the memory overhead for the master process in megabytes (truncated to int).
        Raises:
            ValueError: if mem_mb is negative.
        """
        if mem_mb < 0:
            raise ValueError("Memory overhead for the master process should be >= 0")
        self._master_mem_overhead = int(mem_mb)
    @property
    def total_mem(self):
        """Total memory required by the job in megabytes."""
        # Memory for all MPI processes plus the overhead reserved for the master process.
        return Memory(self.mem_per_proc * self.mpi_procs + self.master_mem_overhead, "Mb")
    @abc.abstractmethod
    def cancel(self, job_id):
        """
        Cancel the job.
        Args:
            job_id: Job identifier.
        Returns:
            Exit status.
        """
        # Implemented by concrete adapters with the resource-manager command (scancel, qdel, ...).
def can_run_pconf(self, pconf):
"""True if the qadapter in principle is able to run the :class:`ParalConf` pconf"""
if not self.hint_cores >= pconf.num_cores >= self.min_cores: return False
if not self.hw.can_use_omp_threads(self.omp_threads): return False
if pconf.mem_per_proc > self.hw.mem_per_node: return False
if self.allocation == "force_nodes" and pconf.num_cores % self.hw.cores_per_node != 0:
return False
return self.condition(pconf)
def distribute(self, mpi_procs, omp_threads, mem_per_proc):
"""
Returns (num_nodes, mpi_per_node)
Aggressive: When Open MPI thinks that it is in an exactly- or under-subscribed mode
(i.e., the number of running processes is equal to or less than the number of available processors),
MPI processes will automatically run in aggressive mode, meaning that they will never voluntarily give
up the processor to other processes. With some network transports, this means that Open MPI will spin
in tight loops attempting to make message passing progress, effectively causing other processes to not get
any CPU cycles (and therefore never make any progress)
"""
class Distrib(namedtuple("Distrib", "num_nodes mpi_per_node exact")):
pass
#@property
#def mem_per_node
# return self.mpi_per_node * mem_per_proc
#def set_nodes(self, nodes):
hw = self.hw
# TODO: Add check on user-memory
if mem_per_proc <= 0:
logger.warning("mem_per_proc <= 0")
mem_per_proc = hw.mem_per_core
if mem_per_proc > hw.mem_per_node:
raise self.Error(
"mem_per_proc > mem_per_node.\n Cannot distribute mpi_procs %d, omp_threads %d, mem_per_proc %s" %
(mpi_procs, omp_threads, mem_per_proc))
# Try to use all then cores in the node.
num_nodes, rest_cores = hw.divmod_node(mpi_procs, omp_threads)
if num_nodes == 0 and mpi_procs * mem_per_proc <= hw.mem_per_node:
# One node is enough
return Distrib(num_nodes=1, mpi_per_node=mpi_procs, exact=True)
if num_nodes == 0: num_nodes = 2
mpi_per_node = mpi_procs // num_nodes
if mpi_per_node * mem_per_proc <= hw.mem_per_node and rest_cores == 0:
# Commensurate with nodes.
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=True)
#if mode == "block", "cyclic"
# Try first to pack MPI processors in a node as much as possible
mpi_per_node = int(hw.mem_per_node / mem_per_proc)
assert mpi_per_node != 0
num_nodes = (mpi_procs * omp_threads) // mpi_per_node
print("exact --> false", num_nodes, mpi_per_node)
if mpi_per_node * omp_threads <= hw.cores_per_node and mem_per_proc <= hw.mem_per_node:
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=False)
if (mpi_procs * omp_threads) % mpi_per_node != 0:
# Have to reduce the number of MPI procs per node
for mpi_per_node in reversed(range(1, mpi_per_node)):
if mpi_per_node > hw.cores_per_node: continue
num_nodes = (mpi_procs * omp_threads) // mpi_per_node
if (mpi_procs * omp_threads) % mpi_per_node == 0 and mpi_per_node * mem_per_proc <= hw.mem_per_node:
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=False)
else:
raise self.Error("Cannot distribute mpi_procs %d, omp_threads %d, mem_per_proc %s" %
(mpi_procs, omp_threads, mem_per_proc))
    def optimize_params(self, qnodes=None):
        """
        This method is called in get_subs_dict. Return a dict with parameters to be added to qparams
        Subclasses may provide a specialized version.
        """
        # Base implementation is a no-op: no resource optimization is possible here.
        logger.debug("optimize_params of baseclass --> no optimization available!!!")
        return {}
def get_subs_dict(self, qnodes=None):
"""
Return substitution dict for replacements into the template
Subclasses may want to customize this method.
"""
#d = self.qparams.copy()
d = self.qparams
d.update(self.optimize_params(qnodes=qnodes))
# clean null values
subs_dict = {k: v for k, v in d.items() if v is not None}
#print("subs_dict:", subs_dict)
return subs_dict
def _make_qheader(self, job_name, qout_path, qerr_path):
"""Return a string with the options that are passed to the resource manager."""
# get substitution dict for replacements into the template
subs_dict = self.get_subs_dict()
# Set job_name and the names for the stderr and stdout of the
# queue manager (note the use of the extensions .qout and .qerr
# so that we can easily locate this file.
subs_dict['job_name'] = job_name.replace('/', '_')
subs_dict['_qout_path'] = qout_path
subs_dict['_qerr_path'] = qerr_path
qtemplate = QScriptTemplate(self.QTEMPLATE)
# might contain unused parameters as leftover $$.
unclean_template = qtemplate.safe_substitute(subs_dict)
# Remove lines with leftover $$.
clean_template = []
for line in unclean_template.split('\n'):
if '$$' not in line:
clean_template.append(line)
return '\n'.join(clean_template)
    def get_script_str(self, job_name, launch_dir, executable, qout_path, qerr_path,
                       stdin=None, stdout=None, stderr=None, exec_args=None):
        """
        Returns a (multi-line) String representing the queue script, e.g. PBS script.
        Uses the template_file along with internal parameters to create the script.
        Args:
            job_name: Name of the job.
            launch_dir: (str) The directory the job will be launched in.
            executable: String with the name of the executable to be executed or list of commands
            qout_path: Path of the Queue manager output file.
            qerr_path: Path of the Queue manager error file.
            exec_args: List of arguments passed to executable (used only if executable is a string, default: empty)
        """
        # PbsPro does not accept job_names longer than 15 chars.
        # NOTE(review): the comment says 15 chars but the code truncates to 14 -- confirm the intended limit.
        if len(job_name) > 14 and isinstance(self, PbsProAdapter):
            job_name = job_name[:14]
        # Construct the header for the Queue Manager.
        qheader = self._make_qheader(job_name, qout_path, qerr_path)
        # Add the bash section.
        se = ScriptEditor()
        # Cd to launch_dir immediately.
        se.add_line("cd " + os.path.abspath(launch_dir))
        if self.setup:
            se.add_comment("Setup section")
            se.add_lines(self.setup)
            se.add_emptyline()
        if self.modules:
            # stderr is redirected to mods.err file.
            # module load 2>> mods.err
            se.add_comment("Load Modules")
            se.add_line("module purge")
            se.load_modules(self.modules)
            se.add_emptyline()
        # OMP_NUM_THREADS is always defined: either from omp_env or forced to 1.
        se.add_comment("OpenMp Environment")
        if self.has_omp:
            se.declare_vars(self.omp_env)
            se.add_emptyline()
        else:
            se.declare_vars({"OMP_NUM_THREADS": 1})
        if self.shell_env:
            se.add_comment("Shell Environment")
            se.declare_vars(self.shell_env)
            se.add_emptyline()
        if self.pre_run:
            se.add_comment("Commands before execution")
            se.add_lines(self.pre_run)
            se.add_emptyline()
        # Construct the string to run the executable with MPI and mpi_procs.
        if is_string(executable):
            line = self.mpi_runner.string_to_run(self, executable,
                                                 stdin=stdin, stdout=stdout, stderr=stderr, exec_args=exec_args)
            se.add_line(line)
        else:
            # A list/tuple of commands is inserted verbatim.
            assert isinstance(executable, (list, tuple))
            se.add_lines(executable)
        if self.post_run:
            se.add_emptyline()
            se.add_comment("Commands after execution")
            se.add_lines(self.post_run)
        return qheader + se.get_script_str() + "\n"
    def submit_to_queue(self, script_file):
        """
        Public API: wraps the concrete implementation _submit_to_queue
        Returns:
            (QueueJob, process) tuple.
        Raises:
            `self.MaxNumLaunchesError` if we have already tried to submit the job max_num_launches
            `self.Error` if generic error
        """
        if not os.path.exists(script_file):
            raise self.Error('Cannot find script file located at: {}'.format(script_file))
        if self.num_launches == self.max_num_launches:
            raise self.MaxNumLaunchesError("num_launches %s == max_num_launches %s" % (self.num_launches, self.max_num_launches))
        # Call the concrete implementation.
        s = self._submit_to_queue(script_file)
        # NOTE(review): the launch is recorded before validating the submission
        # (s.qid may still be None here) -- confirm this counting is intended.
        self.record_launch(s.qid)
        if s.qid is None:
            raise self.Error("Error in job submission with %s. file %s \n" %
                             (self.__class__.__name__, script_file) +
                             "The error response reads:\n %s \n " % s.err +
                             "The out response reads:\n %s \n" % s.out)
        # Here we create a concrete instance of QueueJob
        return QueueJob.from_qtype_and_id(self.QTYPE, s.qid, self.qname), s.process
    @abc.abstractmethod
    def _submit_to_queue(self, script_file):
        """
        Submits the job to the queue, probably using subprocess or shutil
        This method must be provided by the concrete classes and will be called by submit_to_queue
        Args:
            script_file: (str) name of the script file to use (String)
        Returns:
            SubmitResults with (qid, out, err, process) -- submit_to_queue reads
            s.qid, s.out, s.err and s.process from the returned object.
        """
def get_njobs_in_queue(self, username=None):
"""
returns the number of jobs in the queue, probably using subprocess or shutil to
call a command like 'qstat'. returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
if username is None: username = getpass.getuser()
njobs, process = self._get_njobs_in_queue(username=username)
if process is not None and process.returncode != 0:
# there's a problem talking to squeue server?
err_msg = ('Error trying to get the number of jobs in the queue' +
'The error response reads:\n {}'.format(process.stderr.read()))
logger.critical(err_msg)
if not isinstance(self, ShellAdapter):
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs
    @abc.abstractmethod
    def _get_njobs_in_queue(self, username):
        """
        Concrete Subclasses must implement this method. Return (njobs, process)
        where process may be None when no external command was run.
        """
    # Methods to fix problems
    def add_exclude_nodes(self, nodes):
        """Record nodes to be excluded for this queue (delegates to _EXCL_NODES_FILE)."""
        return _EXCL_NODES_FILE.add_nodes(self.qname, nodes)
    def get_exclude_nodes(self):
        """Return the nodes recorded as excluded for this queue (delegates to _EXCL_NODES_FILE)."""
        return _EXCL_NODES_FILE.read_nodes(self.qname)
    @abc.abstractmethod
    def exclude_nodes(self, nodes):
        """Method to exclude nodes in the calculation. Return True if nodes have been excluded"""
    def more_mem_per_proc(self, factor=1):
        """
        Method to increase the amount of memory asked for, by factor * 2000 Mb.
        Return: the new memory on success.
        Raises: self.Error if the memory cannot be increased further
        (the previous docstring claimed 0 was returned, but the code raises).
        """
        base_increase = 2000
        old_mem = self.mem_per_proc
        new_mem = old_mem + factor*base_increase
        if new_mem < self.hw.mem_per_node:
            self.set_mem_per_proc(new_mem)
            return new_mem
        raise self.Error('could not increase mem_per_proc further')
    def more_master_mem_overhead(self, mem_increase_mb=1000):
        """
        Method to increase the memory overhead asked for the master node.
        Return: the new master memory overhead on success.
        Raises: self.Error if the overhead cannot be increased further
        (the previous docstring claimed 0 was returned, but the code raises).
        """
        old_master_mem_overhead = self.master_mem_overhead
        new_master_mem_overhead = old_master_mem_overhead + mem_increase_mb
        # The overhead plus one process must still fit on a node.
        if new_master_mem_overhead + self.mem_per_proc < self.hw.mem_per_node:
            self.set_master_mem_overhead(new_master_mem_overhead)
            return new_master_mem_overhead
        raise self.Error('could not increase master_mem_overhead further')
    def more_cores(self, factor=1):
        """
        Method to increase the number of MPI procs.
        Return: the new number of processors on success.
        Raises: self.Error when hint_cores would reach max_cores
        (the previous docstring claimed 0 was returned, but the code raises).
        """
        # TODO : find a formula that works for all max_cores
        # Scale the increment with the size of the partition.
        if self.max_cores > 40:
            base_increase = 4 * int(self.max_cores / 40)
        else:
            base_increase = 4
        new_cores = self.hint_cores + factor * base_increase
        if new_cores < self.max_cores:
            self.hint_cores = new_cores
            return new_cores
        raise self.Error('%s hint_cores reached limit on max_core %s' % (new_cores, self.max_cores))
def more_time(self, factor=1):
"""
Method to increase the wall time
"""
base_increase = int(self.timelimit_hard / 10)
new_time = self.timelimit + base_increase*factor
print('qadapter: trying to increase time')
if new_time < self.timelimit_hard:
self.set_timelimit(new_time)
print('new time set: ', new_time)
return new_time
self.priority = -1
raise self.Error("increasing time is not possible, the hard limit has been reached")
####################
# Concrete classes #
####################
class ShellAdapter(QueueAdapter):
    """Simple Adapter used to submit runs through the shell."""
    QTYPE = "shell"
    QTEMPLATE = """\
#!/bin/bash
$${qverbatim}
"""
    def cancel(self, job_id):
        """Kill the bash process: here job_id is the PID of the submitted process."""
        return os.system("kill -9 %d" % job_id)
    def _submit_to_queue(self, script_file):
        # submit the job, return process and pid.
        # Note: only stderr is piped; stdout goes to the parent's stdout.
        process = Popen(("/bin/bash", script_file), stderr=PIPE)
        return SubmitResults(qid=process.pid, out='no out in shell submission', err='no err in shell submission', process=process)
    def _get_njobs_in_queue(self, username):
        # There is no queue when running through the shell: nothing can be counted.
        return None, None
    def exclude_nodes(self, nodes):
        # Node exclusion is meaningless for shell submission.
        return False
class SlurmAdapter(QueueAdapter):
    """Adapter for SLURM."""
    QTYPE = "slurm"
    QTEMPLATE = """\
#!/bin/bash
#SBATCH --partition=$${partition}
#SBATCH --job-name=$${job_name}
#SBATCH --nodes=$${nodes}
#SBATCH --total_tasks=$${total_tasks}
#SBATCH --ntasks=$${ntasks}
#SBATCH --ntasks-per-node=$${ntasks_per_node}
#SBATCH --cpus-per-task=$${cpus_per_task}
#####SBATCH --mem=$${mem}
#SBATCH --mem-per-cpu=$${mem_per_cpu}
#SBATCH --hint=$${hint}
#SBATCH --time=$${time}
#SBATCH --exclude=$${exclude_nodes}
#SBATCH --account=$${account}
#SBATCH --mail-user=$${mail_user}
#SBATCH --mail-type=$${mail_type}
#SBATCH --constraint=$${constraint}
#SBATCH --gres=$${gres}
#SBATCH --requeue=$${requeue}
#SBATCH --nodelist=$${nodelist}
#SBATCH --propagate=$${propagate}
#SBATCH --licenses=$${licenses}
#SBATCH --output=$${_qout_path}
#SBATCH --error=$${_qerr_path}
$${qverbatim}
"""
    def set_qname(self, qname):
        """Set the queue name and keep the SLURM partition parameter in sync."""
        super(SlurmAdapter, self).set_qname(qname)
        if qname:
            self.qparams["partition"] = qname
    def set_mpi_procs(self, mpi_procs):
        """Set the number of CPUs used for MPI (SLURM --ntasks)."""
        super(SlurmAdapter, self).set_mpi_procs(mpi_procs)
        self.qparams["ntasks"] = mpi_procs
    def set_omp_threads(self, omp_threads):
        """Set the number of OpenMP threads (SLURM --cpus-per-task)."""
        super(SlurmAdapter, self).set_omp_threads(omp_threads)
        self.qparams["cpus_per_task"] = omp_threads
    def set_mem_per_proc(self, mem_mb):
        """Set the memory per process in megabytes (SLURM --mem-per-cpu)."""
        super(SlurmAdapter, self).set_mem_per_proc(mem_mb)
        self.qparams["mem_per_cpu"] = self.mem_per_proc
        # Remove mem if it's defined.
        #self.qparams.pop("mem", None)
    def set_timelimit(self, timelimit):
        """Set the walltime in seconds, converted to the SLURM [days-]hh:mm:ss format."""
        super(SlurmAdapter, self).set_timelimit(timelimit)
        self.qparams["time"] = qu.time2slurm(timelimit)
    def cancel(self, job_id):
        """Cancel job_id with scancel. Returns the shell exit status."""
        return os.system("scancel %d" % job_id)
    def optimize_params(self, qnodes=None):
        """Return extra qparams: with allocation == "nodes" compute the minimal node count."""
        params = {}
        if self.allocation == "nodes":
            # run on the smallest number of nodes compatible with the configuration
            params["nodes"] = max(int(math.ceil(self.mpi_procs / self.hw.cores_per_node)),
                                  int(math.ceil(self.total_mem / self.hw.mem_per_node)))
        return params
    def _submit_to_queue(self, script_file):
        """Submit a job script to the queue with sbatch. Returns SubmitResults."""
        process = Popen(['sbatch', script_file], stdout=PIPE, stderr=PIPE)
        out, err = process.communicate()
        # grab the returncode. SLURM returns 0 if the job was successful
        queue_id = None
        if process.returncode == 0:
            try:
                # sbatch output is of the form 'Submitted batch job 2561553':
                # the job id is the fourth token.
                queue_id = int(out.split()[3])
                logger.info('Job submission was successful and queue_id is {}'.format(queue_id))
            except Exception:
                # probably error parsing job code
                # (narrowed from a bare except that would also trap KeyboardInterrupt/SystemExit)
                logger.critical('Could not parse job id following slurm...')
        return SubmitResults(qid=queue_id, out=out, err=err, process=process)
    def exclude_nodes(self, nodes):
        """Add nodes to the SLURM --exclude list. Return True if nodes have been excluded."""
        try:
            # Seed the parameter with the first node only when the key is absent;
            # otherwise append ALL nodes (the previous code dropped nodes[0]
            # when 'exclude_nodes' was already defined).
            start = 0
            if 'exclude_nodes' not in self.qparams:
                self.qparams.update({'exclude_nodes': 'node' + nodes[0]})
                print('excluded node %s' % nodes[0])
                start = 1
            for node in nodes[start:]:
                self.qparams['exclude_nodes'] += ',node' + node
                print('excluded node %s' % node)
            return True
        except (KeyError, IndexError):
            raise self.Error('qadapter failed to exclude nodes')
    def _get_njobs_in_queue(self, username):
        """Return (njobs, process) by parsing the output of squeue."""
        process = Popen(['squeue', '-o "%u"', '-u', username], stdout=PIPE, stderr=PIPE)
        out, err = process.communicate()
        njobs = None
        if process.returncode == 0:
            # parse the result. lines should have this form:
            # username
            # count lines that include the username in it
            # NOTE(review): `out` is bytes under Python 3 (no text mode on Popen),
            # so `username in line` would mix str/bytes -- verify on Python 3.
            outs = out.splitlines()
            njobs = len([line.split() for line in outs if username in line])
        return njobs, process
class PbsProAdapter(QueueAdapter):
    """Adapter for PbsPro"""
    QTYPE = "pbspro"
    #PBS -l select=$${select}:ncpus=$${ncpus}:vmem=$${vmem}mb:mpiprocs=$${mpiprocs}:ompthreads=$${ompthreads}
    #PBS -l select=$${select}:ncpus=1:vmem=$${vmem}mb:mpiprocs=1:ompthreads=$${ompthreads}
    ####PBS -l select=$${select}:ncpus=$${ncpus}:vmem=$${vmem}mb:mpiprocs=$${mpiprocs}:ompthreads=$${ompthreads}
    ####PBS -l pvmem=$${pvmem}mb
    QTEMPLATE = """\
#!/bin/bash
#PBS -q $${queue}
#PBS -N $${job_name}
#PBS -A $${account}
#PBS -l select=$${select}
#PBS -l pvmem=$${pvmem}mb
#PBS -l walltime=$${walltime}
#PBS -l model=$${model}
#PBS -l place=$${place}
#PBS -W group_list=$${group_list}
#PBS -M $${mail_user}
#PBS -m $${mail_type}
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
$${qverbatim}
"""
    def set_qname(self, qname):
        """Set the queue name and keep the PBS queue parameter in sync."""
        super(PbsProAdapter, self).set_qname(qname)
        if qname:
            self.qparams["queue"] = qname
    def set_timelimit(self, timelimit):
        """Set the walltime in seconds, converted to the PBS [hours:minutes:]seconds format."""
        super(PbsProAdapter, self).set_timelimit(timelimit)
        self.qparams["walltime"] = qu.time2pbspro(timelimit)
    def set_mem_per_proc(self, mem_mb):
        """Set the memory per process in megabytes"""
        super(PbsProAdapter, self).set_mem_per_proc(mem_mb)
        #self.qparams["vmem"] = self.mem_per_proc
        self.qparams["pvmem"] = self.mem_per_proc
    def cancel(self, job_id):
        """Cancel job_id with qdel. Returns the shell exit status."""
        return os.system("qdel %d" % job_id)
    def optimize_params(self, qnodes=None):
        """Return the -l select specification computed from the current configuration."""
        return {"select": self.get_select(qnodes=qnodes)}
    def get_select(self, ret_dict=False, qnodes=None):
        """
        Select is not the most intuitive command. For more info see:
        * http://www.cardiff.ac.uk/arcca/services/equipment/User-Guide/pbs.html
        * https://portal.ivec.org/docs/Supercomputers/PBS_Pro
        """
        hw, mem_per_proc = self.hw, int(self.mem_per_proc)
        #dist = self.distribute(self.mpi_procs, self.omp_threads, mem_per_proc)
        """
        if self.pure_mpi:
            num_nodes, rest_cores = hw.divmod_node(self.mpi_procs, self.omp_threads)
            if num_nodes == 0:
                logger.info("IN_CORE PURE MPI: %s" % self.run_info)
                chunks = 1
                ncpus = rest_cores
                mpiprocs = rest_cores
                vmem = mem_per_proc * ncpus
                ompthreads = 1
            elif rest_cores == 0:
                # Can allocate entire nodes because self.mpi_procs is divisible by cores_per_node.
                logger.info("PURE MPI run commensurate with cores_per_node %s" % self.run_info)
                chunks = num_nodes
                ncpus = hw.cores_per_node
                mpiprocs = hw.cores_per_node
                vmem = ncpus * mem_per_proc
                ompthreads = 1
            else:
                logger.info("OUT-OF-CORE PURE MPI (not commensurate with cores_per_node): %s" % self.run_info)
                chunks = self.mpi_procs
                ncpus = 1
                mpiprocs = 1
                vmem = mem_per_proc
                ompthreads = 1
        elif self.pure_omp:
            # Pure OMP run.
            logger.info("PURE OPENMP run: %s" % self.run_info)
            assert hw.can_use_omp_threads(self.omp_threads)
            chunks = 1
            ncpus = self.omp_threads
            mpiprocs = 1
            vmem = mem_per_proc
            ompthreads = self.omp_threads
        elif self.hybrid_mpi_omp:
            assert hw.can_use_omp_threads(self.omp_threads)
            num_nodes, rest_cores = hw.divmod_node(self.mpi_procs, self.omp_threads)
            #print(num_nodes, rest_cores)
            # TODO: test this
            if rest_cores == 0 or num_nodes == 0:
                logger.info("HYBRID MPI-OPENMP run, perfectly divisible among nodes: %s" % self.run_info)
                chunks = max(num_nodes, 1)
                mpiprocs = self.mpi_procs // chunks
                chunks = chunks
                ncpus = mpiprocs * self.omp_threads
                mpiprocs = mpiprocs
                vmem = mpiprocs * mem_per_proc
                ompthreads = self.omp_threads
            else:
                logger.info("HYBRID MPI-OPENMP, NOT commensurate with nodes: %s" % self.run_info)
                chunks=self.mpi_procs
                ncpus=self.omp_threads
                mpiprocs=1
                vmem= mem_per_proc
                ompthreads=self.omp_threads
        else:
            raise RuntimeError("You should not be here")
        """
        # Dispatch on the node-sharing policy.
        if qnodes is None:
            qnodes = self.qnodes
        else:
            if qnodes not in ["standard", "shared", "exclusive"]:
                raise ValueError("Nodes must be either in standard, shared or exclusive mode "
                                 "while qnodes parameter was {}".format(self.qnodes))
        if qnodes == "standard":
            return self._get_select_standard(ret_dict=ret_dict)
        else:
            return self._get_select_with_master_mem_overhead(ret_dict=ret_dict, qnodes=qnodes)
    def _get_select_with_master_mem_overhead(self, ret_dict=False, qnodes=None):
        """Dispatch to the shared/exclusive select builders that reserve extra memory for the master."""
        if self.has_omp:
            raise NotImplementedError("select with master mem overhead not yet implemented with has_omp")
        if qnodes is None:
            qnodes = self.qnodes
        else:
            if qnodes not in ["standard", "shared", "exclusive"]:
                raise ValueError("Nodes must be either in standard, shared or exclusive mode "
                                 "while qnodes parameter was {}".format(self.qnodes))
        if qnodes == "exclusive":
            return self._get_select_with_master_mem_overhead_exclusive(ret_dict=ret_dict)
        elif qnodes == "shared":
            return self._get_select_with_master_mem_overhead_shared(ret_dict=ret_dict)
        else:
            raise ValueError("Wrong value of qnodes parameter : {}".format(self.qnodes))
    def _get_select_with_master_mem_overhead_shared(self, ret_dict=False):
        """Build a select string with one master chunk (mem + overhead) and one chunk per slave process."""
        chunk_master, ncpus_master, vmem_master, mpiprocs_master = 1, 1, self.mem_per_proc+self.master_mem_overhead, 1
        if self.mpi_procs > 1:
            chunks_slaves, ncpus_slaves, vmem_slaves, mpiprocs_slaves = self.mpi_procs - 1, 1, self.mem_per_proc, 1
            select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
                                     mpiprocs_master=mpiprocs_master, vmem_master=int(vmem_master),
                                     chunks_slaves=chunks_slaves, ncpus_slaves=ncpus_slaves,
                                     mpiprocs_slaves=mpiprocs_slaves, vmem_slaves=int(vmem_slaves))
            s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:mpiprocs={mpiprocs_master}+" \
                "{chunks_slaves}:ncpus={ncpus_slaves}:vmem={vmem_slaves}mb:" \
                "mpiprocs={mpiprocs_slaves}".format(**select_params)
            # Sanity check: the chunks must account for exactly mpi_procs cpus.
            tot_ncpus = chunk_master*ncpus_master + chunks_slaves*ncpus_slaves
            if tot_ncpus != self.mpi_procs:
                raise ValueError('Total number of cpus is different from mpi_procs ...')
        else:
            select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
                                     mpiprocs_master=mpiprocs_master, vmem_master=int(vmem_master))
            s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:" \
                "mpiprocs={mpiprocs_master}".format(**select_params)
        if ret_dict:
            return s, select_params
        return s
    def _get_select_with_master_mem_overhead_exclusive(self, ret_dict=False):
        """
        Build a select string for exclusive nodes: pack as many processes as memory
        allows on the master node, fill the remaining processes on slave nodes,
        possibly with an explicit last (partially filled) slave chunk.
        """
        # Max processes on the master node: limited by cores and by the memory
        # left after the master's own mem + overhead.
        max_ncpus_master = min(self.hw.cores_per_node,
                               int((self.hw.mem_per_node-self.mem_per_proc-self.master_mem_overhead)
                                   / self.mem_per_proc) + 1)
        if max_ncpus_master >= self.mpi_procs:
            # Everything fits on one node.
            chunk, ncpus, vmem, mpiprocs = 1, self.mpi_procs, self.hw.mem_per_node, self.mpi_procs
            select_params = AttrDict(chunks=chunk, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(vmem))
            s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}".format(**select_params)
            tot_ncpus = chunk*ncpus
        else:
            ncpus_left = self.mpi_procs-max_ncpus_master
            max_ncpus_per_slave_node = min(self.hw.cores_per_node, int(self.hw.mem_per_node/self.mem_per_proc))
            nslaves_float = float(ncpus_left)/float(max_ncpus_per_slave_node)
            ncpus_per_slave = max_ncpus_per_slave_node
            mpiprocs_slaves = max_ncpus_per_slave_node
            chunk_master = 1
            vmem_slaves = self.hw.mem_per_node
            explicit_last_slave = False
            chunk_last_slave, ncpus_last_slave, vmem_last_slave, mpiprocs_last_slave = None, None, None, None
            if nslaves_float > int(nslaves_float):
                # Slaves don't fill a whole number of nodes.
                chunks_slaves = int(nslaves_float) + 1
                pot_ncpus_all_slaves = chunks_slaves*ncpus_per_slave
                if pot_ncpus_all_slaves >= self.mpi_procs-1:
                    # Move the remainder into an explicit, partially-filled last slave chunk.
                    explicit_last_slave = True
                    chunks_slaves = chunks_slaves-1
                    chunk_last_slave = 1
                    ncpus_master = 1
                    ncpus_last_slave = self.mpi_procs - 1 - chunks_slaves*ncpus_per_slave
                    vmem_last_slave = self.hw.mem_per_node
                    mpiprocs_last_slave = ncpus_last_slave
                else:
                    ncpus_master = self.mpi_procs-pot_ncpus_all_slaves
                    if ncpus_master > max_ncpus_master:
                        raise ValueError('ncpus for the master node exceeds the maximum ncpus for the master ... this'
                                         'should not happen ...')
                    if ncpus_master < 1:
                        raise ValueError('ncpus for the master node is 0 ... this should not happen ...')
            elif nslaves_float == int(nslaves_float):
                # Slaves fill whole nodes exactly; master takes its maximum.
                chunks_slaves = int(nslaves_float)
                ncpus_master = max_ncpus_master
            else:
                raise ValueError('nslaves_float < int(nslaves_float) ...')
            vmem_master, mpiprocs_master = self.hw.mem_per_node, ncpus_master
            if explicit_last_slave:
                select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
                                         mpiprocs_master=mpiprocs_master, vmem_master=int(vmem_master),
                                         chunks_slaves=chunks_slaves, ncpus_per_slave=ncpus_per_slave,
                                         mpiprocs_slaves=mpiprocs_slaves, vmem_slaves=int(vmem_slaves),
                                         chunk_last_slave=chunk_last_slave, ncpus_last_slave=ncpus_last_slave,
                                         vmem_last_slave=int(vmem_last_slave), mpiprocs_last_slave=mpiprocs_last_slave)
                s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:mpiprocs={mpiprocs_master}+" \
                    "{chunks_slaves}:ncpus={ncpus_per_slave}:vmem={vmem_slaves}mb:mpiprocs={mpiprocs_slaves}+" \
                    "{chunk_last_slave}:ncpus={ncpus_last_slave}:vmem={vmem_last_slave}mb:" \
                    "mpiprocs={mpiprocs_last_slave}".format(**select_params)
                tot_ncpus = chunk_master*ncpus_master+chunks_slaves*ncpus_per_slave+chunk_last_slave*ncpus_last_slave
            else:
                select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
                                         mpiprocs_master=mpiprocs_master, vmem_master=int(vmem_master),
                                         chunks_slaves=chunks_slaves, ncpus_per_slave=ncpus_per_slave,
                                         mpiprocs_slaves=mpiprocs_slaves, vmem_slaves=int(vmem_slaves))
                s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:mpiprocs={mpiprocs_master}+" \
                    "{chunks_slaves}:ncpus={ncpus_per_slave}:vmem={vmem_slaves}mb:" \
                    "mpiprocs={mpiprocs_slaves}".format(**select_params)
                tot_ncpus = chunk_master*ncpus_master + chunks_slaves*ncpus_per_slave
        # Sanity check: the chunks must account for exactly mpi_procs cpus.
        if tot_ncpus != self.mpi_procs:
            raise ValueError('Total number of cpus is different from mpi_procs ...')
        if ret_dict:
            return s, select_params
        return s
    def _get_select_standard(self, ret_dict=False):
        """Build the plain select string: one chunk per MPI process (with ompthreads when OMP is used)."""
        if not self.has_omp:
            chunks, ncpus, vmem, mpiprocs = self.mpi_procs, 1, self.mem_per_proc, 1
            select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(vmem))
            s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}".format(**select_params)
        else:
            chunks, ncpus, vmem, mpiprocs, ompthreads = self.mpi_procs, self.omp_threads, self.mem_per_proc, 1, self.omp_threads
            select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(vmem), ompthreads=ompthreads)
            s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}:ompthreads={ompthreads}".format(**select_params)
        if ret_dict:
            return s, select_params
        return s
    def _submit_to_queue(self, script_file):
        """Submit a job script to the queue."""
        process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE)
        out, err = process.communicate()
        # grab the return code. PBS returns 0 if the job was successful
        queue_id = None
        if process.returncode == 0:
            try:
                # output should of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id
                queue_id = int(out.split('.')[0])
            except:
                # probably error parsing job code
                logger.critical("Could not parse job id following qsub...")
        return SubmitResults(qid=queue_id, out=out, err=err, process=process)
    def _get_njobs_in_queue(self, username):
        """Return (njobs, process) by parsing the output of qstat."""
        process = Popen(['qstat', '-a', '-u', username], stdout=PIPE, stderr=PIPE)
        out, err = process.communicate()
        njobs = None
        if process.returncode == 0:
            # parse the result
            # lines should have this form
            # '1339044.sdb username queuename 2012-02-29-16-43 20460 -- -- -- 00:20 C 00:09'
            # count lines that include the username in it
            # TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'.
            # NOTE(review): `out` is bytes under Python 3 (no text mode on Popen) -- verify the
            # `username in line` membership test on Python 3.
            outs = out.split('\n')
            njobs = len([line.split() for line in outs if username in line])
        return njobs, process
    def exclude_nodes(self, nodes):
        # Node exclusion is not supported by this adapter.
        return False
class TorqueAdapter(PbsProAdapter):
    """Adapter for Torque."""
    QTYPE = "torque"
    QTEMPLATE = """\
#!/bin/bash
#PBS -q $${queue}
#PBS -N $${job_name}
#PBS -A $${account}
#PBS -l pmem=$${pmem}mb
####PBS -l mppwidth=$${mppwidth}
#PBS -l nodes=$${nodes}:ppn=$${ppn}
#PBS -l walltime=$${walltime}
#PBS -l model=$${model}
#PBS -l place=$${place}
#PBS -W group_list=$${group_list}
#PBS -M $${mail_user}
#PBS -m $${mail_type}
# Submission environment
#PBS -V
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
$${qverbatim}
"""
    def set_mem_per_proc(self, mem_mb):
        """Set the memory per process in megabytes"""
        # Calls QueueAdapter directly (not super/PbsProAdapter) to avoid
        # setting the PbsPro-specific "pvmem" parameter; Torque uses "pmem".
        QueueAdapter.set_mem_per_proc(self, mem_mb)
        self.qparams["pmem"] = self.mem_per_proc
        #self.qparams["mem"] = self.mem_per_proc
    #@property
    #def mpi_procs(self):
    #    """Number of MPI processes."""
    #    return self.qparams.get("nodes", 1) * self.qparams.get("ppn", 1)
    def set_mpi_procs(self, mpi_procs):
        """Set the number of CPUs used for MPI."""
        # Calls QueueAdapter directly to bypass the PbsPro override.
        QueueAdapter.set_mpi_procs(self, mpi_procs)
        # All processes are requested on a single node (nodes=1, ppn=mpi_procs).
        self.qparams["nodes"] = 1
        self.qparams["ppn"] = mpi_procs
    def exclude_nodes(self, nodes):
        raise self.Error('qadapter failed to exclude nodes, not implemented yet in torque')
class SGEAdapter(QueueAdapter):
    """
    Adapter for Sun Grid Engine (SGE) task submission software.
    See also:
    * https://www.wiki.ed.ac.uk/display/EaStCHEMresearchwiki/How+to+write+a+SGE+job+submission+script
    * http://www.uibk.ac.at/zid/systeme/hpc-systeme/common/tutorials/sge-howto.html
    """
    QTYPE = "sge"
    QTEMPLATE = """\
#!/bin/bash
#$ -account_name $${account_name}
#$ -N $${job_name}
#$ -q $${queue_name}
#$ -pe $${parallel_environment} $${ncpus}
#$ -l h_rt=$${walltime}
# request a per slot memory limit of size bytes.
##$ -l h_vmem=$${mem_per_slot}
##$ -l mf=$${mem_per_slot}
###$ -j no
#$ -M $${mail_user}
#$ -m $${mail_type}
# Submission environment
##$ -S /bin/bash
###$ -cwd # Change to current working directory
###$ -V # Export environment variables into script
#$ -e $${_qerr_path}
#$ -o $${_qout_path}
$${qverbatim}
"""
    def set_qname(self, qname):
        """Set the queue name and keep the SGE queue_name parameter in sync."""
        super(SGEAdapter, self).set_qname(qname)
        if qname:
            self.qparams["queue_name"] = qname
    def set_mpi_procs(self, mpi_procs):
        """Set the number of CPUs used for MPI."""
        super(SGEAdapter, self).set_mpi_procs(mpi_procs)
        self.qparams["ncpus"] = mpi_procs
    def set_omp_threads(self, omp_threads):
        """Record omp_threads but warn: this adapter does not support OpenMP."""
        super(SGEAdapter, self).set_omp_threads(omp_threads)
        logger.warning("Cannot use omp_threads with SGE")
    def set_mem_per_proc(self, mem_mb):
        """Set the memory per process in megabytes (SGE mem_per_slot, e.g. '1000M')."""
        super(SGEAdapter, self).set_mem_per_proc(mem_mb)
        self.qparams["mem_per_slot"] = str(int(self.mem_per_proc)) + "M"
    def set_timelimit(self, timelimit):
        """Set the walltime in seconds."""
        super(SGEAdapter, self).set_timelimit(timelimit)
        # Same convention as pbspro e.g. [hours:minutes:]seconds
        self.qparams["walltime"] = qu.time2pbspro(timelimit)
    def cancel(self, job_id):
        """Cancel job_id with qdel. Returns the shell exit status."""
        return os.system("qdel %d" % job_id)
    def _submit_to_queue(self, script_file):
        """Submit a job script to the queue."""
        process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE)
        out, err = process.communicate()
        # grab the returncode. SGE returns 0 if the job was successful
        queue_id = None
        if process.returncode == 0:
            try:
                # output should be of the form
                # Your job 1659048 ("NAME_OF_JOB") has been submitted
                queue_id = int(out.split(' ')[2])
            except Exception:
                # probably error parsing job code
                # (narrowed from a bare except that would also trap KeyboardInterrupt/SystemExit)
                logger.critical("Could not parse job id following qsub...")
        return SubmitResults(qid=queue_id, out=out, err=err, process=process)
    def exclude_nodes(self, nodes):
        """Method to exclude nodes in the calculation"""
        raise self.Error('qadapter failed to exclude nodes, not implemented yet in sge')
    def _get_njobs_in_queue(self, username):
        """Return (njobs, process) by parsing the output of qstat."""
        process = Popen(['qstat', '-u', username], stdout=PIPE, stderr=PIPE)
        out, err = process.communicate()
        njobs = None
        if process.returncode == 0:
            # parse the result: count the lines that contain the username
            # TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'.
            outs = out.splitlines()
            njobs = len([line.split() for line in outs if username in line])
        return njobs, process
class MOABAdapter(QueueAdapter):
    """Adapter for MOAB. See https://computing.llnl.gov/tutorials/moab/"""
    QTYPE = "moab"

    QTEMPLATE = """\
#!/bin/bash
#MSUB -a $${eligible_date}
#MSUB -A $${account}
#MSUB -c $${checkpoint_interval}
#MSUB -l feature=$${feature}
#MSUB -l gres=$${gres}
#MSUB -l nodes=$${nodes}
#MSUB -l partition=$${partition}
#MSUB -l procs=$${procs}
#MSUB -l ttc=$${ttc}
#MSUB -l walltime=$${walltime}
#MSUB -l $${resources}
#MSUB -p $${priority}
#MSUB -q $${queue}
#MSUB -S $${shell}
#MSUB -N $${job_name}
#MSUB -v $${variable_list}
#MSUB -o $${_qout_path}
#MSUB -e $${_qerr_path}
$${qverbatim}
"""

    def set_mpi_procs(self, mpi_procs):
        """Set the number of CPUs used for MPI."""
        super(MOABAdapter, self).set_mpi_procs(mpi_procs)
        self.qparams["procs"] = mpi_procs

    def set_timelimit(self, timelimit):
        """Set the wall-time limit (slurm-style HH:MM:SS string)."""
        super(MOABAdapter, self).set_timelimit(timelimit)
        self.qparams["walltime"] = qu.time2slurm(timelimit)

    def set_mem_per_proc(self, mem_mb):
        """Set the memory per process in megabytes.

        Stored on the adapter only; not yet mapped to a #MSUB resource.
        """
        super(MOABAdapter, self).set_mem_per_proc(mem_mb)
        # TODO: translate mem_mb into the appropriate #MSUB request.

    def exclude_nodes(self, nodes):
        """Method to exclude nodes in the calculation."""
        # BUGFIX: error message said "moad" instead of "moab".
        raise self.Error('qadapter failed to exclude nodes, not implemented yet in moab')

    def cancel(self, job_id):
        """Cancel the job via ``canceljob``; returns the shell exit status."""
        return os.system("canceljob %d" % job_id)

    def _submit_to_queue(self, script_file):
        """Submit a job script to the queue."""
        process = Popen(['msub', script_file], stdout=PIPE, stderr=PIPE)
        out, err = process.communicate()
        queue_id = None
        if process.returncode == 0:
            # grab the returncode. MOAB returns 0 if the job was successful
            try:
                # output should be the queue_id
                queue_id = int(out.split()[0])
            except Exception:
                # Narrowed from a bare ``except:`` so SystemExit and
                # KeyboardInterrupt are not swallowed while parsing.
                logger.critical('Could not parse job id following msub...')
        return SubmitResults(qid=queue_id, out=out, err=err, process=process)

    def _get_njobs_in_queue(self, username):
        """Return (njobs, process) by parsing ``showq -s -u username``."""
        # BUGFIX: '-s -u' was passed as a SINGLE argv element, which showq
        # cannot parse; each option must be its own list item.
        process = Popen(['showq', '-s', '-u', username], stdout=PIPE, stderr=PIPE)
        out, err = process.communicate()
        njobs = None
        if process.returncode == 0:
            # parse the result
            # lines should have this form:
            ##
            ## active jobs: N  eligible jobs: M  blocked jobs: P
            ##
            ## Total job:  1
            ##
            # Split the output string and return the last element.
            out = out.splitlines()[-1]
            njobs = int(out.split()[-1])
        return njobs, process
class BlueGeneAdapter(QueueAdapter):
    """
    Adapter for LoadLever on BlueGene architectures.
    See:
        http://www.prace-ri.eu/best-practice-guide-blue-gene-q-html/#id-1.5.4.8
        https://www.lrz.de/services/compute/supermuc/loadleveler/
    """
    QTYPE = "bluegene"

    QTEMPLATE = """\
#!/bin/bash
# @ job_name = $${job_name}
# @ class = $${class}
# @ error = $${_qout_path}
# @ output = $${_qerr_path}
# @ wall_clock_limit = $${wall_clock_limit}
# @ notification = $${notification}
# @ notify_user = $${mail_user}
# @ environment = $${environment}
# @ account_no = $${account_no}
# @ job_type = bluegene
# @ bg_connectivity = $${bg_connectivity}
# @ bg_size = $${bg_size}
$${qverbatim}
# @ queue
"""

    def set_qname(self, qname):
        """Set the queue name; LoadLeveler calls it the job 'class'."""
        super(BlueGeneAdapter, self).set_qname(qname)
        if qname:
            self.qparams["class"] = qname

    def set_timelimit(self, timelimit):
        """Limits are specified with the format hh:mm:ss (hours:minutes:seconds)"""
        super(BlueGeneAdapter, self).set_timelimit(timelimit)
        self.qparams["wall_clock_limit"] = qu.time2loadlever(timelimit)

    def cancel(self, job_id):
        """Cancel the job via ``llcancel``; returns the shell exit status."""
        return os.system("llcancel %d" % job_id)

    def bgsize_rankspernode(self):
        """Return (bg_size, ranks_per_node) from mpi_procs and omp_threads."""
        bg_size = int(math.ceil((self.mpi_procs * self.omp_threads) / self.hw.cores_per_node))
        bg_size = max(bg_size, 32)  # TODO hardcoded
        ranks_per_node = int(math.ceil(self.mpi_procs / bg_size))
        return bg_size, ranks_per_node

    def optimize_params(self, qnodes=None):
        """Return the optimized submission parameters ({"bg_size": ...})."""
        bg_size, rpn = self.bgsize_rankspernode()
        # BUGFIX: leftover debugging ``print`` calls replaced with lazy
        # logger.debug so normal runs no longer write to stdout; also
        # removed the unused local ``params``.
        logger.debug("optimize_params: mpi_procs %s, omp_threads %s, "
                     "bg_size %s, ranks_per_node %s",
                     self.mpi_procs, self.omp_threads, bg_size, rpn)
        return {"bg_size": bg_size}

    def _submit_to_queue(self, script_file):
        """Submit a job script to the queue."""
        process = Popen(['llsubmit', script_file], stdout=PIPE, stderr=PIPE)
        out, err = process.communicate()
        # grab the return code. llsubmit returns 0 if the job was successful
        queue_id = None
        if process.returncode == 0:
            try:
                # on JUQUEEN, output should be of the form
                # llsubmit: The job "juqueen1c1.zam.kfa-juelich.de.281506" has been submitted.
                token = out.split()[3]
                s = token.split(".")[-1].replace('"', "")
                queue_id = int(s)
            except Exception:
                # Narrowed from a bare ``except:``; still re-raised so the
                # caller sees the parsing failure.
                logger.critical("Could not parse job id following llsubmit...")
                raise
        return SubmitResults(qid=queue_id, out=out, err=err, process=process)

    def _get_njobs_in_queue(self, username):
        """Return (njobs, process) by parsing ``llq -u username`` output."""
        process = Popen(['llq', '-u', username], stdout=PIPE, stderr=PIPE)
        out, err = process.communicate()
        njobs = None
        if process.returncode == 0:
            # parse the result. lines should have this form:
            #
            # Id                       Owner      Submitted   ST PRI Class        Running On
            # ------------------------ ---------- ----------- -- --- ------------ -----------
            # juqueen1c1.281508.0      paj15530    1/23 13:20 I  50  n001
            # 1 job step(s) in query, 1 waiting, 0 pending, 0 running, 0 held, 0 preempted
            #
            # count lines that include the username in it
            njobs = len([line for line in out.split('\n') if username in line])
        return njobs, process

    def exclude_nodes(self, nodes):
        """Node exclusion is not supported on BlueGene; always returns False."""
        return False
|
aykol/pymatgen
|
pymatgen/io/abinit/qadapters.py
|
Python
|
mit
| 79,102
|
[
"ABINIT",
"pymatgen"
] |
246ddca62d8ad4cc7a0133a1a6efdea63a4029870eb65dfe63d7175ec37bfa63
|
from __future__ import absolute_import
import ast
class MockAttrVisitor(ast.NodeVisitor):
    """Collect ``(lineno, attr)`` pairs for banned mock helper attributes."""

    # Attribute names that look like Mock assertions but do not actually
    # exist on Mock objects, so calling them silently does nothing.
    non_existent_methods = frozenset((
        'assert_calls',
        'assert_not_called',
        'assert_called',
        'assert_called_once',
        'not_called',
        'called_once',
        'called_once_with',
    ))

    def __init__(self):
        self.errors = []

    def visit_Attribute(self, node):
        # Walk children first so chained attribute accesses are all seen.
        self.generic_visit(node)
        attr = node.attr
        if attr in self.non_existent_methods:
            self.errors.append((node.lineno, attr))
class MockCheck(object):
    """flake8 plugin entry point reporting banned mock helper calls (C900)."""

    name = 'mock-checker'
    version = '0'
    _error_tmpl = "C900 Mock function call is banned: %s"

    def __init__(self, tree, filename=None):
        self.tree = tree

    def run(self):
        # Yield one (line, col, message, type) tuple per offending access,
        # as required by the flake8 checker protocol.
        visitor = MockAttrVisitor()
        visitor.visit(self.tree)
        checker_type = type(self)
        for lineno, attr in visitor.errors:
            yield lineno, 0, self._error_tmpl % (attr,), checker_type
|
Natim/sentry
|
src/sentry/lint/mock_check.py
|
Python
|
bsd-3-clause
| 974
|
[
"VisIt"
] |
26abe0c7893d62fb05c0423b126042647407faee9d58a9caf0fb585c401bd43b
|
# Copyright (c) 2009 Brian Zimmer <bzimmer@ziclix.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from decimal import Decimal
from sqlalchemy.sql import func
from sqlalchemy.orm import eagerload
from collections import defaultdict
from openroom import Exif, Iptc, Image, Develop
def stats(seq):
    """Group ``(key, value)`` pairs and yield per-key summary statistics.

    The first item yielded is the ``{key: [values]}`` mapping itself; then,
    for each key, a ``(key, mean, mode, count)`` tuple is yielded.
    """
    grouped = defaultdict(list)
    for key, value in seq:
        grouped[key].append(value)
    yield grouped
    for key, values in grouped.items():
        # Frequency table for the mode.
        counts = defaultdict(int)
        for v in values:
            counts[v] += 1
        # Highest count wins; ties broken in favour of the larger value,
        # exactly like taking sorted((count, value))[-1].
        mode = max(counts.items(), key=lambda kv: (kv[1], kv[0]))[0]
        mean = sum(values) / len(values)
        yield key, mean, mode, len(values)
class Reports(object):
    """Report queries over the photo database (images, EXIF, IPTC, develop)."""

    def __init__(self, session):
        # SQLAlchemy Session that all report queries run against.
        self.session = session

    def query(self, *entity):
        """Shorthand for ``self.session.query(*entity)``."""
        return self.session.query(*entity)

    def imageCounts(self):
        """Yield (year, month, count) tuples for images with EXIF year >= 2007."""
        q = self.query(Image, func.count()).options(eagerload("exif")).join(Exif)
        q = q.filter(Exif.dateYear >= 2007)
        q = q.group_by(Exif.dateYear).group_by(Exif.dateMonth)
        for a, cnt in q:
            yield a.exif.dateYear, a.exif.dateMonth, cnt

    def locations(self):
        """Yield (location, city, country) names for picked images with full IPTC data."""
        # NOTE: ``== True`` / ``!= None`` are intentional -- SQLAlchemy
        # overloads them to emit SQL (= TRUE / IS NOT NULL); ``is not None``
        # would not produce a filter clause.
        q = self.query(Image).join(Iptc).filter(Image.pick == True)
        q = q.filter(Iptc.location != None).filter(Iptc.city != None).filter(Iptc.country != None)
        for a in q:
            yield a.iptc.location.name, a.iptc.city.name, a.iptc.country.name

    def focalLengths(self, pick=None):
        """Return stats() over (lens, focal length) pairs, optionally filtered by pick."""
        q = self.query(Image).options(eagerload("exif")).join(Exif)
        if pick is not None:
            q = q.filter(Image.pick == pick)
        q = q.filter(Exif.lens != None).filter(Exif.focalLength != None).filter(Exif.lens != None)
        return stats((g.exif.lens, g.exif.focalLength.to_integral()) for g in q.all())

    def crops(self, pick=None):
        """Return stats() over (lens, crop ratio) pairs, optionally filtered by pick."""
        q = self.query(Image).options(eagerload("develop"), eagerload("exif")).join(Develop).join(Exif)
        if pick is not None:
            q = q.filter(Image.pick == pick)
        q = q.filter(Develop.croppedHeight != None).filter(Develop.croppedWidth != None).filter(Exif.lens != None)
        c = list()
        for g in q.all():
            if g.develop.fileHeight is None:
                continue
            # Crop ratio: retained (height + width) over original (height + width).
            p = (g.develop.croppedHeight + g.develop.croppedWidth) / (g.develop.fileHeight + g.develop.fileWidth)
            c.append((g.exif.lens, Decimal("%0.2f" % (p))))
        return stats(c)
|
bzimmer/openroom
|
openroom/reports.py
|
Python
|
mit
| 3,462
|
[
"Brian"
] |
97fb665f311e7374ae6e94ce75de8d2bc55d2989f698cf69469d869e9c901c94
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.validation import NotFittedError
from ..utils.multiclass import check_classification_targets
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
# Public API of this module.
__all__ = ["DecisionTreeClassifier",
           "DecisionTreeRegressor",
           "ExtraTreeClassifier",
           "ExtraTreeRegressor"]

# =============================================================================
# Types and constants
# =============================================================================

# Array dtypes expected by the Cython backend: DTYPE for X (see
# check_array(dtype=DTYPE) in fit), DOUBLE for y and sample weights.
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE

# Split-quality criteria, keyed by the strings accepted via ``criterion=``.
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}

# Splitter implementations for dense vs. sparse input matrices, keyed by
# the strings accepted via ``splitter=``.
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
                   "random": _splitter.RandomSplitter}

SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
                    "random": _splitter.RandomSparseSplitter}

# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
                                          _LearntSelectorMixin)):
    """Base class for decision trees.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_weight_fraction_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state,
                 class_weight=None,
                 presort=False):
        # Hyper-parameters are stored verbatim; all validation is deferred
        # to fit(), per the scikit-learn estimator contract.
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.class_weight = class_weight
        self.presort = presort

        # Fitted attributes, populated by fit().
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.tree_ = None
        self.max_features_ = None

    def fit(self, X, y, sample_weight=None, check_input=True,
            X_idx_sorted=None):
        """Build a decision tree from the training set (X, y).

        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression). In the regression case, use ``dtype=np.float64`` and
            ``order='C'`` for maximum efficiency.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many tree
            are grown on the same dataset, this allows the ordering to be
            cached between trees. If None, the data will be sorted here.
            Don't use this parameter unless you know what to do.

        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)
        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csc")
            if issparse(X):
                X.sort_indices()

                if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
                    raise ValueError("No support for np.int64 index based "
                                     "sparse matrices")

        # Determine output settings
        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)

        y = np.atleast_1d(y)
        expanded_class_weight = None

        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))

        self.n_outputs_ = y.shape[1]

        if is_classification:
            check_classification_targets(y)
            y = np.copy(y)

            self.classes_ = []
            self.n_classes_ = []

            if self.class_weight is not None:
                y_original = np.copy(y)

            # Encode labels as contiguous integer indices, per output.
            # NOTE(review): ``np.int`` is deprecated in NumPy >= 1.20;
            # ``np.intp`` would be the explicit modern choice here.
            y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
            for k in range(self.n_outputs_):
                classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])
            y = y_store_unique_indices

            if self.class_weight is not None:
                expanded_class_weight = compute_sample_weight(
                    self.class_weight, y_original)

        else:
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_

        self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)

        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        # Check parameters
        max_depth = ((2 ** 31) - 1 if self.max_depth is None
                     else self.max_depth)
        max_leaf_nodes = (-1 if self.max_leaf_nodes is None
                          else self.max_leaf_nodes)

        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    'Invalid value for max_features. Allowed string '
                    'values are "auto", "sqrt" or "log2".')
        elif self.max_features is None:
            max_features = self.n_features_
        elif isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float: interpreted as a fraction of n_features
            if self.max_features > 0.0:
                max_features = max(1, int(self.max_features * self.n_features_))
            else:
                max_features = 0

        self.max_features_ = max_features

        if len(y) != n_samples:
            raise ValueError("Number of labels=%d does not match "
                             "number of samples=%d" % (len(y), n_samples))
        if self.min_samples_split <= 0:
            raise ValueError("min_samples_split must be greater than zero.")
        if self.min_samples_leaf <= 0:
            raise ValueError("min_samples_leaf must be greater than zero.")
        if not 0 <= self.min_weight_fraction_leaf <= 0.5:
            raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero. ")
        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")
        if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
            raise ValueError("max_leaf_nodes must be integral number but was "
                             "%r" % max_leaf_nodes)
        if -1 < max_leaf_nodes < 2:
            raise ValueError(("max_leaf_nodes {0} must be either smaller than "
                              "0 or larger than 1").format(max_leaf_nodes))

        if sample_weight is not None:
            if (getattr(sample_weight, "dtype", None) != DOUBLE or
                    not sample_weight.flags.contiguous):
                sample_weight = np.ascontiguousarray(
                    sample_weight, dtype=DOUBLE)
            if len(sample_weight.shape) > 1:
                raise ValueError("Sample weights array has more "
                                 "than one dimension: %d" %
                                 len(sample_weight.shape))
            if len(sample_weight) != n_samples:
                raise ValueError("Number of weights=%d does not match "
                                 "number of samples=%d" %
                                 (len(sample_weight), n_samples))

        # Fold class weights into the per-sample weights (multiplicatively
        # when both are given).
        if expanded_class_weight is not None:
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight

        # Set min_weight_leaf from min_weight_fraction_leaf
        if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
            min_weight_leaf = (self.min_weight_fraction_leaf *
                               np.sum(sample_weight))
        else:
            min_weight_leaf = 0.

        # Set min_samples_split sensibly
        min_samples_split = max(self.min_samples_split,
                                2 * self.min_samples_leaf)

        presort = self.presort
        # Allow presort to be 'auto', which means True if the dataset is dense,
        # otherwise it will be False.
        if self.presort == 'auto' and issparse(X):
            presort = False
        elif self.presort == 'auto':
            presort = True

        if presort == True and issparse(X):
            raise ValueError("Presorting is not supported for sparse matrices.")

        # If multiple trees are built on the same dataset, we only want to
        # presort once. Splitters now can accept presorted indices if desired,
        # but do not handle any presorting themselves. Ensemble algorithms which
        # desire presorting must do presorting themselves and pass that matrix
        # into each tree.
        if X_idx_sorted is None and presort:
            X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
                                             dtype=np.int32)

        if presort and X_idx_sorted.shape != X.shape:
            raise ValueError("The shape of X (X.shape = {}) doesn't match "
                             "the shape of X_idx_sorted (X_idx_sorted"
                             ".shape = {})".format(X.shape,
                                                   X_idx_sorted.shape))

        # Build tree
        criterion = self.criterion
        if not isinstance(criterion, Criterion):
            if is_classification:
                criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
                                                         self.n_classes_)
            else:
                criterion = CRITERIA_REG[self.criterion](self.n_outputs_)

        SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS

        splitter = self.splitter
        if not isinstance(self.splitter, Splitter):
            splitter = SPLITTERS[self.splitter](criterion,
                                                self.max_features_,
                                                self.min_samples_leaf,
                                                min_weight_leaf,
                                                random_state,
                                                self.presort)

        self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)

        # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
        if max_leaf_nodes < 0:
            builder = DepthFirstTreeBuilder(splitter, min_samples_split,
                                            self.min_samples_leaf,
                                            min_weight_leaf,
                                            max_depth)
        else:
            builder = BestFirstTreeBuilder(splitter, min_samples_split,
                                           self.min_samples_leaf,
                                           min_weight_leaf,
                                           max_depth,
                                           max_leaf_nodes)

        builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)

        # For single-output problems, unwrap the per-output lists so
        # classes_/n_classes_ are scalars/arrays rather than length-1 lists.
        if self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]

        return self

    def _validate_X_predict(self, X, check_input):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before exploiting the model.")

        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csr")
            if issparse(X) and (X.indices.dtype != np.intc or
                                X.indptr.dtype != np.intc):
                raise ValueError("No support for np.int64 index based "
                                 "sparse matrices")

        n_features = X.shape[1]
        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))

        return X

    def predict(self, X, check_input=True):
        """Predict class or regression value for X.

        For a classification model, the predicted class for each sample in X is
        returned. For a regression model, the predicted value based on X is
        returned.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predict values.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)
        n_samples = X.shape[0]

        # Classification: pick the class with the highest leaf frequency.
        if isinstance(self, ClassifierMixin):
            if self.n_outputs_ == 1:
                return self.classes_.take(np.argmax(proba, axis=1), axis=0)
            else:
                predictions = np.zeros((n_samples, self.n_outputs_))
                for k in range(self.n_outputs_):
                    predictions[:, k] = self.classes_[k].take(
                        np.argmax(proba[:, k], axis=1),
                        axis=0)
                return predictions

        # Regression: the leaf value itself is the prediction.
        else:
            if self.n_outputs_ == 1:
                return proba[:, 0]
            else:
                return proba[:, :, 0]

    def apply(self, X, check_input=True):
        """
        Returns the index of the leaf that each sample is predicted as.

        Parameters
        ----------
        X : array_like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        X_leaves : array_like, shape = [n_samples,]
            For each datapoint x in X, return the index of the leaf x
            ends up in. Leaves are numbered within
            ``[0; self.tree_.node_count)``, possibly with gaps in the
            numbering.
        """
        X = self._validate_X_predict(X, check_input)
        return self.tree_.apply(X)

    @property
    def feature_importances_(self):
        """Return the feature importances.

        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, call `fit` before"
                                 " `feature_importances_`.")

        return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.

    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
            - If int, then consider `max_features` features at each split.
            - If float, then `max_features` is a percentage and
              `int(max_features * n_features)` features are considered at each
              split.
            - If "auto", then `max_features=sqrt(n_features)`.
            - If "sqrt", then `max_features=sqrt(n_features)`.
            - If "log2", then `max_features=log2(n_features)`.
            - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    class_weight : dict, list of dicts, "balanced" or None, optional
                   (default=None)
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        For multi-output, the weights of each column of y will be multiplied.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    presort : bool, optional (default=False)
        Whether to presort the data to speed up the finding of best splits in
        fitting. For the default settings of a decision tree on large
        datasets, setting this to true may slow down the training process.
        When using either a smaller dataset or a restricted depth, this may
        speed up the training.

    Attributes
    ----------
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem),
        or a list of arrays of class labels (multi-output problem).

    feature_importances_ : array of shape = [n_features]
        The feature importances. The higher, the more important the
        feature. The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance [4]_.

    max_features_ : int,
        The inferred value of max_features.

    n_classes_ : int or list
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ...                             # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """
    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None,
                 presort=False):
        # All hyper-parameters are forwarded unchanged to the base class.
        super(DecisionTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state,
            presort=presort)

    def predict_proba(self, X, check_input=True):
        """Predict class probabilities of the input samples X.

        The predicted class probability is the fraction of samples of the same
        class in a leaf.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)

        if self.n_outputs_ == 1:
            proba = proba[:, :self.n_classes_]
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            # Rows with zero total weight would divide by zero; setting
            # their normalizer to 1 leaves the row all-zero instead.
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer

            return proba

        else:
            # Multi-output: normalize each output's probability block
            # independently and return a list of arrays.
            all_proba = []

            for k in range(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)

            return all_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)

        else:
            # Take the log in place for each output's probability array.
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])

            return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error, which is equal to
        variance reduction as feature selection criterion.
    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    presort : bool, optional (default=False)
        Whether to presort the data to speed up the finding of best splits in
        fitting. For the default settings of a decision tree on large
        datasets, setting this to true may slow down the training process.
        When using either a smaller dataset or a restricted depth, this may
        speed up the training.

    Attributes
    ----------
    feature_importances_ : array of shape = [n_features]
        The feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought
        by that feature. It is also known as the Gini importance [4]_.
    max_features_ : int,
        The inferred value of max_features.
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ...                             # doctest: +SKIP
    ...
    array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
            0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
    """

    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 presort=False):
        # Pure configuration holder: all fitting/prediction logic lives in
        # BaseDecisionTree. This subclass only fixes the regression defaults
        # ("mse" criterion) and omits classification-only options such as
        # class_weight.
        super(DecisionTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state,
            presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
    decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # Identical to DecisionTreeClassifier apart from the defaults:
        # splitter="random" draws random thresholds and max_features="auto"
        # subsamples features, which together produce the "extremely
        # randomized" behaviour described in the class docstring.
        super(ExtraTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
    decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None):
        # Identical to DecisionTreeRegressor apart from the randomized
        # defaults (splitter="random", max_features="auto"); see the class
        # docstring for why.
        super(ExtraTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
|
yanlend/scikit-learn
|
sklearn/tree/tree.py
|
Python
|
bsd-3-clause
| 37,683
|
[
"Brian"
] |
6762bd19d4a7b7aeede5de2d62f6a9ae5591b4d47805c3bc640f318a075cdd50
|
"""
Retrieves dependencies from setup.py file
"""
import ast
from os import path
import pdb
class SetupVisitor(ast.NodeVisitor):
    """AST visitor that extracts ``install_requires`` from a setup.py module.

    Records every module-level assignment of a list literal to a name
    containing "req" as a candidate requirements list, and resolves the
    ``install_requires`` keyword of the ``setup(...)`` call either directly
    (a list literal) or through one of those candidates. Results accumulate
    in ``self.deps``; ``self.install_requires_parse_fail`` is set when the
    expression could not be resolved statically.
    """

    def __init__(self):
        # True when install_requires was an expression we cannot resolve
        # statically (e.g. a function call or attribute access).
        self.install_requires_parse_fail = False
        # Candidate lists: name -> list of dependency strings.
        self.candidates = {}
        # Resolved dependency strings.
        self.deps = []

    def visit_Assign(self, node):
        # Record simple `NAME = [...]` assignments whose name mentions "req",
        # e.g. `install_requires = [...]` or `REQUIREMENTS = [...]`.
        if len(node.targets) == 1 and isinstance(node.targets[0], ast.Name):
            name = node.targets[0].id
            if 'req' in name.lower() and isinstance(node.value, ast.List):
                self.candidates[name] = self.deps_from_ast_list(node.value)

    def visit_Call(self, node):
        if isinstance(node.func, ast.Name) and node.func.id == 'setup':
            for kw in node.keywords:
                if kw.arg == 'install_requires':
                    self.record_deps_from_expr(kw.value)

    def deps_from_ast_list(self, ast_list):
        """Return the string elements of an ast.List node."""
        # ast.Str was deprecated in Python 3.8 and removed in 3.12; string
        # literals are ast.Constant nodes with a str value since 3.8.
        return [elt.value for elt in ast_list.elts
                if isinstance(elt, ast.Constant) and isinstance(elt.value, str)]

    def record_deps_from_expr(self, expr_node):
        """Resolve the install_requires expression into self.deps."""
        if isinstance(expr_node, ast.List):
            self.deps.extend(self.deps_from_ast_list(expr_node))
        elif isinstance(expr_node, ast.Name):
            if expr_node.id in self.candidates:
                self.deps.extend(self.candidates[expr_node.id])
        else:
            # Anything other than a literal list or a previously recorded
            # name is beyond static analysis.
            self.install_requires_parse_fail = True
def deps(project_dir):
    """Return the dependency list parsed from ``<project_dir>/setup.py``.

    Returns None when the project has no setup.py, or (via
    deps_from_setup_file) when install_requires could not be resolved
    statically.
    """
    setup_file = path.join(project_dir, 'setup.py')
    if path.exists(setup_file):
        return deps_from_setup_file(setup_file)
    return None
def deps_from_setup_file(setup_file):
    """Parse ``setup_file`` and return its install_requires dependency list.

    Returns None when the ``install_requires`` expression could not be
    resolved statically by SetupVisitor.
    """
    with open(setup_file) as setupf:
        root = ast.parse(setupf.read())
    visitor = SetupVisitor()
    visitor.visit(root)
    if visitor.install_requires_parse_fail:
        return None
    return visitor.deps
    # (An unreachable trailing `return None` after the if/else was removed:
    # both branches above already return.)
|
sourcegraph/python-deps
|
setupdep.py
|
Python
|
bsd-2-clause
| 1,889
|
[
"VisIt"
] |
9818a3e6fa53d50547ed54760f1214ad2d4816ee346a98d0f44962d0b286260b
|
# Copyright Yair Benita Y.Benita@pharm.uu.nl
# Biopython (http://biopython.org) license applies
"""Simple protein analysis.
Example,
X = ProteinAnalysis("MAEGEITTFTALTEKFNLPPGNYKKPKLLYCSNGGHFLRILPDGTVDGTRDRSDQHIQLQLSAESVGEVYIKSTETGQYLAMDTSGLLYGSQTPSEECLFLERLEENHYNTYTSKKHAEKNWFVGLKKNGSCKRGPRTHYGQKAILFLPLPV")
print X.count_amino_acids()
print X.get_amino_acids_percent()
print X.molecular_weight()
print X.aromaticity()
print X.instability_index()
print X.flexibility()
print X.isoelectric_point()
print X.secondary_structure_fraction()
print X.protein_scale(ProtParamData.kd, 9, 0.4)
"""
import sys
import ProtParamData, IsoelectricPoint
from ProtParamData import kd # Added by Iddo to enable the gravy method
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio.Data import IUPACData
#from BioModule import
class ProteinAnalysis(object):
    """Class containing methods for protein analysis.

    The class init method takes only one argument, the protein sequence as a
    string and builds a sequence object using the Bio.Seq module. This is done
    just to make sure the sequence is a protein sequence and not anything else.
    """

    def __init__(self, ProtSequence):
        # NOTE(review): only a fully lower-case string is upper-cased here;
        # mixed-case input is stored as given -- confirm that is intended.
        if ProtSequence.islower():
            self.sequence = Seq(ProtSequence.upper(), IUPAC.protein)
        else:
            self.sequence = Seq(ProtSequence, IUPAC.protein)
        self.amino_acids_content = None  # lazy cache, filled by count_amino_acids()
        self.amino_acids_percent = None  # lazy cache, filled by get_amino_acids_percent()
        self.length = len(self.sequence)

    def count_amino_acids(self):
        """Count standard amino acids, returns a dict.

        Simply counts the number times an amino acid is repeated in the protein
        sequence. Returns a dictionary {AminoAcid:Number} and also stores the
        dictionary in self.amino_acids_content.
        """
        ProtDic = dict([ (k, 0) for k in IUPACData.protein_letters])
        for i in ProtDic:
            ProtDic[i]=self.sequence.count(i)
        self.amino_acids_content = ProtDic
        return ProtDic

    def get_amino_acids_percent(self):
        """Calculate the amino acid content in percents.

        The same as count_amino_acids only returns the Number in percentage of
        entire sequence. Returns a dictionary and stores the dictionary in
        self.amino_acids_percent.

        input is the dictionary from CountAA.
        output is a dictionary with AA as keys.
        """
        if not self.amino_acids_content:
            self.count_amino_acids()
        PercentAA = {}
        for i in self.amino_acids_content:
            if self.amino_acids_content[i] > 0:
                PercentAA[i]=self.amino_acids_content[i]/float(self.length)
            else:
                PercentAA[i] = 0
        self.amino_acids_percent = PercentAA
        return PercentAA

    def molecular_weight(self):
        """Calculate MW from Protein sequence"""
        # make local dictionary for speed
        MwDict = {}
        # remove a molecule of water from the amino acid weight.
        for i in IUPACData.protein_weights:
            MwDict[i] = IUPACData.protein_weights[i] - 18.02
        MW = 18.02  # add just one water molecule for the whole sequence.
        for i in self.sequence:
            MW += MwDict[i]
        return MW

    def aromaticity(self):
        """Calculate the aromaticity according to Lobry, 1994.

        Calculates the aromaticity value of a protein according to Lobry, 1994.
        It is simply the relative frequency of Phe+Trp+Tyr.
        """
        if not self.amino_acids_percent:
            self.get_amino_acids_percent()
        Arom= self.amino_acids_percent['Y']+self.amino_acids_percent['W']+self.amino_acids_percent['F']
        return Arom

    def instability_index(self):
        """Calculate the instability index according to Guruprasad et al 1990.

        Implementation of the method of Guruprasad et al. 1990 to test a
        protein for stability. Any value above 40 means the protein is unstable
        (has a short half life).
        See: Guruprasad K., Reddy B.V.B., Pandit M.W.
        Protein Engineering 4:155-161(1990).
        """
        # make the dictionary local for speed.
        DIWV=ProtParamData.DIWV.copy()
        score=0.0
        # Sum the dipeptide instability weights over all adjacent pairs.
        for i in range(self.length - 1):
            DiPeptide=DIWV[self.sequence[i]][self.sequence[i+1]]
            score += DiPeptide
        return (10.0/self.length) * score

    def flexibility(self):
        """Calculate the flexibility according to Vihinen, 1994.

        No argument to change window size because parameters are specific for a
        window=9. The parameters used are optimized for determining the flexibility.
        """
        Flex = ProtParamData.Flex.copy()
        Window=9
        Weights=[0.25,0.4375,0.625,0.8125,1]
        List=[]
        for i in range(self.length - Window):
            SubSeq=self.sequence[i:i+Window]
            score = 0.0
            # Sum the four symmetric residue pairs, weighted by their
            # distance from the window centre.
            for j in range(Window//2):
                score += (Flex[SubSeq[j]]+Flex[SubSeq[Window-j-1]]) * Weights[j]
            # NOTE(review): Window//2+1 is index 5, but the centre residue of
            # a 9-residue window is index 4 (Window//2). This looks like an
            # off-by-one -- confirm against Vihinen 1994 before changing.
            score += Flex[SubSeq[Window//2+1]]
            List.append(score/5.25)
        return List

    def gravy(self):
        """Calculate the gravy according to Kyte and Doolittle."""
        ProtGravy=0.0
        for i in self.sequence:
            ProtGravy += kd[i]
        return ProtGravy/self.length

    # this method is used to make a list of relative weight of the
    # window edges compared to the window center. The weights are linear.
    # it actually generates half a list. For a window of size 9 and edge 0.4
    # you get a list of [0.4, 0.55, 0.7, 0.85].
    def _weight_list(self, window, edge):
        # NOTE: the local name `list` shadows the builtin inside this method.
        unit = ((1.0-edge)/(window-1))*2
        list = [0.0]*(window//2)
        for i in range(window//2):
            list[i] = edge + unit * i
        return list

    # The weight list returns only one tail. If the list should be [0.4,0.7,1.0,0.7,0.4]
    # what you actually get from _weights_list is [0.4,0.7]. The correct calculation is done
    # in the loop.
    def protein_scale(self, ParamDict, Window, Edge=1.0):
        """Compute a profile by any amino acid scale.

        An amino acid scale is defined by a numerical value assigned to each type of
        amino acid. The most frequently used scales are the hydrophobicity or
        hydrophilicity scales and the secondary structure conformational parameters
        scales, but many other scales exist which are based on different chemical and
        physical properties of the amino acids. You can set several parameters that
        control the computation of a scale profile, such as the window size and the
        window edge relative weight value. WindowSize: The window size is the length
        of the interval to use for the profile computation. For a window size n, we
        use the i- ( n-1)/2 neighboring residues on each side of residue it compute
        the score for residue i. The score for residue is the sum of the scale values
        for these amino acids, optionally weighted according to their position in the
        window. Edge: The central amino acid of the window always has a weight of 1.
        By default, the amino acids at the remaining window positions have the same
        weight, but you can make the residue at the center of the window have a
        larger weight than the others by setting the edge value for the residues at
        the beginning and end of the interval to a value between 0 and 1. For
        instance, for Edge=0.4 and a window size of 5 the weights will be: 0.4, 0.7,
        1.0, 0.7, 0.4. The method returns a list of values which can be plotted to
        view the change along a protein sequence. Many scales exist. Just add your
        favorites to the ProtParamData modules.

        Similar to expasy's ProtScale: http://www.expasy.org/cgi-bin/protscale.pl
        """
        # generate the weights
        weight = self._weight_list(Window,Edge)
        list = []
        # the score in each Window is divided by the sum of weights
        sum_of_weights = 0.0
        for i in weight: sum_of_weights += i
        # since the weight list is one sided:
        sum_of_weights = sum_of_weights*2+1
        for i in range(self.length-Window+1):
            subsequence = self.sequence[i:i+Window]
            score = 0.0
            for j in range(Window//2):
                # walk from the outside of the Window towards the middle.
                # Iddo: try/except clauses added to avoid raising an exception on a non-standard amino acid
                try:
                    score += weight[j] * ParamDict[subsequence[j]] + weight[j] * ParamDict[subsequence[Window-j-1]]
                except KeyError:
                    sys.stderr.write('warning: %s or %s is not a standard amino acid.\n' %
                                     (subsequence[j],subsequence[Window-j-1]))
            # Now add the middle value, which always has a weight of 1.
            if subsequence[Window//2] in ParamDict:
                score += ParamDict[subsequence[Window//2]]
            else:
                sys.stderr.write('warning: %s is not a standard amino acid.\n' % (subsequence[Window//2]))
            list.append(score/sum_of_weights)
        return list

    def isoelectric_point(self):
        """Calculate the isoelectric point.

        This method uses the module IsoelectricPoint to calculate the pI of a protein.
        """
        if not self.amino_acids_content:
            self.count_amino_acids()
        X = IsoelectricPoint.IsoelectricPoint(self.sequence, self.amino_acids_content)
        return X.pi()

    def secondary_structure_fraction(self):
        """Calculate fraction of helix, turn and sheet.

        This methods returns a list of the fraction of amino acids which tend
        to be in Helix, Turn or Sheet.
        Amino acids in helix: V, I, Y, F, W, L.
        Amino acids in Turn: N, P, G, S.
        Amino acids in sheet: E, M, A, L.
        Returns a tuple of three floats (Helix, Turn, Sheet).
        """
        if not self.amino_acids_percent:
            self.get_amino_acids_percent()
        Helix = self.amino_acids_percent['V'] + self.amino_acids_percent['I'] + self.amino_acids_percent['Y'] + self.amino_acids_percent['F'] + self.amino_acids_percent['W'] + self.amino_acids_percent['L']
        Turn = self.amino_acids_percent['N'] + self.amino_acids_percent['P'] + self.amino_acids_percent['G'] + self.amino_acids_percent['S']
        Sheet = self.amino_acids_percent['E'] + self.amino_acids_percent['M'] + self.amino_acids_percent['A'] + self.amino_acids_percent['L']
        return Helix, Turn, Sheet
|
asherkhb/coge
|
bin/last_wrapper/Bio/SeqUtils/ProtParam.py
|
Python
|
bsd-2-clause
| 10,850
|
[
"Biopython"
] |
017e9c872ca30a0fba180b2ab2d4825c28f79eeda24a63c2302c661b4df1902b
|
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from nose.tools import assert_true, assert_in, assert_false # pylint: disable=E0611
from auth.authz import get_user_by_email, get_course_groupname_for_role
from django.conf import settings
from selenium.webdriver.common.keys import Keys
import time
import os
from django.contrib.auth.models import Group
from logging import getLogger
logger = getLogger(__name__)
from terrain.browser import reset_data
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
@step('I (?:visit|access|open) the Studio homepage$')
def i_visit_the_studio_homepage(_step):
    # To make this go to port 8001, put
    # LETTUCE_SERVER_PORT = 8001
    # in your settings.py file.
    world.visit('/')
    # The signed-out homepage always carries a sign-in link; use it as a
    # sanity check that the page actually rendered.
    signin_css = 'a.action-signin'
    assert world.is_css_present(signin_css)


@step('I am logged into Studio$')
def i_am_logged_into_studio(_step):
    log_into_studio()


@step('I confirm the alert$')
def i_confirm_with_ok(_step):
    # Accept (press OK on) the currently open browser alert dialog.
    world.browser.get_alert().accept()


@step(u'I press the "([^"]*)" delete icon$')
def i_press_the_category_delete_icon(_step, category):
    # Map the human-readable category from the scenario text to the CSS
    # selector of the matching delete icon.
    if category == 'section':
        css = 'a.delete-button.delete-section-button span.delete-icon'
    elif category == 'subsection':
        css = 'a.delete-button.delete-subsection-button span.delete-icon'
    else:
        assert False, 'Invalid category: %s' % category
    world.css_click(css)


@step('I have opened a new course in Studio$')
def i_have_opened_a_new_course(_step):
    open_new_course()


@step('(I select|s?he selects) the new course')
def select_new_course(_step, whom):
    # `whom` is the captured pronoun group from the regex; unused.
    course_link_css = 'a.course-link'
    world.css_click(course_link_css)


@step(u'I press the "([^"]*)" notification button$')
def press_the_notification_button(_step, name):
    # Because the notification uses a CSS transition,
    # Selenium will always report it as being visible.
    # This makes it very difficult to successfully click
    # the "Save" button at the UI level.
    # Instead, we use JavaScript to reliably click
    # the button.
    btn_css = 'div#page-notification a.action-%s' % name.lower()
    world.trigger_event(btn_css, event='focus')
    world.browser.execute_script("$('{}').click()".format(btn_css))
    world.wait_for_ajax_complete()


@step('I change the "(.*)" field to "(.*)"$')
def i_change_field_to_value(_step, field, value):
    # Field ids are the lower-cased, dash-joined form of the visible label,
    # e.g. "Course Start Date" -> "#course-start-date".
    field_css = '#%s' % '-'.join([s.lower() for s in field.split()])
    ele = world.css_find(field_css).first
    ele.fill(value)
    ele._element.send_keys(Keys.ENTER)


@step('I reset the database')
def reset_the_db(_step):
    """
    When running Lettuce tests using examples (i.e. "Confirmation is
    shown on save" in course-settings.feature), the normal hooks
    aren't called between examples. reset_data should run before each
    scenario to flush the test database. When this doesn't happen we
    get errors due to trying to insert a non-unique entry. So instead,
    we delete the database manually. This has the effect of removing
    any users and courses that have been created during the test run.
    """
    reset_data(None)


@step('I see a confirmation that my changes have been saved')
def i_see_a_confirmation(step):
    confirmation_css = '#alert-confirmation'
    assert world.is_css_present(confirmation_css)
def open_new_course():
    # Full setup for a fresh scenario: wipe courseware, create and sign in
    # the default user, then create and open the default course.
    world.clear_courses()
    create_studio_user()
    log_into_studio()
    create_a_course()


def create_studio_user(
        uname='robot',
        email='robot+studio@edx.org',
        password='test',
        is_staff=False):
    # Create, register and activate a Studio user; returns the user object.
    studio_user = world.UserFactory(
        username=uname,
        email=email,
        password=password,
        is_staff=is_staff)
    registration = world.RegistrationFactory(user=studio_user)
    registration.register(studio_user)
    registration.activate()
    return studio_user


def fill_in_course_info(
        name='Robot Super Course',
        org='MITx',
        num='101',
        run='2013_Spring'):
    # Populate the "create new course" form fields.
    world.css_fill('.new-course-name', name)
    world.css_fill('.new-course-org', org)
    world.css_fill('.new-course-number', num)
    world.css_fill('.new-course-run', run)


def log_into_studio(
        uname='robot',
        email='robot+studio@edx.org',
        password='test',
        name='Robot Studio'):
    world.log_in(username=uname, password=password, email=email, name=name)
    # Navigate to the studio dashboard
    world.visit('/')
    assert_in(uname, world.css_text('h2.title', timeout=10))


def add_course_author(user, course):
    """
    Add the user to the instructor group of the course
    so they will have the permissions to see it in studio
    """
    for role in ("staff", "instructor"):
        groupname = get_course_groupname_for_role(course.location, role)
        group, __ = Group.objects.get_or_create(name=groupname)
        user.groups.add(group)
    user.save()


def create_a_course():
    # Create the default course, grant authoring rights to the scenario's
    # user (or the default robot user), and open it from the dashboard.
    course = world.CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')
    world.scenario_dict['COURSE'] = course
    user = world.scenario_dict.get("USER")
    if not user:
        user = get_user_by_email('robot+studio@edx.org')
    add_course_author(user, course)
    # Navigate to the studio dashboard
    world.visit('/')
    course_link_css = 'a.course-link'
    world.css_click(course_link_css)
    course_title_css = 'span.course-title'
    assert_true(world.is_css_present(course_title_css))


def add_section(name='My Section'):
    link_css = 'a.new-courseware-section-button'
    world.css_click(link_css)
    name_css = 'input.new-section-name'
    save_css = 'input.new-section-name-save'
    world.css_fill(name_css, name)
    world.css_click(save_css)
    # The section name span only appears once the save round-trip finished.
    span_css = 'span.section-name-span'
    assert_true(world.is_css_present(span_css))


def add_subsection(name='Subsection One'):
    css = 'a.new-subsection-item'
    world.css_click(css)
    name_css = 'input.new-subsection-name-input'
    save_css = 'input.new-subsection-name-save'
    world.css_fill(name_css, name)
    world.css_click(save_css)


def set_date_and_time(date_css, desired_date, time_css, desired_time):
    world.css_fill(date_css, desired_date)
    # hit TAB to get to the time field
    e = world.css_find(date_css).first
    # pylint: disable=W0212
    e._element.send_keys(Keys.TAB)
    world.css_fill(time_css, desired_time)
    e = world.css_find(time_css).first
    e._element.send_keys(Keys.TAB)
    # Give the date/time picker JS a moment to commit the values.
    time.sleep(float(1))
@step('I have enabled the (.*) advanced module$')
def i_enabled_the_advanced_module(step, module):
    # Open the Advanced Settings page and set the course's advanced module
    # list to `module`, then save.
    step.given('I have opened a new course section in Studio')
    world.css_click('.nav-course-settings')
    world.css_click('.nav-course-settings-advanced a')
    type_in_codemirror(0, '["%s"]' % module)
    press_the_notification_button(step, 'Save')


@world.absorb
def create_course_with_unit():
    """
    Prepare for tests by creating a course with a section, subsection, and unit.
    Performs the following:
        Clear out all courseware
        Create a course with a section, subsection, and unit
        Create a user and make that user a course author
        Log the user into studio
        Open the course from the dashboard
        Expand the section and click on the New Unit link
    The end result is the page where the user is editing the new unit
    """
    world.clear_courses()
    course = world.CourseFactory.create()
    world.scenario_dict['COURSE'] = course
    section = world.ItemFactory.create(parent_location=course.location)
    world.ItemFactory.create(
        parent_location=section.location,
        category='sequential',
        display_name='Subsection One',
    )
    user = create_studio_user(is_staff=False)
    add_course_author(user, course)
    log_into_studio()
    world.css_click('a.course-link')
    world.wait_for_js_to_load()
    # Expand the section, then click the New Unit link it reveals.
    css_selectors = [
        'div.section-item a.expand-collapse-icon', 'a.new-unit-item'
    ]
    for selector in css_selectors:
        world.css_click(selector)
    world.wait_for_mathjax()
    world.wait_for_xmodule()
    assert world.is_css_present('ul.new-component-type')


@step('I have clicked the new unit button$')
@step(u'I am in Studio editing a new unit$')
def edit_new_unit(step):
    create_course_with_unit()


@step('the save notification button is disabled')
def save_button_disabled(step):
    button_css = '.action-save'
    disabled = 'is-disabled'
    assert world.css_has_class(button_css, disabled)


@step('the "([^"]*)" button is disabled')
def button_disabled(step, value):
    button_css = 'input[value="%s"]' % value
    assert world.css_has_class(button_css, 'is-disabled')


def _do_studio_prompt_action(intent, action):
    """
    Wait for a studio prompt to appear and press the specified action button
    See cms/static/js/views/feedback_prompt.js for implementation
    """
    assert intent in ['warning', 'error', 'confirmation', 'announcement',
                      'step-required', 'help', 'mini']
    assert action in ['primary', 'secondary']
    world.wait_for_present('div.wrapper-prompt.is-shown#prompt-{}'.format(intent))
    action_css = 'li.nav-item > a.action-{}'.format(action)
    world.trigger_event(action_css, event='focus')
    # Click via JavaScript: the CSS transition keeps Selenium from clicking
    # the button reliably at the UI level (same trick as the notification
    # button handler above).
    world.browser.execute_script("$('{}').click()".format(action_css))
    world.wait_for_ajax_complete()
    world.wait_for_present('div.wrapper-prompt.is-hiding#prompt-{}'.format(intent))


@world.absorb
def confirm_studio_prompt():
    _do_studio_prompt_action('warning', 'primary')


@step('I confirm the prompt')
def confirm_the_prompt(step):
    confirm_studio_prompt()


@step(u'I am shown a prompt$')
def i_am_shown_a_notification(step):
    assert world.is_css_present('.wrapper-prompt')


def type_in_codemirror(index, text):
    # Replace the entire contents of the index-th CodeMirror editor on the
    # page with `text`.
    world.wait(1)  # For now, slow this down so that it works. TODO: fix it.
    world.css_click("div.CodeMirror-lines", index=index)
    world.browser.execute_script("$('div.CodeMirror.CodeMirror-focused > div').css('overflow', '')")
    g = world.css_find("div.CodeMirror.CodeMirror-focused > div > textarea")
    # Select-all uses the Command key on Mac, Control elsewhere.
    if world.is_mac():
        g._element.send_keys(Keys.COMMAND + 'a')
    else:
        g._element.send_keys(Keys.CONTROL + 'a')
    g._element.send_keys(Keys.DELETE)
    g._element.send_keys(text)
    if world.is_firefox():
        world.trigger_event('div.CodeMirror', index=index, event='blur')
    world.wait_for_ajax_complete()


def upload_file(filename):
    # `filename` is resolved relative to the common test data root.
    path = os.path.join(TEST_ROOT, filename)
    # The file input is hidden by the styled upload dialog; unhide it so
    # attach_file can interact with it.
    world.browser.execute_script("$('input.file-input').css('display', 'block')")
    world.browser.attach_file('file', os.path.abspath(path))
    button_css = '.upload-dialog .action-upload'
    world.css_click(button_css)
@step(u'"([^"]*)" logs in$')
def other_user_login(step, name):
    # Log out the current user and sign in as `name` (email name@edx.org,
    # password "test"); records the user in world.scenario_dict['USER'].
    step.given('I log out')
    world.visit('/')
    signin_css = 'a.action-signin'
    world.is_css_present(signin_css)
    world.css_click(signin_css)

    def fill_login_form():
        login_form = world.browser.find_by_css('form#login_form')
        login_form.find_by_name('email').fill(name + '@edx.org')
        login_form.find_by_name('password').fill("test")
        login_form.find_by_name('submit').click()
    # The form may not be attached to the DOM yet; retry until it works.
    world.retry_on_exception(fill_login_form)
    assert_true(world.is_css_present('.new-course-button'))
    world.scenario_dict['USER'] = get_user_by_email(name + '@edx.org')


@step(u'the user "([^"]*)" exists( as a course (admin|staff member|is_staff))?$')
def create_other_user(_step, name, has_extra_perms, role_name):
    # `has_extra_perms` is the whole optional regex group; `role_name` the
    # inner alternative. Without the group a plain user is created.
    email = name + '@edx.org'
    user = create_studio_user(uname=name, password="test", email=email)
    if has_extra_perms:
        if role_name == "is_staff":
            user.is_staff = True
        else:
            if role_name == "admin":
                # admins get staff privileges, as well
                roles = ("staff", "instructor")
            else:
                roles = ("staff",)
            location = world.scenario_dict["COURSE"].location
            for role in roles:
                groupname = get_course_groupname_for_role(location, role)
                group, __ = Group.objects.get_or_create(name=groupname)
                user.groups.add(group)
        user.save()


@step('I log out')
def log_out(_step):
    world.visit('logout')


@step(u'I click on "edit a draft"$')
def i_edit_a_draft(_step):
    world.css_click("a.create-draft")
@step(u'I click on "replace with draft"$')
def i_replace_with_draft(_step):
    # Handler for the 'replace with draft' step.
    # Renamed from a duplicate `i_edit_a_draft` definition: the module
    # defined two functions under that name, so this one silently shadowed
    # the 'edit a draft' handler at module level. Lettuce dispatches on the
    # registered regex, not the function name, so the rename is safe.
    world.css_click("a.publish-draft")
@step(u'I publish the unit$')
def publish_unit(_step):
    # Flip the unit's visibility selector to 'public'.
    world.select_option('visibility-select', 'public')
|
TsinghuaX/edx-platform
|
cms/djangoapps/contentstore/features/common.py
|
Python
|
agpl-3.0
| 12,646
|
[
"VisIt"
] |
1bef8bcb82e553570c6366bba5abed987514b892304a392d7e6dbdce45adb587
|
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import shutil
import subprocess
import tempfile
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.playbook.role.definition import RoleDefinition
__all__ = ['RoleRequirement']
VALID_SPEC_KEYS = [
'name',
'role',
'scm',
'src',
'version',
]
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class RoleRequirement(RoleDefinition):
"""
Helper class for Galaxy, which is used to parse both dependencies
specified in meta/main.yml and requirements.yml files.
"""
def __init__(self):
    # RoleRequirement is used purely through its static parsing helpers;
    # deliberately skip RoleDefinition's initialisation.
    pass
@staticmethod
def repo_url_to_role_name(repo_url):
# gets the role name out of a repo like
# http://git.example.com/repos/repo.git" => "repo"
if '://' not in repo_url and '@' not in repo_url:
return repo_url
trailing_path = repo_url.split('/')[-1]
if trailing_path.endswith('.git'):
trailing_path = trailing_path[:-4]
if trailing_path.endswith('.tar.gz'):
trailing_path = trailing_path[:-7]
if ',' in trailing_path:
trailing_path = trailing_path.split(',')[0]
return trailing_path
@staticmethod
def role_spec_parse(role_spec):
    """Parse a legacy comma-separated role spec into its properties.

    Takes a repo and a version like
        git+http://git.example.com/repos/repo.git,v1.0
    and returns a dict of properties such as:
        {
            'scm': 'git',
            'src': 'http://git.example.com/repos/repo.git',
            'version': 'v1.0',
            'name': 'repo'
        }

    NOTE(review): empty or comment-only specs return a 4-tuple of Nones
    while every other path returns a dict -- callers must handle both
    shapes; confirm before normalizing the return type.
    """
    display.deprecated("The comma separated role spec format, use the yaml/explicit format instead. Line that trigger this: %s" % role_spec,
                       version="2.7")
    default_role_versions = dict(git='master', hg='tip')
    role_spec = role_spec.strip()
    role_version = ''
    if role_spec == "" or role_spec.startswith("#"):
        return (None, None, None, None)
    tokens = [s.strip() for s in role_spec.split(',')]
    # Assume github.com URLs are git repos (prefix with git+) rather than
    # tarballs, unless they end in '.tar.gz'.
    if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
        tokens[0] = 'git+' + tokens[0]
    if '+' in tokens[0]:
        (scm, role_url) = tokens[0].split('+')
    else:
        scm = None
        role_url = tokens[0]
    if len(tokens) >= 2:
        role_version = tokens[1]
    if len(tokens) == 3:
        role_name = tokens[2]
    else:
        role_name = RoleRequirement.repo_url_to_role_name(tokens[0])
    if scm and not role_version:
        # Fall back to the SCM's default branch/revision when no version
        # was given (git -> master, hg -> tip).
        role_version = default_role_versions.get(scm, '')
    return dict(scm=scm, src=role_url, version=role_version, name=role_name)
@staticmethod
def role_yaml_parse(role):
if isinstance(role, string_types):
name = None
scm = None
src = None
version = None
if ',' in role:
if role.count(',') == 1:
(src, version) = role.strip().split(',', 1)
elif role.count(',') == 2:
(src, version, name) = role.strip().split(',', 2)
else:
raise AnsibleError("Invalid role line (%s). Proper format is 'role_name[,version[,name]]'" % role)
else:
src = role
if name is None:
name = RoleRequirement.repo_url_to_role_name(src)
if '+' in src:
(scm, src) = src.split('+', 1)
return dict(name=name, src=src, scm=scm, version=version)
if 'role' in role:
name = role['role']
if ',' in name:
# Old style: {role: "galaxy.role,version,name", other_vars: "here" }
role = RoleRequirement.role_spec_parse(role['role'])
else:
del role['role']
role['name'] = name
else:
role = role.copy()
if 'src'in role:
# New style: { src: 'galaxy.role,version,name', other_vars: "here" }
if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
role["src"] = "git+" + role["src"]
if '+' in role["src"]:
(scm, src) = role["src"].split('+')
role["scm"] = scm
role["src"] = src
if 'name' not in role:
role["name"] = RoleRequirement.repo_url_to_role_name(role["src"])
if 'version' not in role:
role['version'] = ''
if 'scm' not in role:
role['scm'] = None
for key in list(role.keys()):
if key not in VALID_SPEC_KEYS:
role.pop(key)
return role
@staticmethod
def scm_archive_role(src, scm='git', name=None, version='HEAD'):
if scm not in ['hg', 'git']:
raise AnsibleError("- scm %s is not currently supported" % scm)
tempdir = tempfile.mkdtemp()
clone_cmd = [scm, 'clone', src, name]
with open('/dev/null', 'w') as devnull:
try:
popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
except:
raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
rc = popen.wait()
if rc != 0:
raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(clone_cmd), tempdir, rc))
if scm == 'git' and version:
checkout_cmd = [scm, 'checkout', version]
with open('/dev/null', 'w') as devnull:
try:
popen = subprocess.Popen(checkout_cmd, cwd=os.path.join(tempdir, name), stdout=devnull, stderr=devnull)
except (IOError, OSError):
raise AnsibleError("error executing: %s" % " ".join(checkout_cmd))
rc = popen.wait()
if rc != 0:
raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(checkout_cmd), tempdir, rc))
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
if scm == 'hg':
archive_cmd = ['hg', 'archive', '--prefix', "%s/" % name]
if version:
archive_cmd.extend(['-r', version])
archive_cmd.append(temp_file.name)
if scm == 'git':
archive_cmd = ['git', 'archive', '--prefix=%s/' % name, '--output=%s' % temp_file.name]
if version:
archive_cmd.append(version)
else:
archive_cmd.append('HEAD')
with open('/dev/null', 'w') as devnull:
popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, name),
stderr=devnull, stdout=devnull)
rc = popen.wait()
if rc != 0:
raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(archive_cmd), tempdir, rc))
shutil.rmtree(tempdir, ignore_errors=True)
return temp_file.name
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/playbook/role/requirement.py
|
Python
|
bsd-3-clause
| 8,216
|
[
"Galaxy"
] |
68525aff2569a51b35e83f55006e6e4fa262bcaf25d0e489daf0a6dced584ff6
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
from builtins import object
# standard library
# third party
import numpy as np
from scipy.cluster.hierarchy import fcluster, dendrogram
from scipy.spatial.distance import squareform
import fastcluster
import skbio
# Bio.Cluster's kmedoids is optional; when Biopython is missing the flag
# below makes Kmedoids.cluster degrade gracefully instead of crashing.
try:
    from Bio.Cluster import kmedoids
    Biopython_Unavailable = False
except ImportError:
    print("Biopython unavailable - kmedoids clustering disabled")
    Biopython_Unavailable = True
from sklearn.cluster import AffinityPropagation, DBSCAN, KMeans
from sklearn.manifold import spectral_embedding
# GMM was deprecated in scikit-learn version 0.18 and fully removed in 0.20
import pkg_resources
# NOTE(review): int() raises ValueError on non-numeric version segments
# (e.g. '1.0rc1', '0.24.dev0') -- confirm installed versions are plain x.y.z
sklearn_version = [int(x) for x in pkg_resources.get_distribution("scikit-learn").version.split('.')]
# On scikit-learn >= 0.20 only the GaussianMixture API exists
USE_GAUSSIAN_MIXTURE = tuple(sklearn_version) >= (0, 20, 0)
if USE_GAUSSIAN_MIXTURE:
    from sklearn.mixture import GaussianMixture
else:
    from sklearn.mixture import GMM
# treeCl
from .distance_matrix import DistanceMatrix, rbf, binsearch_mask, kmask, kscale, affinity, laplace, eigen, \
double_centre, normalise_rows, CoordinateMatrix
from .partition import Partition
from .utils import enum
from .errors import OptionError, isnumbercheck, rangecheck
# Pruning / local-scaling options consumed by the Spectral manager
options = enum(
    "PRUNING_NONE",
    "PRUNING_ESTIMATE",
    "PRUNING_MANUAL",
    "LOCAL_SCALE_MEDIAN",
    "LOCAL_SCALE_ESTIMATE",
    "LOCAL_SCALE_MANUAL")
# Coordinate-space clustering methods (used by Spectral and MDS managers)
methods = enum(
    "KMEANS",
    "GMM",
    "WARD")
# Linkage criteria accepted by Hierarchical.cluster (names map to fastcluster)
linkage = enum(
    "SINGLE",
    "COMPLETE",
    "AVERAGE",
    "WARD",
    "WEIGHTED",
    "CENTROID",
    "MEDIAN")
# Multidimensional-scaling flavours
mds = enum(
    "CLASSICAL",
    "METRIC")
# Spectral embedding flavours (see Spectral.cluster)
spectral = enum(
    "SPECTRAL",
    "KPCA",
    "ZELNIKMANOR")
def _get_threshold(linkmat, nclusters):
linkmat_size = len(linkmat)
if nclusters <= 1:
br_top = linkmat[linkmat_size - nclusters][2]
else:
br_top = linkmat[linkmat_size - nclusters + 1][2]
if nclusters >= len(linkmat):
br_bottom = 0
else:
br_bottom = linkmat[linkmat_size - nclusters][2]
threshold = 0.5 * (br_top + br_bottom)
return threshold
def _hclust(linkmat, nclusters):
    """Cut the linkage matrix at the height giving `nclusters` groups
    and wrap the flat cluster assignment in a Partition."""
    cut_height = _get_threshold(linkmat, nclusters)
    assignment = fcluster(linkmat, cut_height, criterion='distance')
    return Partition(assignment)
class ClusteringManager(object):
    """
    Clustering manager base class
    """
    def __init__(self, dm):
        """Accept a DistanceMatrix, or a numpy array convertible to one."""
        if isinstance(dm, np.ndarray):
            dm = DistanceMatrix.from_array(dm)
        if not isinstance(dm, DistanceMatrix):
            raise ValueError('Distance matrix should be a numpy array or treeCl.DistanceMatrix')
        self.dm = dm

    def __str__(self):
        return str(self.dm)

    def get_dm(self, noise):
        """Return the distance values, jittered with noise when requested."""
        if noise:
            return self.dm.add_noise().values
        return self.dm.values
class EMMixin(object):
    """
    Mixin providing k-means and Gaussian-mixture cluster estimation
    over a coordinate matrix.
    """

    @staticmethod
    def kmeans(nclusters, coords):
        """Fit k-means (50 restarts, 500 iterations) and return the labels."""
        estimator = KMeans(n_clusters=nclusters, n_init=50, max_iter=500)
        estimator.fit(coords)
        return Partition(estimator.labels_)

    @staticmethod
    def gmm(nclusters, coords, n_init=50, n_iter=500):
        """Fit a Gaussian mixture model and return the predicted labels."""
        # choose the API matching the installed scikit-learn (see module top)
        if USE_GAUSSIAN_MIXTURE:
            estimator = GaussianMixture(n_components=nclusters, n_init=n_init, max_iter=n_iter)
        else:
            estimator = GMM(n_components=nclusters, n_init=n_init, n_iter=n_iter)
        estimator.fit(coords)
        return Partition(estimator.predict(coords))
def _check_val(opt, min_, max_):
    """Validate that `opt` is a number within [min_, max_]; the delegated
    helpers raise on failure."""
    isnumbercheck(opt)
    rangecheck(opt, min_, max_)
class Spectral(ClusteringManager, EMMixin):
    """
    Manager for spectral clustering and Kernel PCA clustering
    """
    def __init__(self, dm,
                 pruning_option=options.PRUNING_NONE,
                 scale_option=options.LOCAL_SCALE_MEDIAN,
                 manual_pruning=None,
                 manual_scale=None,
                 verbosity=0):
        """
        Validate the pruning and scaling options, store them, and
        precompute the affinity matrix from the distance matrix.

        :param dm: DistanceMatrix (or numpy array, converted by base class)
        :param pruning_option: an options.PRUNING_* enum value
        :param scale_option: an options.LOCAL_SCALE_* enum value
        :param manual_pruning: neighbour count, required for PRUNING_MANUAL
        :param manual_scale: neighbour index, required for LOCAL_SCALE_MANUAL
        :param verbosity: print diagnostics when > 0
        """
        super(Spectral, self).__init__(dm)
        # Enum lookups below serve only to validate the given options;
        # unknown values raise OptionError.
        try:
            options.reverse[pruning_option]
        except KeyError:
            raise OptionError(pruning_option, list(options.reverse.values()))
        try:
            options.reverse[scale_option]
        except KeyError:
            raise OptionError(scale_option, list(options.reverse.values()))
        # Manual parameters must be numbers within [2, number of points]
        if pruning_option == options.PRUNING_MANUAL:
            _check_val(manual_pruning, 2, self.dm.df.shape[0])
        if scale_option == options.LOCAL_SCALE_MANUAL:
            _check_val(manual_scale, 2, self.dm.df.shape[0])
        self._pruning_option = pruning_option
        self._scale_option = scale_option
        self._manual_pruning = manual_pruning
        self._manual_scale = manual_scale
        self._verbosity = verbosity
        # The affinity matrix is computed once, eagerly
        self._affinity = self.decompose()
    def __str__(self):
        return ('Spectral Clustering with local scaling:\n'
                'Pruning option: {}\n'
                'Scaling option: {}'
                .format(options.reverse[self._pruning_option], options.reverse[self._scale_option]))
    def decompose(self,
                  noise=False,
                  verbosity=0,
                  logic='or',
                  **kwargs):
        """ Use prune to remove links between distant points:
        prune is None: no pruning
        prune={int > 0}: prunes links beyond `prune` nearest neighbours
        prune='estimate': searches for the smallest value that retains a fully
        connected graph
        """
        # NOTE(review): **kwargs is accepted but never used here -- confirm
        matrix = self.get_dm(noise)
        # get local scale estimate
        est_scale = None
        # ADJUST MASK
        if self._pruning_option == options.PRUNING_NONE:
            # Set kp to max value
            kp = len(matrix) - 1
            mask = np.ones(matrix.shape, dtype=bool)
        elif self._pruning_option == options.PRUNING_MANUAL:
            # Manually set value of kp
            kp = self._manual_pruning
            mask = kmask(matrix, self._manual_pruning, logic=logic)
        elif self._pruning_option == options.PRUNING_ESTIMATE:
            # Must estimate value of kp
            kp, mask, est_scale = binsearch_mask(matrix, logic=logic)
        else:
            raise ValueError("Unexpected error: 'kp' not set")
        # ADJUST SCALE
        if self._scale_option == options.LOCAL_SCALE_MEDIAN:
            dist = np.median(matrix, axis=1)
            scale = np.outer(dist, dist)
        elif self._scale_option == options.LOCAL_SCALE_MANUAL:
            scale = kscale(matrix, self._manual_scale)
        elif self._scale_option == options.LOCAL_SCALE_ESTIMATE:
            if est_scale is None:
                _, _, scale = binsearch_mask(matrix, logic=logic)
            else:
                # Nothing to be done - est_scale was set during the PRUNING_ESTIMATE
                scale = est_scale
        else:
            raise ValueError("Unexpected error: 'scale' not set")
        # ZeroDivisionError safety check
        if not (scale > 1e-5).all():
            if verbosity > 0:
                print('Rescaling to avoid zero-div error')
            _, _, scale = binsearch_mask(matrix, logic=logic)
            assert (scale > 1e-5).all()
        aff = affinity(matrix, mask, scale)
        # Force the diagonal (self-affinity) to exactly 1
        aff.flat[::len(aff)+1] = 1.0
        return aff
    def cluster(self, n, embed_dim=None, algo=spectral.SPECTRAL, method=methods.KMEANS):
        """
        Cluster the embedded coordinates using spectral clustering
        Parameters
        ----------
        n: int
            The number of clusters to return
        embed_dim: int
            The dimensionality of the underlying coordinates
            Defaults to same value as n
        algo: enum value (spectral.SPECTRAL | spectral.KPCA | spectral.ZELNIKMANOR)
            Type of embedding to use
        method: enum value (methods.KMEANS | methods.GMM)
            The clustering method to use
        Returns
        -------
        Partition: Partition object describing the data partition
        """
        # Trivial case: everything in a single cluster, no embedding needed
        if n == 1:
            return Partition([1] * len(self.get_dm(False)))
        if embed_dim is None:
            embed_dim = n
        if algo == spectral.SPECTRAL:
            self._coords = self.spectral_embedding(embed_dim)
        elif algo == spectral.KPCA:
            self._coords = self.kpca_embedding(embed_dim)
        elif algo == spectral.ZELNIKMANOR:
            self._coords = self.spectral_embedding_(embed_dim)
        else:
            raise OptionError(algo, list(spectral.reverse.values()))
        if method == methods.KMEANS:
            p = self.kmeans(n, self._coords.df.values)
        elif method == methods.GMM:
            p = self.gmm(n, self._coords.df.values)
        elif method == methods.WARD:
            linkmat = fastcluster.linkage(self._coords.values, 'ward')
            p = _hclust(linkmat, n)
        else:
            raise OptionError(method, list(methods.reverse.values()))
        if self._verbosity > 0:
            print('Using clustering method: {}'.format(methods.reverse[method]))
        return p
    def spectral_embedding(self, n):
        """
        Embed the points using spectral decomposition of the laplacian of
        the affinity matrix
        Parameters
        ----------
        n: int
            The number of dimensions
        """
        coords = spectral_embedding(self._affinity, n)
        return CoordinateMatrix(normalise_rows(coords))
    def spectral_embedding_(self, n):
        """
        Old method for generating coords, used on original analysis
        of yeast data. Included to reproduce yeast result from paper.
        Reason for difference - switched to using spectral embedding
        method provided by scikit-learn (mainly because it spreads
        points over a sphere, rather than a half sphere, so looks
        better plotted). Uses a different Laplacian matrix.
        """
        aff = self._affinity.copy()
        # zero the diagonal before building the Laplacian
        aff.flat[::aff.shape[0]+1] = 0
        laplacian = laplace(aff)
        decomp = eigen(laplacian)
        return CoordinateMatrix(normalise_rows(decomp.vecs[:,:n]))
    def kpca_embedding(self, n):
        """
        Embed the points using kernel PCA of the affinity matrix
        Parameters
        ----------
        n: int
            The number of dimensions
        """
        return self.dm.embedding(n, 'kpca', affinity_matrix=self._affinity)
    @property
    def affinity(self):
        # Read-only access to the precomputed affinity matrix
        return self._affinity
class MultidimensionalScaling(ClusteringManager, EMMixin):
    """
    Manager for clustering using multidimensional scaling
    """
    def cluster(self, n, embed_dim=None, algo=mds.CLASSICAL, method=methods.KMEANS):
        """
        Embed the distance matrix with MDS, then cluster the coordinates.

        Parameters
        ----------
        n: int
            The number of clusters to return
        embed_dim: int
            Dimensionality of the embedding; defaults to n
        algo: enum value (mds.CLASSICAL | mds.METRIC)
            Flavour of multidimensional scaling to use
        method: enum value (methods.KMEANS | methods.GMM | methods.WARD)
            The clustering method to use

        Returns
        -------
        Partition: Partition object describing the data partition
        """
        # One cluster requires no work: assign everything to group 1
        if n == 1:
            return Partition([1] * len(self.get_dm(False)))
        if embed_dim is None:
            embed_dim = n
        # Select the embedding flavour
        if algo == mds.CLASSICAL:
            embedding_type = 'cmds'
        elif algo == mds.METRIC:
            embedding_type = 'mmds'
        else:
            raise OptionError(algo, list(mds.reverse.values()))
        self._coords = self.dm.embedding(embed_dim, embedding_type)
        # Cluster the embedded coordinates
        if method == methods.KMEANS:
            partition = self.kmeans(n, self._coords.values)
        elif method == methods.GMM:
            partition = self.gmm(n, self._coords.values)
        elif method == methods.WARD:
            linkmat = fastcluster.linkage(self._coords.values, 'ward')
            partition = _hclust(linkmat, n)
        else:
            raise OptionError(method, list(methods.reverse.values()))
        return partition
class Hierarchical(ClusteringManager):
    """ Apply clustering methods to distance matrix
    = Hierarchical clustering - single-linkage - complete-linkage - average-
    linkage (UPGMA) - Ward's method
    = k-medoids
    = Multidimensional Scaling (Principal Coordinate Analysis) + k-means
    = Spectral Clustering + k-means - NJW method - Shi-Malik method - Zelnik-
    Manor and Perona Local Scaling - Local Scaling with eigenvector rotation as
    stopping criterion
    """

    # Map linkage enum values to the method names fastcluster.linkage accepts
    _LINKAGE_NAMES = {
        linkage.SINGLE: 'single',
        linkage.COMPLETE: 'complete',
        linkage.AVERAGE: 'average',
        linkage.WARD: 'ward',
        linkage.WEIGHTED: 'weighted',
        linkage.CENTROID: 'centroid',
        linkage.MEDIAN: 'median',
    }

    def __str__(self):
        return 'Hierarchical Clustering'

    def cluster(self, nclusters, linkage_method=linkage.WARD, **kwargs):
        """
        Do hierarchical clustering on a distance matrix using one of the methods:
        linkage.SINGLE = single-linkage clustering
        linkage.COMPLETE = complete-linkage clustering
        linkage.AVERAGE = average-linkage clustering
        linkage.WARD = Ward's minimum variance method
        (also WEIGHTED, CENTROID and MEDIAN)

        :raises ValueError: if linkage_method is not a recognised enum value
        """
        # Table dispatch replaces the previous if/elif chain; behaviour
        # (including the ValueError for unknown methods) is unchanged.
        try:
            method_name = self._LINKAGE_NAMES[linkage_method]
        except KeyError:
            raise ValueError('Unknown linkage_method: {}'.format(linkage_method))
        return self._hclust(nclusters, method_name, **kwargs)

    def _hclust(self, nclusters, method, noise=False):
        """
        :param nclusters: Number of clusters to return
        :param method: single, complete, average, ward, weighted, centroid or median
            (http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html)
        :param noise: Add Gaussian noise to the distance matrix prior to clustering (bool, default=False)
        :return: Partition object describing clustering
        """
        matrix = self.get_dm(noise)
        linkmat = fastcluster.linkage(squareform(matrix), method)
        self.nclusters = nclusters  # Stored in case we want to plot
        self.linkmat = linkmat      #
        # delegate to the module-level _hclust helper for the actual cut
        return _hclust(linkmat, nclusters)

    def plot_dendrogram(self, nclusters=None, leaf_font_size=8, leaf_rotation=90, names=None,
                        title_font_size=16, ):
        """
        Plots the dendrogram of the most recently generated partition
        :param nclusters: Override the plot default number of clusters
        :return: matplotlib.pyplot.figure
        """
        # Bug fix: previously 'not hasattr(A) and not hasattr(B)', which only
        # raised when BOTH attributes were missing; plotting needs both.
        if not (hasattr(self, 'nclusters') and hasattr(self, 'linkmat')):
            raise ValueError("This instance has no plottable information.")
        if nclusters is None:
            nclusters = self.nclusters
        threshold = _get_threshold(self.linkmat, nclusters)
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(11.7, 8.3))
        if names is not None:
            labfn=lambda leaf: names[leaf]
        else:
            labfn=None
            # without labels there is nothing to rotate
            leaf_rotation=0
        dendrogram(
            self.linkmat,
            color_threshold=threshold,
            leaf_font_size=leaf_font_size,
            leaf_rotation=leaf_rotation,
            leaf_label_func=labfn,
            count_sort=True,
        )
        plt.suptitle('Dendrogram', fontsize=title_font_size)
        # plt.title('Distance metric: {0}    Linkage method: {1}    Number of classes: {2}'.format(compound_key[0],
        # compound_key[1], compound_key[2]), fontsize=12)
        plt.axhline(threshold, color='grey', ls='dashed')
        plt.xlabel('Gene')
        plt.ylabel('Distance')
        return fig
class Automatic(ClusteringManager):
    """
    Clustering methods that choose the number of clusters themselves
    - Affinity Propagation
    - DBSCAN
    """
    def affinity_propagation(self, affinity_matrix=None, sigma=1, **kwargs):
        """
        Cluster with affinity propagation on a precomputed affinity matrix.
        :param affinity_matrix: optional precomputed affinities; when None an
            RBF kernel with width `sigma` is applied to the distance matrix
        :param kwargs: damping=0.5, max_iter=200, convergence_iter=15, copy=True, preference=None, verbose=False
        :return:
        """
        affinities = rbf(self.dm.values, sigma) if affinity_matrix is None else affinity_matrix
        estimator = AffinityPropagation(affinity='precomputed', **kwargs)
        estimator.fit(affinities.view(np.ndarray))
        return Partition(estimator.labels_)

    def dbscan(self, eps=0.75, min_samples=3):
        """
        Cluster with DBSCAN on the precomputed distance matrix.
        :param eps: max dist between points in same neighbourhood
        :param min_samples: number of points in a neighbourhood
        :return:
        """
        estimator = DBSCAN(metric='precomputed', eps=eps, min_samples=min_samples)
        estimator.fit(self.get_dm(False))
        return Partition(estimator.labels_)
class Kmedoids(ClusteringManager):
    """
    Kmedoids clustering acts directly on the distance matrix without
    needing an intermediate embedding into coordinate space
    """
    def cluster(self, nclusters, noise=False, npass=100, nreps=1):
        """Run Bio.Cluster kmedoids `nreps` times and keep the lowest-error run."""
        if Biopython_Unavailable:
            print('kmedoids not available without Biopython')
            return
        matrix = self.get_dm(noise)
        replicates = [kmedoids(matrix, nclusters=nclusters, npass=npass)
                      for _ in range(nreps)]
        # each replicate is (assignment, error, ...); keep the smallest error
        best = min(replicates, key=lambda result: result[1])
        return Partition(best[0])
class Evaluation(ClusteringManager):
    """
    Methods for evaluating the fit of a cluster to the distance matrix
    anosim and permanova seem pretty useless; silhouette is ok
    """
    def anosim(self, partition, n_permutations=999):
        # ANOSIM significance test of the partition against the distances
        if partition.is_minimal():
            raise ValueError("ANOSim is not defined for singleton clusters")
        elif partition.is_maximal():
            raise ValueError("ANOSim is not defined for maximally divided partitions")
        result = skbio.stats.distance.ANOSIM(skbio.DistanceMatrix(self.get_dm(False)), partition.partition_vector)
        return result(n_permutations)
    def permanova(self, partition, n_permutations=999):
        # PERMANOVA significance test of the partition against the distances
        if partition.is_minimal():
            raise ValueError("PERMANOVA is not defined for singleton clusters")
        elif partition.is_maximal():
            raise ValueError("PERMANOVA is not defined for maximally divided partitions")
        result = skbio.stats.distance.PERMANOVA(skbio.DistanceMatrix(self.get_dm(False)), partition.partition_vector)
        return result(n_permutations)
    def silhouette(self, partition):
        """Compute silhouette values for the given partition."""
        pvec = np.array(partition.partition_vector)
        groups = np.unique(pvec)
        nbrs = np.zeros(pvec.shape)
        scores = np.zeros(pvec.shape)
        if len(groups) == 1:
            raise ValueError("Silhouette is not defined for singleton clusters")
        for ingroup in groups:
            ingroup_ix = np.where(pvec == ingroup)[0]
            within, between, outgroups = self.__get_mean_dissimilarities_for_group(pvec, ingroup, groups)
            between_min = between.min(axis=0)
            outgroup_ix, neighbours_ix = np.where(between == between_min)
            neighbours = np.zeros(neighbours_ix.shape)
            neighbours[neighbours_ix] = outgroups[outgroup_ix]
            nbrs[ingroup_ix] = neighbours
            scores[ingroup_ix] = self.__silhouette_calc(within, between_min)
        # NOTE(review): scores[1].mean() returns only the score of the point
        # at index 1 (a scalar's .mean() is itself); a mean silhouette over
        # all points would be scores.mean() -- confirm which is intended.
        return scores[1].mean()
    @staticmethod
    def __get_indices_for_groups_by_index(ix, jx):
        # Pairwise index lists excluding self-comparisons; the special case
        # keeps a single shared index comparable to itself.
        if len(ix) == len(jx) == 1 and ix == jx:
            return [list(ix)], [list(jx)]
        row_indices = [[i for j in jx if i != j] for i in ix]
        column_indices = [[j for j in jx if j != i] for i in ix]
        return row_indices, column_indices
    @staticmethod
    def __silhouette_calc(ingroup, outgroup):
        # Standard silhouette formula (b - a) / max(a, b), elementwise
        if len(ingroup) == 1:
            return 0
        max_ = np.array([ingroup, outgroup]).max(axis=0)
        return (outgroup - ingroup) / max_
    def __get_indices_for_groups(self, pvec, group1, group2):
        # Translate two group labels into pairwise row/column index lists
        ix = np.where(pvec == group1)[0]
        jx = np.where(pvec == group2)[0]
        return self.__get_indices_for_groups_by_index(ix, jx)
    def __get_mean_dissimilarities_for_group(self, pvec, group, groups):
        # Mean within-group distance per point, plus mean distance to each
        # other group (one row per outgroup).
        outgroups = groups[groups != group]
        within_indices = self.__get_indices_for_groups(pvec, group, group)
        within_distances = self.dm.values[within_indices].mean(axis=1)
        dissimilarities = []
        for outgroup in outgroups:
            between_indices = self.__get_indices_for_groups(pvec, group, outgroup)
            between_distances = self.dm.values[between_indices]
            dissimilarities.append(between_distances.mean(axis=1))
        return within_distances, np.array(dissimilarities), outgroups
|
DessimozLab/treeCl
|
treeCl/clustering.py
|
Python
|
mit
| 21,507
|
[
"Biopython",
"Gaussian"
] |
df0cf6b2cd7ce5be432231fcdf0092c34f4579a657532ec04bba0caa3621c5f0
|
from matplotlib import pyplot as plt
from annotate import annotate_horiz
import psychic
import numpy as np
# Script that renders the 'sliding window' documentation figure:
# fake multi-channel EEG traces with two interleaved rows of trial markers.
f = plt.figure(figsize=(10,5))
f.add_axes([0.1, 0.1, 0.5, 0.5])
# fake data: psychic.fake.gaussian(4, 10, 100) -- presumably 4 channels,
# 10 s at 100 Hz, but units are defined by psychic; TODO confirm
d = psychic.fake.gaussian(4, 10, 100)
# smooth the traces with a low-pass Butterworth filter before plotting
d = psychic.nodes.Butterworth(4, 15, 'lowpass').train_apply(d, d)
psychic.plot_eeg(d, fig=f, vspace=3)
plt.ylim(-1.5, 15)
plt.title('Sliding window')
# draw overlapping trial annotations every 2.6 units: odd-numbered trials
# on the upper row (y=13), even-numbered on the lower row (y=11.2)
for i,x in enumerate(np.arange(0, 10, 2.6)):
    annotate_horiz(x+0.1, x+2.1, 13, 'Trial %d' % (i*2+1), 0.2)
    # only draw the second-row marker while it still fits on the axis
    if x + 3.4 < 10:
        annotate_horiz(x+1.4, x+3.4, 11.2, 'Trial %d' % (i*2+2), 0.2)
plt.savefig('sliding_window.png')
|
wmvanvliet/psychic
|
docs/images/sliding_window.py
|
Python
|
bsd-3-clause
| 599
|
[
"Gaussian"
] |
0304d848ec6b220e18abc9ee9d88cd574b4ca919cad56965505ff791e7f881ca
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
import urllib
from datetime import datetime, timedelta
from textwrap import dedent
import pytz
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.course import CourseFixture, CourseUpdateDesc, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.common.utils import enroll_user_track
from common.test.acceptance.pages.lms import BASE_URL
from common.test.acceptance.pages.lms.account_settings import AccountSettingsPage
from common.test.acceptance.pages.lms.course_home import CourseHomePage
from common.test.acceptance.pages.lms.course_wiki import (
CourseWikiChildrenPage,
CourseWikiEditPage,
CourseWikiHistoryPage,
CourseWikiPage
)
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.create_mode import ModeCreationPage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.pages.lms.login_and_register import CombinedLoginAndRegisterPage, ResetPasswordPage
from common.test.acceptance.pages.lms.pay_and_verify import FakePaymentPage, PaymentAndVerificationFlow
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.pages.lms.progress import ProgressPage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.pages.lms.video.video import VideoPage
from common.test.acceptance.pages.studio.settings import SettingsPage
from common.test.acceptance.tests.helpers import (
EventsTestMixin,
UniqueCourseTest,
element_has_text,
get_selected_option_text,
load_data_str,
select_option_by_text,
)
@attr(shard=19)
class ForgotPasswordPageTest(UniqueCourseTest):
    """
    Test that forgot password forms is rendered if url contains 'forgot-password-modal'
    in hash.
    """

    def setUp(self):
        """Create a test user and the reset-password page object."""
        super(ForgotPasswordPageTest, self).setUp()
        self.user_info = self._create_user()
        self.reset_password_page = ResetPasswordPage(self.browser)

    def _create_user(self):
        """Register a unique user, then log out so tests start anonymous."""
        auth_page = AutoAuthPage(self.browser).visit()
        info = auth_page.user_info
        LogoutPage(self.browser).visit()
        return info

    def test_reset_password_form_visibility(self):
        """The reset-password form should be rendered on the page."""
        self.reset_password_page.visit()
        self.assertTrue(self.reset_password_page.is_form_visible())

    def test_reset_password_confirmation_box_visibility(self):
        """Submitting the form for an existing user shows a success message."""
        self.reset_password_page.visit()
        self.reset_password_page.fill_password_reset_form(self.user_info['email'])
        self.reset_password_page.is_success_visible(".submission-success")
        self.assertIn("Check Your Email", self.reset_password_page.get_success_message())
@attr(shard=19)
class LoginFromCombinedPageTest(UniqueCourseTest):
"""Test that we can log in using the combined login/registration page.
Also test that we can request a password reset from the combined
login/registration page.
"""
def setUp(self):
"""Initialize the page objects and create a test course. """
super(LoginFromCombinedPageTest, self).setUp()
self.login_page = CombinedLoginAndRegisterPage(
self.browser,
start_page="login",
course_id=self.course_id
)
self.dashboard_page = DashboardPage(self.browser)
# Create a course to enroll in
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
def test_login_success(self):
# Create a user account
email, password = self._create_unique_user()
# Navigate to the login page and try to log in
self.login_page.visit().login(email=email, password=password)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
def test_login_failure(self):
# Navigate to the login page
self.login_page.visit()
# User account does not exist
self.login_page.login(email="nobody@nowhere.com", password="password")
# Verify that an error is displayed
self.assertIn("Email or password is incorrect.", self.login_page.wait_for_errors())
def test_toggle_to_register_form(self):
self.login_page.visit().toggle_form()
self.assertEqual(self.login_page.current_form, "register")
def test_password_reset_success(self):
# Create a user account
email, password = self._create_unique_user() # pylint: disable=unused-variable
# Navigate to the password reset form and try to submit it
self.login_page.visit().password_reset(email=email)
# Expect that we're shown a success message
self.assertIn("Check Your Email", self.login_page.wait_for_success())
def test_password_reset_no_user(self):
# Navigate to the password reset form
self.login_page.visit()
# User account does not exist
self.login_page.password_reset(email="nobody@nowhere.com")
# Expect that we're shown a success message
self.assertIn("Check Your Email", self.login_page.wait_for_success())
def test_third_party_login(self):
"""
Test that we can login using third party credentials, and that the
third party account gets linked to the edX account.
"""
# Create a user account
email, password = self._create_unique_user()
# Navigate to the login page
self.login_page.visit()
# Baseline screen-shots are different for chrome and firefox.
#self.assertScreenshot('#login .login-providers', 'login-providers-{}'.format(self.browser.name), .25)
#The line above is commented out temporarily see SOL-1937
# Try to log in using "Dummy" provider
self.login_page.click_third_party_dummy_provider()
# The user will be redirected somewhere and then back to the login page:
msg_text = self.login_page.wait_for_auth_status_message()
self.assertIn("You have successfully signed into Dummy", msg_text)
self.assertIn(
u"To link your accounts, sign in now using your édX password",
msg_text
)
# Now login with username and password:
self.login_page.login(email=email, password=password)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
try:
# Now logout and check that we can log back in instantly (because the account is linked):
LogoutPage(self.browser).visit()
self.login_page.visit()
self.login_page.click_third_party_dummy_provider()
self.dashboard_page.wait_for_page()
finally:
self._unlink_dummy_account()
def test_hinted_login(self):
""" Test the login page when coming from course URL that specified which third party provider to use """
# Create a user account and link it to third party auth with the dummy provider:
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self._link_dummy_account()
try:
LogoutPage(self.browser).visit()
# When not logged in, try to load a course URL that includes the provider hint ?tpa_hint=...
course_page = CoursewarePage(self.browser, self.course_id)
self.browser.get(course_page.url + '?tpa_hint=oa2-dummy')
# We should now be redirected to the login page
self.login_page.wait_for_page()
self.assertIn(
"Would you like to sign in using your Dummy credentials?",
self.login_page.hinted_login_prompt
)
# Baseline screen-shots are different for chrome and firefox.
#self.assertScreenshot('#hinted-login-form', 'hinted-login-{}'.format(self.browser.name), .25)
#The line above is commented out temporarily see SOL-1937
self.login_page.click_third_party_dummy_provider()
# We should now be redirected to the course page
course_page.wait_for_page()
finally:
self._unlink_dummy_account()
def _link_dummy_account(self):
""" Go to Account Settings page and link the user's account to the Dummy provider """
account_settings = AccountSettingsPage(self.browser).visit()
# switch to "Linked Accounts" tab
account_settings.switch_account_settings_tabs('accounts-tab')
field_id = "auth-oa2-dummy"
account_settings.wait_for_field(field_id)
self.assertEqual("Link Your Account", account_settings.link_title_for_link_field(field_id))
account_settings.click_on_link_in_link_field(field_id)
# make sure we are on "Linked Accounts" tab after the account settings
# page is reloaded
account_settings.switch_account_settings_tabs('accounts-tab')
account_settings.wait_for_link_title_for_link_field(field_id, "Unlink This Account")
def _unlink_dummy_account(self):
""" Verify that the 'Dummy' third party auth provider is linked, then unlink it """
# This must be done after linking the account, or we'll get cross-test side effects
account_settings = AccountSettingsPage(self.browser).visit()
# switch to "Linked Accounts" tab
account_settings.switch_account_settings_tabs('accounts-tab')
field_id = "auth-oa2-dummy"
account_settings.wait_for_field(field_id)
self.assertEqual("Unlink This Account", account_settings.link_title_for_link_field(field_id))
account_settings.click_on_link_in_link_field(field_id)
account_settings.wait_for_message(field_id, "Successfully unlinked")
def _create_unique_user(self):
    """
    Create a new user with a unique name and email.

    Returns a ``(email, password)`` tuple for the new account.
    The browser session is logged out before returning.
    """
    # Derive a unique username from the test's unique id to avoid collisions
    username = "test_{uuid}".format(uuid=self.unique_id[0:6])
    email = "{user}@example.com".format(user=username)
    password = "password"

    # Create the user (automatically logs us in)
    AutoAuthPage(
        self.browser,
        username=username,
        email=email,
        password=password
    ).visit()

    # Log out
    LogoutPage(self.browser).visit()
    return (email, password)
@attr(shard=19)
class RegisterFromCombinedPageTest(UniqueCourseTest):
    """Test that we can register a new user from the combined login/registration page. """

    def setUp(self):
        """Initialize the page objects and create a test course. """
        super(RegisterFromCombinedPageTest, self).setUp()
        self.register_page = CombinedLoginAndRegisterPage(
            self.browser,
            start_page="register",
            course_id=self.course_id
        )
        self.dashboard_page = DashboardPage(self.browser)

        # Create a course to enroll in
        CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        ).install()

    def test_register_success(self):
        """Registering with a fully valid form lands on the dashboard, auto-enrolled."""
        # Navigate to the registration page
        self.register_page.visit()

        # Fill in the form and submit it
        username = "test_{uuid}".format(uuid=self.unique_id[0:6])
        email = "{user}@example.com".format(user=username)
        self.register_page.register(
            email=email,
            password="password",
            username=username,
            full_name="Test User",
            country="US",
            favorite_movie="Mad Max: Fury Road"
        )

        # Expect that we reach the dashboard and we're auto-enrolled in the course
        course_names = self.dashboard_page.wait_for_page().available_courses
        self.assertIn(self.course_info["display_name"], course_names)

    def test_register_failure(self):
        """Submitting the form with required fields missing shows the expected errors."""
        # Navigate to the registration page
        self.register_page.visit()

        # Enter a blank for the username field, which is required
        # Don't agree to the terms of service / honor code.
        # Don't specify a country code, which is required.
        # Don't specify a favorite movie.
        username = "test_{uuid}".format(uuid=self.unique_id[0:6])
        email = "{user}@example.com".format(user=username)
        self.register_page.register(
            email=email,
            password="password",
            username="",
            full_name="Test User"
        )

        # Verify that the expected errors are displayed.
        errors = self.register_page.wait_for_errors()
        self.assertIn(u'Please enter your Public Username.', errors)
        self.assertIn(u'Select your country or region of residence.', errors)
        self.assertIn(u'Please tell us your favorite movie.', errors)

    def test_toggle_to_login_form(self):
        """The combined page can toggle from the register form to the login form."""
        self.register_page.visit().toggle_form()
        self.assertEqual(self.register_page.current_form, "login")

    def test_third_party_register(self):
        """
        Test that we can register using third party credentials, and that the
        third party account gets linked to the edX account.
        """
        # Navigate to the register page
        self.register_page.visit()
        # Baseline screen-shots are different for chrome and firefox.
        #self.assertScreenshot('#register .login-providers', 'register-providers-{}'.format(self.browser.name), .25)
        # The line above is commented out temporarily see SOL-1937

        # Try to authenticate using the "Dummy" provider
        self.register_page.click_third_party_dummy_provider()

        # The user will be redirected somewhere and then back to the register page:
        msg_text = self.register_page.wait_for_auth_status_message()
        self.assertEqual(self.register_page.current_form, "register")
        self.assertIn("You've successfully signed into Dummy", msg_text)
        self.assertIn("We just need a little more information", msg_text)

        # Now the form should be pre-filled with the data from the Dummy provider:
        self.assertEqual(self.register_page.email_value, "adama@fleet.colonies.gov")
        self.assertEqual(self.register_page.full_name_value, "William Adama")
        self.assertIn("Galactica1", self.register_page.username_value)

        # Set country and submit the form:
        self.register_page.register(country="US", favorite_movie="Battlestar Galactica")

        # Expect that we reach the dashboard and we're auto-enrolled in the course
        course_names = self.dashboard_page.wait_for_page().available_courses
        self.assertIn(self.course_info["display_name"], course_names)

        # Now logout and check that we can log back in instantly (because the account is linked):
        LogoutPage(self.browser).visit()
        login_page = CombinedLoginAndRegisterPage(self.browser, start_page="login")
        login_page.visit()
        login_page.click_third_party_dummy_provider()
        self.dashboard_page.wait_for_page()

        # Now unlink the account (To test the account settings view and also to prevent cross-test side effects)
        account_settings = AccountSettingsPage(self.browser).visit()
        # switch to "Linked Accounts" tab
        account_settings.switch_account_settings_tabs('accounts-tab')
        field_id = "auth-oa2-dummy"
        account_settings.wait_for_field(field_id)
        self.assertEqual("Unlink This Account", account_settings.link_title_for_link_field(field_id))
        account_settings.click_on_link_in_link_field(field_id)
        account_settings.wait_for_message(field_id, "Successfully unlinked")
@attr(shard=19)
class PayAndVerifyTest(EventsTestMixin, UniqueCourseTest):
    """Test that we can proceed through the payment and verification flow."""

    def setUp(self):
        """Initialize the test.

        Create the necessary page objects, create a test course and configure its modes,
        create a user and log them in.
        """
        super(PayAndVerifyTest, self).setUp()

        # Three entry points into the same flow: fresh payment, immediate
        # verification, and upgrading an existing enrollment.
        self.payment_and_verification_flow = PaymentAndVerificationFlow(self.browser, self.course_id)
        self.immediate_verification_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='verify-now')
        self.upgrade_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='upgrade')
        self.fake_payment_page = FakePaymentPage(self.browser, self.course_id)
        self.dashboard_page = DashboardPage(self.browser)

        # Create a course
        CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        ).install()

        # Add an honor mode to the course
        ModeCreationPage(self.browser, self.course_id).visit()

        # Add a verified mode to the course
        ModeCreationPage(self.browser, self.course_id, mode_slug=u'verified', mode_display_name=u'Verified Certificate', min_price=10, suggested_prices='10,20').visit()

    def test_deferred_verification_enrollment(self):
        """Enrolling as verified (deferring ID verification) shows 'verified' on the dashboard."""
        # Create a user and log them in
        # NOTE(review): student_id is unused in this test — kept as-is to avoid
        # changing behavior; consider dropping the assignment.
        student_id = AutoAuthPage(self.browser).visit().get_user_id()
        enroll_user_track(self.browser, self.course_id, 'verified')

        # Navigate to the dashboard
        self.dashboard_page.visit()

        # Expect that we're enrolled as verified in the course
        enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
        self.assertEqual(enrollment_mode, 'verified')

    def test_enrollment_upgrade(self):
        """Upgrading honor -> verified via the dashboard emits a mode_changed event."""
        # Create a user, log them in, and enroll them in the honor mode
        student_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()

        # Navigate to the dashboard
        self.dashboard_page.visit()

        # Expect that we're enrolled as honor in the course
        enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
        self.assertEqual(enrollment_mode, 'honor')

        # Click the upsell button on the dashboard
        self.dashboard_page.upgrade_enrollment(self.course_info["display_name"], self.upgrade_page)

        # Select the first contribution option appearing on the page
        self.upgrade_page.indicate_contribution()

        # Proceed to the fake payment page
        self.upgrade_page.proceed_to_payment()

        def only_enrollment_events(event):
            """Filter out all non-enrollment events."""
            return event['event_type'].startswith('edx.course.enrollment.')

        expected_events = [
            {
                'event_type': 'edx.course.enrollment.mode_changed',
                'event': {
                    'user_id': int(student_id),
                    'mode': 'verified',
                }
            }
        ]

        # Submitting the payment must emit exactly the expected enrollment event
        with self.assert_events_match_during(event_filter=only_enrollment_events, expected_events=expected_events):
            # Submit payment
            self.fake_payment_page.submit_payment()

        # Navigate to the dashboard
        self.dashboard_page.visit()

        # Expect that we're enrolled as verified in the course
        enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
        self.assertEqual(enrollment_mode, 'verified')
@attr('a11y')
class CourseWikiA11yTest(UniqueCourseTest):
    """
    Tests that verify the course wiki.

    Each test runs an automated accessibility audit against one of the
    wiki views (view, edit, history, children).
    """

    def setUp(self):
        """
        Initialize pages and install a course fixture.
        """
        super(CourseWikiA11yTest, self).setUp()

        # self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
        self.course_info['number'] = self.unique_id[0:6]

        self.course_wiki_page = CourseWikiPage(self.browser, self.course_id)
        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        self.course_wiki_edit_page = CourseWikiEditPage(self.browser, self.course_id, self.course_info)
        self.tab_nav = TabNavPage(self.browser)

        CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        ).install()

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

        # Access course wiki page
        self.course_home_page.visit()
        self.tab_nav.go_to_tab('Wiki')

    def _open_editor(self):
        """Open the wiki article editor and wait for it to load."""
        self.course_wiki_page.open_editor()
        self.course_wiki_edit_page.wait_for_page()

    def test_view(self):
        """
        Verify the basic accessibility of the wiki page as initially displayed.
        """
        self.course_wiki_page.a11y_audit.check_for_accessibility_errors()

    def test_edit(self):
        """
        Verify the basic accessibility of edit wiki page.
        """
        self._open_editor()
        self.course_wiki_edit_page.a11y_audit.check_for_accessibility_errors()

    def test_changes(self):
        """
        Verify the basic accessibility of changes wiki page.
        """
        self.course_wiki_page.show_history()
        history_page = CourseWikiHistoryPage(self.browser, self.course_id, self.course_info)
        history_page.wait_for_page()
        history_page.a11y_audit.check_for_accessibility_errors()

    def test_children(self):
        """
        Verify the basic accessibility of changes wiki page.
        """
        self.course_wiki_page.show_children()
        children_page = CourseWikiChildrenPage(self.browser, self.course_id, self.course_info)
        children_page.wait_for_page()
        children_page.a11y_audit.check_for_accessibility_errors()
@attr(shard=1)
class HighLevelTabTest(UniqueCourseTest):
    """
    Tests that verify each of the high-level tabs available within a course.
    """

    def setUp(self):
        """
        Initialize pages and install a course fixture.
        """
        super(HighLevelTabTest, self).setUp()

        # self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
        self.course_info['number'] = self.unique_id[0:6]

        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        self.progress_page = ProgressPage(self.browser, self.course_id)
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.tab_nav = TabNavPage(self.browser)
        self.video = VideoPage(self.browser)

        # Install a course with sections/problems, tabs, updates, and handouts
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )

        course_fix.add_update(
            CourseUpdateDesc(date='January 29, 2014', content='Test course update1')
        )

        course_fix.add_handout('demoPDF.pdf')

        # Static tab data is a raw string so the MathJax delimiters \( \) survive
        course_fix.add_children(
            XBlockFixtureDesc('static_tab', 'Test Static Tab', data=r"static tab data with mathjax \(E=mc^2\)"),
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
                    XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
                    XBlockFixtureDesc('html', 'Test HTML'),
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2'),
                XBlockFixtureDesc('sequential', 'Test Subsection 3').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem A', data=load_data_str('multiple_choice.xml'))
                ),
            )
        ).install()

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_progress(self):
        """
        Navigate to the progress page.
        """
        # Navigate to the progress page from the info page
        self.course_home_page.visit()
        self.tab_nav.go_to_tab('Progress')

        # We haven't answered any problems yet, so assume scores are zero
        # Only problems should have scores; so there should be 2 scores.
        CHAPTER = 'Test Section'
        SECTION = 'Test Subsection'
        EXPECTED_SCORES = [(0, 3), (0, 1)]

        actual_scores = self.progress_page.scores(CHAPTER, SECTION)
        self.assertEqual(actual_scores, EXPECTED_SCORES)

    def test_static_tab(self):
        """
        Navigate to a static tab (course content)
        """
        # From the course info page, navigate to the static tab
        self.course_home_page.visit()
        self.tab_nav.go_to_tab('Test Static Tab')
        self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))

    @skip('Edraak: Disable until we support SVG output for the Arabic MathJax extension.')
    def test_static_tab_with_mathjax(self):
        """
        Navigate to a static tab (course content)
        """
        # From the course info page, navigate to the static tab
        self.course_home_page.visit()
        self.tab_nav.go_to_tab('Test Static Tab')
        self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))

        # Verify that Mathjax has rendered
        self.tab_nav.mathjax_has_rendered()

    def test_wiki_tab_first_time(self):
        """
        Navigate to the course wiki tab. When the wiki is accessed for
        the first time, it is created on the fly.
        """
        course_wiki = CourseWikiPage(self.browser, self.course_id)

        # From the course info page, navigate to the wiki tab
        self.course_home_page.visit()
        self.tab_nav.go_to_tab('Wiki')
        self.assertTrue(self.tab_nav.is_on_tab('Wiki'))

        # Assert that a default wiki is created
        expected_article_name = "{course_name}".format(
            course_name=self.course_info['display_name']
        )
        self.assertEqual(expected_article_name, course_wiki.article_name)

    def test_course_home_tab(self):
        """
        Navigate to the course home page using the tab.
        """
        self.course_home_page.visit()
        self.tab_nav.go_to_tab('Course')

        # Check that the tab lands on the course home page.
        self.assertTrue(self.course_home_page.is_browser_on_page())
@attr(shard=1)
class PDFTextBooksTabTest(UniqueCourseTest):
    """
    Tests that verify each of the textbook tabs available within a course.
    """

    def setUp(self):
        """
        Initialize pages and install a course fixture.
        """
        super(PDFTextBooksTabTest, self).setUp()

        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        self.tab_nav = TabNavPage(self.browser)

        # Install a course with TextBooks
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )

        # Add PDF textbooks to course fixture.
        # range(1, 3) -> two books: "PDF Book 1" and "PDF Book 2"
        for i in range(1, 3):
            course_fix.add_textbook("PDF Book {}".format(i), [{"title": "Chapter Of Book {}".format(i), "url": ""}])

        course_fix.install()

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_verify_textbook_tabs(self):
        """
        Test multiple pdf textbooks loads correctly in lms.
        """
        self.course_home_page.visit()

        # Verify each PDF textbook tab by visiting, it will fail if correct tab is not loaded.
        for i in range(1, 3):
            self.tab_nav.go_to_tab("PDF Book {}".format(i))
@attr(shard=1)
class VisibleToStaffOnlyTest(UniqueCourseTest):
    """
    Tests that content with visible_to_staff_only set to True cannot be viewed by students.
    """

    def setUp(self):
        super(VisibleToStaffOnlyTest, self).setUp()

        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )

        # Course layout: one section containing a subsection with a locked and
        # an unlocked unit, a fully unlocked subsection, and a fully locked
        # subsection (locked via the visible_to_staff_only metadata flag).
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Subsection With Locked Unit').add_children(
                    XBlockFixtureDesc('vertical', 'Locked Unit', metadata={'visible_to_staff_only': True}).add_children(
                        XBlockFixtureDesc('html', 'Html Child in locked unit', data="<html>Visible only to staff</html>"),
                    ),
                    XBlockFixtureDesc('vertical', 'Unlocked Unit').add_children(
                        XBlockFixtureDesc('html', 'Html Child in unlocked unit', data="<html>Visible only to all</html>"),
                    )
                ),
                XBlockFixtureDesc('sequential', 'Unlocked Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('html', 'Html Child in visible unit', data="<html>Visible to all</html>"),
                    )
                ),
                XBlockFixtureDesc('sequential', 'Locked Subsection', metadata={'visible_to_staff_only': True}).add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc(
                            'html', 'Html Child in locked subsection', data="<html>Visible only to staff</html>"
                        )
                    )
                )
            )
        ).install()

        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        self.courseware_page = CoursewarePage(self.browser, self.course_id)

    def test_visible_to_student(self):
        """
        Scenario: Content marked 'visible_to_staff_only' is not visible for students in the course
        Given some of the course content has been marked 'visible_to_staff_only'
        And I am logged on with an authorized student account
        Then I can only see content without 'visible_to_staff_only' set to True
        """
        AutoAuthPage(self.browser, username="STUDENT_TESTER", email="johndoe_student@example.com",
                     course_id=self.course_id, staff=False).visit()

        self.course_home_page.visit()
        # Only the two non-locked subsections should appear in the outline
        self.assertEqual(2, len(self.course_home_page.outline.sections['Test Section']))

        self.course_home_page.outline.go_to_section("Test Section", "Subsection With Locked Unit")
        self.courseware_page.wait_for_page()
        # The locked unit must not appear in the sequence navigation
        self.assertEqual([u'Unlocked Unit'], self.courseware_page.nav.sequence_items)

        self.course_home_page.visit()
        self.course_home_page.outline.go_to_section("Test Section", "Unlocked Subsection")
        self.courseware_page.wait_for_page()
        self.assertEqual([u'Test Unit'], self.courseware_page.nav.sequence_items)
@attr(shard=1)
class TooltipTest(UniqueCourseTest):
    """
    Tests that tooltips are displayed
    """

    def setUp(self):
        """
        Initialize pages and install a course fixture.
        """
        super(TooltipTest, self).setUp()

        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        self.tab_nav = TabNavPage(self.browser)

        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )

        course_fix.add_children(
            XBlockFixtureDesc('static_tab', 'Test Static Tab'),
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
                    XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
                    XBlockFixtureDesc('html', 'Test HTML'),
                )
            )
        ).install()

        self.courseware_page = CoursewarePage(self.browser, self.course_id)

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_tooltip(self):
        """
        Verify that tooltips are displayed when you hover over the sequence nav bar.
        """
        self.courseware_page.visit()
        self.courseware_page.verify_tooltips_displayed()
@attr(shard=1)
class ProblemExecutionTest(UniqueCourseTest):
    """
    Tests of problems.

    Verifies that server-side python (loncapa) embedded in a problem can
    import code from a course asset (python_lib.zip) and grade answers.
    """

    def setUp(self):
        """
        Initialize pages and install a course fixture.
        """
        super(ProblemExecutionTest, self).setUp()

        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        self.tab_nav = TabNavPage(self.browser)

        # Install a course with sections and problems.
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        # python_lib.zip provides the number_helpers module imported below
        course_fix.add_asset(['python_lib.zip'])
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('problem', 'Python Problem', data=dedent(
                        """\
                        <problem>
                        <script type="loncapa/python">
                        from number_helpers import seventeen, fortytwo
                        oneseven = seventeen()

                        def check_function(expect, ans):
                            if int(ans) == fortytwo(-22):
                                return True
                            else:
                                return False
                        </script>

                        <p>What is the sum of $oneseven and 3?</p>

                        <customresponse expect="20" cfn="check_function">
                            <textline/>
                        </customresponse>
                        </problem>
                        """
                    ))
                )
            )
        ).install()

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_python_execution_in_problem(self):
        """Check that the asset-provided python ran: text is substituted and grading works."""
        # Navigate to the problem page
        self.course_home_page.visit()
        self.course_home_page.outline.go_to_section('Test Section', 'Test Subsection')

        problem_page = ProblemPage(self.browser)
        self.assertEqual(problem_page.problem_name.upper(), 'PYTHON PROBLEM')

        # Does the page have computation results?
        # ($oneseven should have been substituted with seventeen() == 17)
        self.assertIn("What is the sum of 17 and 3?", problem_page.problem_text)

        # Fill in the answer correctly.
        problem_page.fill_answer("20")
        problem_page.click_submit()
        self.assertTrue(problem_page.is_correct())

        # Fill in the answer incorrectly.
        problem_page.fill_answer("4")
        problem_page.click_submit()
        self.assertFalse(problem_page.is_correct())
@attr(shard=1)
class EntranceExamTest(UniqueCourseTest):
    """
    Tests that course has an entrance exam.
    """

    def setUp(self):
        """
        Initialize pages and install a course fixture.
        """
        super(EntranceExamTest, self).setUp()

        CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        ).install()

        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        # Studio settings page, used to toggle the entrance-exam requirement
        self.settings_page = SettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_entrance_exam_section(self):
        """
        Scenario: Any course that is enabled for an entrance exam, should have
        entrance exam section in the course outline.
        Given that I visit the course outline
        And entrance exams are not yet enabled
        Then I should not see an "Entrance Exam" section
        When I log in as staff
        And enable entrance exams
        And I visit the course outline again as student
        Then there should be an "Entrance Exam" chapter.'
        """
        # visit the course outline and make sure there is no "Entrance Exam" section.
        self.course_home_page.visit()
        self.assertFalse('Entrance Exam' in self.course_home_page.outline.sections.keys())

        # Logout and login as a staff.
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()

        # visit course settings page and set/enabled entrance exam for that course.
        self.settings_page.visit()
        self.settings_page.require_entrance_exam()
        self.settings_page.save_changes()

        # Logout and login as a student.
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, course_id=self.course_id, staff=False).visit()

        # visit the course outline and make sure there is an "Entrance Exam" section.
        self.course_home_page.visit()
        self.assertTrue('Entrance Exam' in self.course_home_page.outline.sections.keys())

    # TODO: TNL-6546: Remove test
    def test_entrance_exam_section_2(self):
        """
        Scenario: Any course that is enabled for an entrance exam, should have entrance exam chapter at course
        page.
        Given that I am on the course page
        When I view the course that has an entrance exam
        Then there should be an "Entrance Exam" chapter.'
        """
        courseware_page = CoursewarePage(self.browser, self.course_id)
        # Selector for chapter headings in the legacy courseware accordion nav
        entrance_exam_link_selector = '.accordion .course-navigation .chapter .group-heading'

        # visit course page and make sure there is not entrance exam chapter.
        courseware_page.visit()
        courseware_page.wait_for_page()
        self.assertFalse(element_has_text(
            page=courseware_page,
            css_selector=entrance_exam_link_selector,
            text='Entrance Exam'
        ))

        # Logout and login as a staff.
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()

        # visit course settings page and set/enabled entrance exam for that course.
        self.settings_page.visit()
        self.settings_page.require_entrance_exam()
        self.settings_page.save_changes()

        # Logout and login as a student.
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, course_id=self.course_id, staff=False).visit()

        # visit course info page and make sure there is an "Entrance Exam" section.
        courseware_page.visit()
        courseware_page.wait_for_page()
        self.assertTrue(element_has_text(
            page=courseware_page,
            css_selector=entrance_exam_link_selector,
            text='Entrance Exam'
        ))
@attr(shard=1)
class NotLiveRedirectTest(UniqueCourseTest):
    """
    Test that a banner is shown when the user is redirected to
    the dashboard from a non-live course.
    """

    def setUp(self):
        """Create a course that isn't live yet and enroll for it."""
        super(NotLiveRedirectTest, self).setUp()
        # Start date far in the future makes the course "not yet live"
        CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name'],
            start_date=datetime(year=2099, month=1, day=1)
        ).install()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_redirect_banner(self):
        """
        Navigate to the course info page, then check that we're on the
        dashboard page with the appropriate message.
        """
        url = BASE_URL + "/courses/" + self.course_id + "/" + 'info'
        self.browser.get(url)
        page = DashboardPage(self.browser)
        page.wait_for_page()
        self.assertIn(
            'The course you are looking for does not start until',
            page.banner_text
        )
@attr(shard=1)
class EnrollmentClosedRedirectTest(UniqueCourseTest):
    """
    Test that a banner is shown when the user is redirected to the
    dashboard after trying to view the track selection page for a
    course after enrollment has ended.
    """

    def setUp(self):
        """Create a course that is closed for enrollment, and sign in as a user."""
        super(EnrollmentClosedRedirectTest, self).setUp()
        course = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        # Enrollment window ended yesterday, so enrollment is closed
        now = datetime.now(pytz.UTC)
        course.add_course_details({
            'enrollment_start': (now - timedelta(days=30)).isoformat(),
            'enrollment_end': (now - timedelta(days=1)).isoformat()
        })
        course.install()

        # Add an honor mode to the course
        ModeCreationPage(self.browser, self.course_id).visit()

        # Add a verified mode to the course
        ModeCreationPage(
            self.browser,
            self.course_id,
            mode_slug=u'verified',
            mode_display_name=u'Verified Certificate',
            min_price=10,
            suggested_prices='10,20'
        ).visit()

    def _assert_dashboard_message(self):
        """
        Assert that the 'closed for enrollment' text is present on the
        dashboard.
        """
        page = DashboardPage(self.browser)
        page.wait_for_page()
        self.assertIn(
            'The course you are looking for is closed for enrollment',
            page.banner_text
        )

    def test_redirect_banner(self):
        """
        Navigate to the course info page, then check that we're on the
        dashboard page with the appropriate message.
        """
        AutoAuthPage(self.browser).visit()
        # Hitting the track-selection page for a closed course should
        # bounce us back to the dashboard with a banner.
        url = BASE_URL + "/course_modes/choose/" + self.course_id
        self.browser.get(url)
        self._assert_dashboard_message()
@attr(shard=1)
class LMSLanguageTest(UniqueCourseTest):
    """ Test suite for the LMS Language """

    def setUp(self):
        super(LMSLanguageTest, self).setUp()
        self.dashboard_page = DashboardPage(self.browser)
        self.account_settings = AccountSettingsPage(self.browser)
        AutoAuthPage(self.browser).visit()

    def test_lms_language_change(self):
        """
        Scenario: Ensure that language selection is working fine.
        First I go to the user dashboard page in LMS. I can see 'English' is selected by default.
        Then I choose 'Dummy Language' from drop down (at top of the page).
        Then I visit the student account settings page and I can see the language has been updated to 'Dummy Language'
        in both drop downs.
        After that I select the 'English' language and visit the dashboard page again.
        Then I can see that top level language selector persist its value to 'English'.
        """
        self.dashboard_page.visit()
        language_selector = self.dashboard_page.language_selector
        self.assertEqual(
            get_selected_option_text(language_selector),
            u'English'
        )

        # Switch to the test language via the top-level selector
        select_option_by_text(language_selector, 'Dummy Language (Esperanto)')
        self.dashboard_page.wait_for_ajax()
        self.account_settings.visit()
        # The preference must be reflected in both the settings dropdown and
        # the top-level selector
        self.assertEqual(self.account_settings.value_for_dropdown_field('pref-lang'), u'Dummy Language (Esperanto)')
        self.assertEqual(
            get_selected_option_text(language_selector),
            u'Dummy Language (Esperanto)'
        )

        # changed back to English language.
        select_option_by_text(language_selector, 'English')
        self.account_settings.wait_for_ajax()
        self.assertEqual(self.account_settings.value_for_dropdown_field('pref-lang'), u'English')

        # The choice must persist across a page visit
        self.dashboard_page.visit()
        self.assertEqual(
            get_selected_option_text(language_selector),
            u'English'
        )
|
Edraak/edraak-platform
|
common/test/acceptance/tests/lms/test_lms.py
|
Python
|
agpl-3.0
| 45,548
|
[
"VisIt"
] |
af158026003633f28c7d3b0a5b2dca1d1abdf0ecb464ffbec414958067866961
|
#
# This file is a part of the normalize python library
#
# normalize is free software: you can redistribute it and/or modify
# it under the terms of the MIT License.
#
# normalize is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
#
# You should have received a copy of the MIT license along with
# normalize. If not, refer to the upstream repository at
# http://github.com/hearsaycorp/normalize
#
"""tests that look at the wholistic behavior of records"""
from __future__ import absolute_import
from datetime import datetime
import unittest2
import warnings
from normalize import ListProperty
from normalize import Property
from normalize import Record
from normalize import V1Property
import normalize.exc as exc
from normalize.visitor import VisitorPattern
class TestRecords(unittest2.TestCase):
"""Test that the new data descriptor classes work"""
def test_false_emptiness(self):
    """Test that Properties with falsy empty values don't throw
    exceptions"""
    class SophiesRecord(Record):
        placeholder = Property()
        aux_placeholder = Property(default='')
        age = Property(default=0)
        name = V1Property(isa=basestring)

    sophie = SophiesRecord()
    # An unset Property with no default raises on plain attribute access...
    with self.assertRaises(AttributeError):
        sophie.placeholder
    # ...but the '0' (safe/empty) accessor form returns a falsy value instead
    self.assertFalse(sophie.placeholder0)
    self.assertEqual(sophie.aux_placeholder, '')
    self.assertFalse(sophie.aux_placeholder0)
    self.assertEqual(sophie.age, 0)
    self.assertFalse(sophie.age0)
    # V1Property treats unset as None rather than raising
    self.assertEqual(sophie.name, None)
    # ...and has no '0' accessor form
    with self.assertRaises(AttributeError):
        sophie.name0
    sophie.name = "Sophie"
    self.assertEqual(sophie.name, "Sophie")
    # Assigning None is accepted and round-trips
    sophie.name = None
    self.assertEqual(sophie.name, None)

    # the properties aren't really set, but VisitorPattern sees them.
    expected = {"age": 0, "aux_placeholder": ""}
    self.assertEqual(VisitorPattern.visit(sophie), expected)
    sophie.age = 1
    expected['age'] = 1
    self.assertEqual(VisitorPattern.visit(sophie), expected)
    sophie.age = 0
    expected['age'] = 0
    self.assertEqual(VisitorPattern.visit(sophie), expected)
    # Deleting the attribute falls back to the default, which visits the same
    del sophie.age
    self.assertEqual(VisitorPattern.visit(sophie), expected)
def test_functional_emptiness(self):
    """Test that functional empty values are transient"""
    class BlahRecord(Record):
        blah = Property()

    class LambdaRecord(Record):
        epoch = Property(isa=datetime)
        objective = Property(isa=BlahRecord)

    lambda_ = LambdaRecord()
    # Chained access through an empty value keeps returning falsy empties
    # (even method calls and unknown attributes on the tail)
    self.assertFalse(
        lambda_.epoch0.isoformat()[:4].bob.foo,
        "empty values work",
    )
    self.assertFalse(lambda_.objective0.blah0,
                     "empty values don't persist")
    # A misspelled property name is still caught, with the type in the message
    with self.assertRaisesRegexp(AttributeError, r'BlahRecord.*blha0'):
        lambda_.objective0.blha0
    # Empty values are read-only: assignment (attribute or item) is an error
    with self.assertRaisesRegexp(
        exc.EmptyAttributeError, r"Can't assign.*BlahRecord"
    ):
        lambda_.objective0.blah = 42
    with self.assertRaisesRegexp(
        exc.EmptyAttributeError, r"Can't assign.*BlahRecord"
    ):
        lambda_.objective0[0] = 42
def test_bad_constructor(self):
    """Test that 'empty' definition errors are no longer possible"""
    # Declaring a Property with isa=datetime used to warn; assert it is silent now
    with warnings.catch_warnings(record=True) as w:
        class OhNoRecord(Record):
            lets_go = Property(isa=datetime)
    self.assertEqual(len(w), 0)
def test_empty_type_inference(self):
    """Exercise empty-value behavior through typed, union, list and
    __getattr__/__getitem__-magic properties."""
    class OneRecord(Record):
        foo = Property(isa=type(2))

    class TwoRecord(Record):
        bar = Property(isa=type(None))

        def __call__(self):
            return "hi"

    class NumRecord(Record):
        # Union type: empty access must respect both member types
        which = Property(isa=(OneRecord, TwoRecord))

    class NumsRecord(Record):
        nums = ListProperty(of=NumRecord)

    nr = NumsRecord()

    # Accessing a record attribute directly on the empty *list* is an error...
    with self.assertRaisesRegexp(
        exc.EmptyAttributeError, r"NumRecordList.*attribute 'which'",
    ):
        nr.nums0.which

    # ...but indexing the empty list yields an empty NumRecord
    self.assertFalse(nr.nums0[1].which)

    # Unknown attributes on the empty element are still errors
    with self.assertRaisesRegexp(
        exc.EmptyAttributeError, r"NumRecord\b.*attribute 'blah'",
    ):
        nr.nums0[0].blah

    # Union-typed empties expose the attributes of either member type
    self.assertFalse(nr.nums0[2].which.foo)
    self.assertFalse(nr.nums0[2].which.bar)

    # 0 forms also work as well
    self.assertFalse(nr.nums0[3].which0.bar0)
    self.assertFalse(nr.nums0[4].which0.foo0)

    # array slicing
    self.assertFalse(nr.nums0[3:-1][0].which0.foo0)

    # Neither union member is subscriptable, so indexing is an error
    with self.assertRaisesRegexp(
        exc.NotSubscriptable, r"OneRecord,TwoRecord"
    ):
        nr.nums0[1].which[1]

    # test invoking
    with self.assertRaisesRegexp(exc.NotCallable, r"NumRecord"):
        nr.nums0[1]()
    # TwoRecord defines __call__, so calling the union empty is allowed
    self.assertFalse(nr.nums0[4].which())

    class MagicRecord(Record):
        def __getattr__(self, whatever):
            return 1

    class MagicList(Record):
        def __getitem__(self, whatever):
            return 1

    class LooseRecord(Record):
        # Union that mixes Records with a plain type (datetime)
        this = Property(isa=(OneRecord, TwoRecord, datetime))
        that = Property(isa=MagicRecord)
        other = Property(isa=MagicList)

    lr = LooseRecord()
    # datetime contributes 'date'; OneRecord contributes 'foo' (an int, so .real works)
    self.assertFalse(lr.this0.date)
    self.assertFalse(lr.this0.foo.real)
    # Attributes known to none of the union members raise
    with self.assertRaisesRegexp(
        exc.NoSuchAttribute, r"TwoRecord,datetime",
    ):
        lr.this0.dote
    # 'bar' is typed as NoneType, and None has no 'real'
    with self.assertRaisesRegexp(exc.NoSuchAttribute, r"None"):
        self.assertFalse(lr.this0.bar.real)

    # __getattr__ magic makes any attribute acceptable on the empty value
    self.assertFalse(lr.that0.date)
    with self.assertRaisesRegexp(
        exc.NotSubscriptable, r"MagicRecord",
    ):
        lr.that0[7]

    # __getitem__ magic makes indexing acceptable, but not attribute access
    self.assertFalse(lr.other0[0].foo.bar())
    with self.assertRaisesRegexp(exc.NoSuchAttribute, r"MagicList"):
        lr.other0.anything
    def test_v1_none(self):
        """V1Property values are nullable: del and None-assignment read back None."""
        class SafeRecord(Record):
            maybe_int = V1Property(isa=int)
            maybe_str = V1Property(isa=basestring, json_name="maybeStr")
        # The generated property class name reflects the declared traits.
        self.assertEqual(type(SafeRecord.maybe_int).__name__, "V1Property")
        # FIXME: the name combination code should know that 'Safe' is
        # not needed in this combination
        self.assertEqual(
            type(SafeRecord.maybe_str).__name__, "V1SafeJsonProperty",
        )
        # Deleting a V1 property leaves it reading as None, not raising.
        sr = SafeRecord(maybe_int=4, maybe_str="hey")
        del sr.maybe_int
        self.assertEqual(sr.maybe_int, None)
        del sr.maybe_str
        self.assertEqual(sr.maybe_str, None)
        # Assigning None is likewise accepted and read back as None.
        sr = SafeRecord(maybe_int=4, maybe_str="hey")
        sr.maybe_int = None
        self.assertEqual(sr.maybe_int, None)
        sr.maybe_str = None
        self.assertEqual(sr.maybe_str, None)
|
samv/normalize
|
tests/test_record.py
|
Python
|
mit
| 7,140
|
[
"VisIt"
] |
a7d71d3a3cbe92201c9f57fe199e72acc5e30b1081bf45488d0b139eb16b1b97
|
# -*- coding: utf-8 -*-
#
# pychemqt documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 13 22:26:06 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# Added initialization code for pychemqt config files initialization
from configparser import ConfigParser
import os
import shutil
import subprocess
import sys
from urllib.request import urlopen
from urllib.error import URLError
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Make the pychemqt package importable from the docs directory.
sys.path.insert(0, os.path.abspath('../'))
# Qt is not needed (nor installed) on the documentation builder; mock the
# bindings so autodoc can import GUI modules.
autodoc_mock_imports = ['sip', 'PyQt5', 'PyQt5.QtGui', 'PyQt5.QtCore',
                        'PyQt5.QtWidgets']
# Define pychemqt environment
os.environ["pychemqt"] = os.path.abspath('../')
# Mark every optional runtime dependency as unavailable for the doc build.
os.environ["freesteam"] = "False"
os.environ["pybel"] = "False"
os.environ["CoolProp"] = "False"
os.environ["refprop"] = "False"
os.environ["ezodf"] = "False"
os.environ["openpyxl"] = "False"
os.environ["xlwt"] = "False"
os.environ["icu"] = "False"
os.environ["reportlab"] = "False"
os.environ["PyQt5.Qsci"] = "False"
# Per-user configuration directory (~/.pychemqt/), created on first run.
conf_dir = os.path.expanduser("~") + os.sep + ".pychemqt" + os.sep
# Checking config folder
if not os.path.isdir(conf_dir):
    os.mkdir(conf_dir)
try:
    # 'x' mode creates the log file only if it does not exist yet; close the
    # handle immediately instead of leaking it for the rest of the build.
    with open(conf_dir + "pychemqt.log", 'x'):
        pass
except FileExistsError:  # noqa
    pass
# Checking config files
from tools import firstrun  # noqa
# Checking config file: write the defaults on first run, otherwise merge in
# any sections/options added since the user's file was written.
default_Preferences = firstrun.Preferences()
change = False
if not os.path.isfile(conf_dir + "pychemqtrc"):
    with open(conf_dir + "pychemqtrc", "w") as conf_file:
        default_Preferences.write(conf_file)
    Preferences = default_Preferences
    change = True
else:
    # Check Preferences options to find set new options
    Preferences = ConfigParser()
    Preferences.read(conf_dir + "pychemqtrc")
    for section in default_Preferences.sections():
        if not Preferences.has_section(section):
            Preferences.add_section(section)
            change = True
        for option in default_Preferences.options(section):
            if not Preferences.has_option(section, option):
                value = default_Preferences.get(section, option)
                Preferences.set(section, option, value)
                change = True
    if change:
        # Persist merged preferences; the with-block guarantees the data is
        # flushed to disk (the old code never closed the file handle).
        with open(conf_dir + "pychemqtrc", "w") as conf_file:
            Preferences.write(conf_file)
# FIXME: This file might not to be useful but for now I use it to save project
# configuration data
if not os.path.isfile(conf_dir + "pychemqtrc_temporal"):
    Config = firstrun.config()
    # Use a with-block so the file is closed and flushed (previously leaked).
    with open(conf_dir + "pychemqtrc_temporal", "w") as conf_file:
        Config.write(conf_file)
# Checking costindex: seed CostIndex.dat from the last row of the bundled
# cost index table.
if not os.path.isfile(conf_dir + "CostIndex.dat"):
    orig = os.path.join(os.environ["pychemqt"], "dat", "costindex.dat")
    with open(orig) as cost_index:
        lista = cost_index.readlines()[-1].split(" ")
    with open(conf_dir + "CostIndex.dat", "w") as archivo:
        for data in lista:
            archivo.write(data.replace(os.linesep, "") + os.linesep)
# Checking currency rates: always refresh the local copy of the bundled data.
origen = os.path.join(os.environ["pychemqt"], "dat", "moneda.dat")
shutil.copy(origen, conf_dir + "moneda.dat")
# Checking database with custom components
if not os.path.isfile(conf_dir + "databank.db"):
    firstrun.createDatabase(conf_dir + "databank.db")
# Import the project packages only after the config files exist, since their
# import-time code reads them.
import equipment  # noqa
import lib  # noqa
import tools  # noqa
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
# 'sphinx.ext.napoleon',
'numpydoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pychemqt'
copyright = u'2019, Juan José Gómez Romera'
author = u'Juan José Gómez Romera'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'sphinx_rtd_theme'
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# "display_version": False,
# 'navigation_depth': 5,
# 'collapse_navigation': False,
# 'includehidden': False,
# }
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../images/pychemqt_98.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "images/pychemqt.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pychemqtdoc'
# -- Options for LaTeX output ---------------------------------------------
# latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
# }
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pychemqt.tex', u'pychemqt Documentation',
u'Juan José Gómez Romera', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pychemqt', u'pychemqt Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pychemqt', u'pychemqt Documentation',
author, 'pychemqt', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# Named-target form; the bare-URL key form is deprecated and removed in
# Sphinx >= 6.
intersphinx_mapping = {'python': ('https://docs.python.org/', None)}
# Numpydoc configuration
numpydoc_show_class_members = True
numpydoc_show_inherited_class_members = True
numpydoc_class_members_toctree = False
# Autosummary configuration
autosummary_generate = True
# Autodoc configuration
autodoc_default_options = {
'member-order': 'bysource',
'special-members': '__init__',
'undoc-members': None,
}
# Let MathJax render expressions without an internet connection: if the web
# cannot be reached, fall back to a locally installed MathJax copy.
try:
    # Context manager closes the connection (the old code leaked it).
    with urlopen('https://www.google.com/', timeout=10):
        pass
except OSError:
    # URLError is a subclass of OSError; catching OSError also covers raw
    # socket timeouts that urlopen may let through.
    mathjax_path = '/usr/share/javascript/mathjax/MathJax.js?config=default.js'
def setup(app):
    """Sphinx entry point: register the project-specific build hooks."""
    from sphinx.ext.autodoc import cut_lines
    # Strip the 15-line copyright banner from every module docstring so it
    # is not repeated in each module's documentation.
    trim_banner = cut_lines(15, what=['module'])
    app.connect('autodoc-process-docstring', trim_banner)
    # Regenerate the MEoS documentation sources before the build starts.
    app.connect('builder-inited', Autogenerate_MEoS)
def Autogenerate_MEoS(app):
    """Autobuild the MEoS related documentation source files.

    Runs the generator scripts from the project root (one directory up) so
    the .rst files consumed by this build are up to date.

    Parameters
    ----------
    app : sphinx.application.Sphinx
        The running Sphinx application (unused; required by the event API).
    """
    # Same commands and order as before, deduplicated into a loop.
    scripts = (
        'docs/generateReferences.py',
        'docs/generateMEOSrst.py',
        'docs/generateEoSrst.py',
    )
    for script in scripts:
        cmd = 'cd .. && python3 ' + script
        print(cmd)
        subprocess.check_output(['bash', '-c', cmd])
|
jjgomera/pychemqt
|
docs/conf.py
|
Python
|
gpl-3.0
| 14,259
|
[
"Pybel"
] |
720f072e01cc368e3e5674ff64fa6b32850a853a9032452d378fd43997cb9c11
|
""" Class that contains client access to the JobManager handler. """
from DIRAC.Core.Base.Client import Client, createClient
@createClient("WorkloadManagement/JobManager")
class JobManagerClient(Client):
    """Client wrapper that targets the WorkloadManagement/JobManager service."""

    def __init__(self, url=None, **kwargs):
        """Initialise the client and select the JobManager endpoint.

        :param self: self reference
        :param url: explicit URL of the JobManagerHandler; when omitted the
                    default system/component path is used
        :param kwargs: forwarded to the Base Client class
        """
        super(JobManagerClient, self).__init__(**kwargs)
        # Fall back to the well-known service path when no URL is given.
        self.serverURL = url if url else "WorkloadManagement/JobManager"
|
DIRACGrid/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Client/JobManagerClient.py
|
Python
|
gpl-3.0
| 711
|
[
"DIRAC"
] |
060f15b35eca7562dda73fefa1f436491d945304a06bf251d04686dab0fdab0d
|
###########################################################################
# (C) 2016 Elettra - Sincrotrone Trieste S.C.p.A.. All rights reserved. #
# #
# #
# This file is part of STP-Core, the Python core of SYRMEP Tomo Project, #
# a software tool for the reconstruction of experimental CT datasets. #
# #
# STP-Core is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# STP-Core is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License #
# for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with STP-Core. If not, see <http://www.gnu.org/licenses/>. #
# #
###########################################################################
#
# Author: Francesco Brun
# Last modified: January, 2nd 2017 (bug in concatenate with numpy 1.11 + mkl)
#
from numpy import float32, iinfo, finfo, ndarray, arange, meshgrid, sqrt
from numpy import exp, real, copy, zeros, ones, pad, ComplexWarning, hanning
from numpy import tile, concatenate
#from numpy.fft import fft2, ifft2
from pyfftw import n_byte_align, simd_alignment
from pyfftw.interfaces.numpy_fft import rfft2, irfft2
from warnings import simplefilter
def _windowing_lr(im, marg):
vscale = ones(im.shape[1] - marg)
hann = hanning(marg)
vleft = hann[0:marg / 2]
vright = hann[marg / 2:]
vrow = concatenate((vleft,vscale))
vrow = concatenate((vrow,vright))
vmatrix = tile(vrow, (im.shape[0],1))
# Correction for odd/even issues:
marg = im.shape[1] - vmatrix.shape[1]
tmp = zeros(vmatrix[:,vmatrix.shape[1] - 1].shape) # Get last column
tmp = tile(tmp, (marg,1)) # Replicate the last column the right number of times
vmatrix = concatenate((vmatrix,tmp.T), axis=1) # Concatenate tmp after the image
# Apply smoothing:
im = im * vmatrix
return im.astype(float32)
def raven(im, args):
    """Process a sinogram image with the Raven de-striping algorithm.
    Parameters
    ----------
    im : array_like
        Image data as numpy array.
    n : int
        Size of the window (minimum n = 3) around the zero frequency where
        filtering is actually applied (v0 parameter in Raven's article). Higher
        values means more smoothing effect. [Suggested for default: 3]
    d0 : float
        Cutoff in the range [0.01, 0.99] of the low pass filter (a Gaussian filter
        is used instead of the originally proposed Butterworth filter in order to
        have only one tuning parameter). Higher values means more smoothing effect.
        [Suggested for default: 0.5].
    (Parameters n and d0 have to passed as a string separated by ;)
    Example (using tiffile.py)
    --------------------------
    >>> im = imread('sino_orig.tif')
    >>> im = raven(im, '3;0.50')
    >>> imsave('sino_flt.tif', im)
    References
    ----------
    C. Raven, Numerical removal of ring artifacts in microtomography,
    Review of Scientific Instruments 69(8):2978-2980, 1998.
    """
    # Disable a warning:
    simplefilter("ignore", ComplexWarning)
    # Get args:
    param1, param2 = args.split(";")
    n = int(param1)
    d0 = (1.0 - float(param2))  # Simpler for user
    # Internal parameters for Gaussian low-pass filtering:
    d0 = d0 * (im.shape[1] / 2.0)
    # Pad image.  Floor division keeps pad widths integral under Python 3;
    # for the Python 2 int inputs this is byte-identical behaviour.
    marg = im.shape
    im = pad(im, pad_width=((im.shape[0] // 2, im.shape[0] // 2), (0, 0)), mode='reflect')  # or 'constant' for zero padding
    im = pad(im, pad_width=((0, 0), (im.shape[1] // 2, im.shape[1] // 2)), mode='edge')  # or 'constant' for zero padding
    # Windowing:
    im = _windowing_lr(im, marg[1])
    im = _windowing_lr(im.T, marg[0]).T
    # Compute FFT:
    n_byte_align(im, simd_alignment)
    im = rfft2(im, threads=2)
    # Prepare the frequency coordinates:
    u = arange(0, im.shape[0], dtype=float32)
    v = arange(0, im.shape[1], dtype=float32)
    # Compute the indices for meshgrid:
    u[(u > im.shape[0] / 2.0)] = u[(u > im.shape[0] / 2.0)] - im.shape[0]
    v[(v > im.shape[1] / 2.0)] = v[(v > im.shape[1] / 2.0)] - im.shape[1]
    # Compute the meshgrid arrays:
    V, U = meshgrid(v, u)
    # Compute the distances D(U, V):
    D = sqrt(U ** 2 + V ** 2)
    # Prepare Gaussian filter limited to a window around zero-frequency:
    H = exp(-(D ** 2) / (2 * (d0 ** 2)))
    # Pass-band outside the +/- n/2 rows around the zero frequency
    # (floor division for Python 3 index safety):
    if (n % 2 == 1):
        H[n // 2:-(n // 2), :] = 1
    else:
        H[n // 2:-(n // 2 - 1), :] = 1
    # Do the filtering:
    im = H * im
    # Compute inverse FFT of the filtered data:
    n_byte_align(im, simd_alignment)
    im = irfft2(im, threads=2)
    # Crop image back to the original size:
    im = im[im.shape[0] // 4:(im.shape[0] // 4 + marg[0]), im.shape[1] // 4:(im.shape[1] // 4 + marg[1])]
    # Return image:
    return im.astype(float32)
|
ElettraSciComp/STP-Core
|
STP-Core/preprocess/ringremoval/raven.py
|
Python
|
gpl-3.0
| 5,407
|
[
"Gaussian"
] |
032d6d3f380e357106979f10b927ad6b8269cf18870afe2732d17519b615bde5
|
'''
Created on Nov 21, 2011
@author: Mirna Lerotic, 2nd Look Consulting
http://www.2ndlookconsulting.com/
Copyright (c) 2013, Stefan Vogt, Argonne National Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the Argonne National Laboratory nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
from __future__ import division
import string
import datetime
import time as tm
import numpy as np
import logging
import os
import struct
import matplotlib as mplot
import multiprocessing
import csv
import glob
from file_io import maps_mda
from file_io import maps_nc
from file_io import maps_hdf5
import maps_definitions
import maps_detector
import maps_fit_parameters
import maps_analyze
import maps_tools
import h5py
from file_io.file_util import open_file_with_retry, call_function_with_retry
# NOTE(review): appears to be the "no fit matrix" sentinel passed to the
# fitting routines — confirm against callers.
NO_MATRIX = 0
# Flyscan file-format discriminators.  The 'FLYSACN' spelling is a historical
# typo kept as-is because external code may reference this name.
FLYSACN_TYPE_NONE = 0
FLYSCAN_TYPE_NETCDF = 1
FLYSCAN_TYPE_BIONANOPROBE = 3
FLYSCAN_TYPE_HDF = 4
# Beamline-specific aliases for the netCDF flyscan variants.
FLYSCAN_TYPE_2ID_NETCDF = 1
FLYSCAN_TYPE_8BM_NETCDF = 2
# ----------------------------------------------------------------------
def rebin(a, *args):
    '''
    rebin ndarray data into a smaller ndarray of the same rank whose dimensions
    are factors of the original dimensions. eg. An array with 6 columns and 4 rows
    can be reduced to have 6,3,2 or 1 columns and 4,2 or 1 rows.
    The target shape is passed as a single tuple (the historical eval()-based
    implementation also only honoured args[0]).
    example usages:
    >>> a=rand(6,4); b=rebin(a,(3,2))
    >>> a=rand(6); b=rebin(a,(2,))
    '''
    shape = a.shape
    lenShape = len(shape)
    new_shape = args[0]
    # Block size along each axis.  Floor division is required: with the
    # module-level "from __future__ import division" the old '/' produced
    # floats, which reshape only tolerated via a numpy deprecation path.
    factor = np.asarray(shape) // np.asarray(new_shape)
    # Interleave (n0, f0, n1, f1, ...) so every output cell maps to one
    # f0 x f1 x ... block of the input.  This replaces the old eval()-built
    # expression with equivalent direct calls.
    interleaved = []
    for n, f in zip(new_shape, factor):
        interleaved.extend((int(n), int(f)))
    out = a.reshape(interleaved)
    # Sum away the block axes; after each sum the remaining block axes shift
    # left, hence the shifting axis index i + 1 (same as .sum(1).sum(2)...).
    for i in range(lenShape):
        out = out.sum(i + 1)
    # Convert block sums to block means.
    for f in factor:
        out = out / f
    return out
# ----------------------------------------------------------------------
def fit_line_threaded(log_name, i_fit, data_line, n_rows, matrix, spectral_binning, elt_line, fitp, old_fitp, keywords, xrf_bin, calib):
    """Fit one detector data line in a worker process.

    Thin wrapper around maps_analyze.analyze.fit_line() so the per-row fit
    can be dispatched via multiprocessing; results are returned as a plain
    list so they are picklable.

    Parameters are forwarded unchanged to fit_line(); *log_name* selects the
    logger to report progress on and *i_fit* is the row index being fitted.
    """
    logger = logging.getLogger(log_name)
    logger.info('fitting row number %s', i_fit)
    fit = maps_analyze.analyze(logger)
    (fitted_line, ka_line, l_line, bkground_line, values_line, bkgnd_line,
     tfy_line, xmin, xmax) = fit.fit_line(
        data_line, n_rows, matrix, spectral_binning, elt_line, fitp,
        old_fitp, keywords, xrf_bin, calib)
    return [fitted_line, ka_line, l_line, bkground_line, values_line,
            bkgnd_line, tfy_line, xmin, xmax]
# ----------------------------------------------------------------------
class analyze:
def __init__(self, logger, info_elements, main_dict, maps_conf, beamline='2-ID-E', use_fit=0):
self.info_elements = info_elements
self.beamline = beamline
self.logger = logger
self.integrate = 0
self.main_dict = main_dict
self.pca = 0
self.save_ram = 0
self.verbose = 2
self.maxiter = 500
self.main_max_spectra = main_dict['max_spectra']
self.max_spec_channels = main_dict['max_spec_channels']
self.max_ICs = main_dict['max_ICs']
self.show_extra_pvs = 1
self.which_par_str = 0
self.maps_def = maps_definitions.maps_definitions(self.logger)
#self.maps_conf = self.maps_def.set_maps_definitions(beamline, info_elements)
self.maps_conf = maps_conf
self.maps_conf.use_fit = use_fit
self.netcdf_fly_scan = 0
self.save_h5 = 1
self.xanes = 0
if beamline == '2-ID-E': self.crate = '2xfm'
if beamline == '2-ID-D': self.crate = '2idd'
if beamline == '2-ID-B': self.crate = '2idb1'
if beamline == '2-BM': self.crate = '2bmb'
if beamline == 'Bio-CAT': self.crate = 'biocat'
if beamline == 'GSE-CARS': self.crate = 'gsecars'
# ----------------------------------------------------------------------
    def __binning__(self, scan, xrf_bin, n_cols, n_rows, mca_arr_dimensions, elt1_arr):
        """Spatially bin scan.mca_arr and elt1_arr in place.

        xrf_bin selects the mode: 2 sums 2x2 pixel blocks, 3 computes a 3x3
        running sum line by line, 4 sums 3x3 pixel blocks.  Non-anchor pixels
        are overwritten with a copy of their block's result, so array shapes
        are unchanged.  Returns the (modified) elt1_arr.
        NOTE(review): this code relies on Python 2 integer division semantics
        in its callers and on exact statement order; modify with care.
        """
        self.logger.info('binning the data')
        this_mca_arr_dimensions = scan.mca_arr.shape
        # Channel count clamped to the configured maximum; note this uses the
        # *passed-in* dimensions here, not the freshly read shape above.
        this_n_channels = min(mca_arr_dimensions[2], self.main_dict['max_spec_channels'])
        # --- 2x2 block binning ---------------------------------------------
        if (xrf_bin == 2) and (n_cols > 5) and (n_rows > 5):
            for i_bin in range(n_cols - 1):
                if i_bin % 2 == 0:
                    for jj in range(n_rows):
                        if jj % 2 == 0:
                            # Anchor pixel: sum the 2x2 neighbourhood.
                            scan.mca_arr[i_bin, jj, 0:this_n_channels] = scan.mca_arr[i_bin, jj, 0:this_n_channels] + scan.mca_arr[i_bin + 1, jj, 0:this_n_channels] + scan.mca_arr[i_bin, jj+1, 0:this_n_channels]+scan.mca_arr[i_bin+1, jj+1, 0:this_n_channels]
                            elt1_arr[i_bin, jj] = elt1_arr[i_bin, jj] + elt1_arr[i_bin + 1, jj]+elt1_arr[i_bin, jj + 1]+elt1_arr[i_bin + 1, jj + 1]
                        else:
                            # Odd row: copy the anchor row's result.
                            scan.mca_arr[i_bin, jj, 0:this_n_channels] = scan.mca_arr[i_bin, jj - 1, 0:this_n_channels]
                            elt1_arr[i_bin, jj] = elt1_arr[i_bin, jj - 1]
                else:
                    # Odd column: copy the anchor column's result.
                    scan.mca_arr[i_bin, :, 0:this_n_channels] = scan.mca_arr[i_bin - 1, :, 0:this_n_channels]
                    elt1_arr[i_bin, :] = elt1_arr[i_bin - 1, :]
        # --- 3x3 running sum (line buffered) -------------------------------
        if (xrf_bin == 3) and (n_cols > 5) and (n_rows > 5) :
            current_line = np.zeros((self.main_dict['max_spec_channels'], n_rows))
            previous_line = np.zeros((self.main_dict['max_spec_channels'], n_rows))
            next_line = np.zeros((self.main_dict['max_spec_channels'], n_rows))
            current_elt_line = np.zeros((elt1_arr.size))
            next_elt_line = np.zeros((elt1_arr.size))
            this_mca_arr_dimensions = scan.mca_arr.shape
            this_n_channels = min(this_mca_arr_dimensions[2], self.main_dict['max_spec_channels'])
            for i_bin in range(n_cols):
                # NOTE(review): the condition 'i_bin > 1' (not > 0) and the
                # n_rows-1 vs n_rows ranges below look suspicious but are
                # preserved as-is — confirm against the original IDL code.
                if i_bin > 1 :
                    previous_elt_line = current_elt_line.copy()
                    current_elt_line = next_elt_line.copy()
                    previous_line = current_line.copy()
                    current_line = next_line.copy()
                else:
                    for jj in range(n_rows-1) :
                        current_line[0:this_n_channels, jj] = scan.mca_arr[i_bin, jj, 0:this_n_channels]
                    current_elt_line = elt1_arr[i_bin, :]
                    # NOTE(review): plain aliasing here (no .copy()) —
                    # previous_line shares storage with current_line.
                    previous_line = current_line
                    previous_elt_line = current_elt_line
                if i_bin < n_cols-1 :
                    for jj in range(n_rows) :
                        next_line[0:this_n_channels, jj] = scan.mca_arr[i_bin+1, jj, 0:this_n_channels]
                    next_elt_line = elt1_arr[i_bin+1, :]
                if n_rows-2 > 1 :
                    # First row: mirror the edge by double-counting column jj.
                    jj = 0
                    scan.mca_arr[i_bin, jj, 0:this_n_channels] = previous_line[0:this_n_channels, jj] + previous_line[0:this_n_channels, jj] + previous_line[0:this_n_channels, jj+1] + \
                        current_line[0:this_n_channels, jj] + current_line[0:this_n_channels, jj] + current_line[0:this_n_channels, jj+1] + \
                        next_line[0:this_n_channels, jj] + next_line[0:this_n_channels, jj] + next_line[0:this_n_channels, jj+1]
                    elt1_arr[i_bin, jj] = np.sum([previous_elt_line[jj:jj+1], current_elt_line[jj:jj+1], next_elt_line[jj:jj+1],
                                                  previous_elt_line[jj], current_elt_line[jj], next_elt_line[jj]])
                    # Interior rows: 3x3 sum around (i_bin, jj).
                    for jj in range(n_rows-1) :
                        scan.mca_arr[i_bin, jj, 0:this_n_channels] = previous_line[0:this_n_channels, jj-1] + previous_line[0:this_n_channels, jj] + previous_line[0:this_n_channels, jj+1] + \
                            current_line[0:this_n_channels, jj-1] + current_line[0:this_n_channels, jj] + current_line[0:this_n_channels, jj+1] + \
                            next_line[0:this_n_channels, jj-1] + next_line[0:this_n_channels, jj] + next_line[0:this_n_channels, jj+1]
                        elt1_arr[i_bin, jj] = np.sum([previous_elt_line[jj-1:jj+1], current_elt_line[jj-1:jj+1], next_elt_line[jj-1:jj+1]])
                    # Last row: mirror the edge again.
                    jj = n_rows-1
                    scan.mca_arr[i_bin, jj, 0:this_n_channels] = previous_line[0:this_n_channels, jj] + previous_line[0:this_n_channels, jj] + previous_line[0:this_n_channels, jj] + \
                        current_line[0:this_n_channels, jj] + current_line[0:this_n_channels, jj] + current_line[0:this_n_channels, jj] + \
                        next_line[0:this_n_channels, jj] + next_line[0:this_n_channels, jj] + next_line[0:this_n_channels, jj]
                    elt1_arr[i_bin, jj] = np.sum([previous_elt_line[jj-1:jj], current_elt_line[jj-1:jj], next_elt_line[jj-1:jj],
                                                  previous_elt_line[jj], current_elt_line[jj], next_elt_line[jj]])
        # --- 3x3 block binning ---------------------------------------------
        if (xrf_bin == 4) and (n_cols > 5) and (n_rows > 5) :
            for i_bin in range(n_cols-2) :
                if i_bin % 3. == 0:
                    for jj in range(n_rows-2):
                        if jj % 3 == 0:
                            scan.mca_arr[i_bin, jj, 0:this_n_channels] = scan.mca_arr[i_bin, jj, 0:this_n_channels]+scan.mca_arr[i_bin+1, jj, 0:this_n_channels]+scan.mca_arr[i_bin+2, jj, 0:this_n_channels]+\
                                scan.mca_arr[i_bin, jj+1, 0:this_n_channels]+scan.mca_arr[i_bin+1, jj+1, 0:this_n_channels]+scan.mca_arr[i_bin+2, jj+1, 0:this_n_channels]+\
                                scan.mca_arr[i_bin, jj+2, 0:this_n_channels]+scan.mca_arr[i_bin+1, jj+2, 0:this_n_channels]+scan.mca_arr[i_bin+2, jj+2, 0:this_n_channels]
                            # NOTE(review): several elt1_arr terms below are
                            # repeated (e.g. [i_bin+2, jj] twice) — possibly
                            # unintended; preserved as-is, confirm upstream.
                            elt1_arr[i_bin, jj] = elt1_arr[i_bin, jj]+elt1_arr[i_bin+1, jj]+elt1_arr[i_bin+2, jj]+elt1_arr[i_bin+2, jj]+\
                                elt1_arr[i_bin, jj+1]+elt1_arr[i_bin+1, jj+1]+elt1_arr[i_bin, jj+1]+elt1_arr[i_bin+2, jj+1]+\
                                elt1_arr[i_bin, jj+2]+elt1_arr[i_bin+1, jj+2]+elt1_arr[i_bin, jj+2]+elt1_arr[i_bin+2, jj+2]
                        else:
                            # Non-anchor rows copy the anchor row's result.
                            if (jj+2) % 3 == 0:
                                scan.mca_arr[i_bin, jj, 0:this_n_channels] = scan.mca_arr[i_bin, jj-1, 0:this_n_channels]
                                elt1_arr[i_bin, jj] = elt1_arr[i_bin, jj-1]
                            if (jj+1) % 3 == 0:
                                scan.mca_arr[i_bin, jj, 0:this_n_channels] = scan.mca_arr[i_bin, jj-2, 0:this_n_channels]
                                elt1_arr[i_bin, jj] = elt1_arr[i_bin, jj-2]
                else:
                    # Non-anchor columns copy the anchor column's result.
                    if (i_bin+2) % 3 == 0:
                        scan.mca_arr[i_bin, :, 0:this_n_channels] = scan.mca_arr[i_bin-1, :, 0:this_n_channels]
                        elt1_arr[i_bin, :] = elt1_arr[i_bin-1, :]
                    if (i_bin+1) % 3 == 0:
                        scan.mca_arr[i_bin, :, 0:this_n_channels] = scan.mca_arr[i_bin-2, :, 0:this_n_channels]
                        elt1_arr[i_bin, :] = elt1_arr[i_bin-2, :]
        return elt1_arr
# ----------------------------------------------------------------------
def maps_generate_img_add_extra(self, extra_pv, thisdata):
    """
    Copy selected extra-PV string values into thisdata.

    Looks for a set of known PV-name substrings in extra_pv.name and, for
    each one found, stores the first PV's string value in the matching
    add_str slot (a .. h) on thisdata. Afterwards fills
    thisdata.extra_str_arr with up to 100 'name; value' strings, one per PV.
    Any attribute/lookup failure in the first phase is silently ignored,
    matching the original best-effort behavior.
    """
    # ordered mapping: PV-name substring -> add_str attribute slot
    slot_map = (('saveData_fileSystem', 'a'),
                ('saveData_subDir', 'b'),
                ('saveData_scanNumber', 'c'),
                ('userStringCalc10.AA', 'd'),
                ('userStringCalc10.BB', 'e'),
                ('userStringCalc10.CC', 'f'),
                ('userStringCalc10.DD', 'g'),
                ('userStringCalc10.EE', 'h'))
    try:
        for key, attr in slot_map:
            if extra_pv.name.find(key) > -1:
                setattr(thisdata.add_str, attr, extra_pv[0].str_value)
    except:
        # best effort only -- extra PVs may be missing or oddly shaped
        pass
    n_extra = min(len(extra_pv.name), 100)
    for idx in range(n_extra):
        thisdata.extra_str_arr[idx] = extra_pv[idx].name + '; ' + extra_pv[idx].str_value
    return
# ----------------------------------------------------------------------
def maps_core_generate_fly_dat(self, header, mdafilename, this_detector, total_number_detectors):
n_ev, n_rows, n_cols, n_energy, energy, energy_spec, scan_time_stamp, dataset_orig = self.change_xrf_resetvars()
mda_scan = maps_mda.mda(self.logger)
scan = mda_scan.read_scan(mdafilename, threeD_only=2)
if scan == None or scan.detector_description_arr == None:
self.logger.info('Not a valid mda flyscan file, scan == None . returning: %s', mdafilename)
return
if len(scan.detector_description_arr) < 3:
self.logger.info('Not a valid mda flyscan file, returning: %s', mdafilename)
return
new_detector_description_arr = ['us_ic', 'ds_ic', 'sum_mcs', 'H_dpc', 'V_dpc', 'dia1_dpc', 'dia2_dpc', 'sca1',
'sca2', 'sca3', 'TFY-ICR', 'T', 'norm_H_dpc', 'norm_V_dpc', 'phase', 'abs']
temp_a_arr = ['2xfm:mcs:mca10.VAL', '2xfm:mcs:mca11.VAL', '2xfm:mcs:mca12.VAL', '2xfm:mcs:mca13.VAL',
'2xfm:mcs:mca14.VAL', '2xfm:mcs:mca15.VAL', '2xfm:mcs:mca16.VAL', '2xfm:mcs:mca17.VAL',
'2xfm:mcs:mca18.VAL', '2xfm:mcs:mca19.VAL', '2xfm:mcs:mca2.VAL', '2xfm:mcs:mca3.VAL',
'2xfm:mcs:mca1.VAL', '2xfm:mcs:mca26.VAL', '2xfm:mcs:mca27.VAL', '2xfm:mcs:mca28.VAL',
'2xfm:mcs:mca29.VAL']
temp_b_arr = ['SD_1', 'SD_2', 'SD_3', 'SD_4', 'SD_5', 'SD_6', 'SD_7', 'SD_8', 'SD_9', 'SD_10',
'us_ic', 'ds_ic', 'T', 'sca1_det0', 'sca2_det0', 'sca3_det0', 'tfy_det0']
b_pos = np.zeros(len(temp_a_arr))
n_gui_chan = len(new_detector_description_arr)
dataset_orig = np.ndarray((scan.x_pixels, scan.y_pixels, n_gui_chan), dtype=float)
dataset_orig.fill(1.0)
try:
time_idx = scan.detector_description_arr.index('2xfm:mcs:mca1.VAL')
except:
time_idx = -1
if time_idx == -1:
try:
time_idx = scan.detector_description_arr.index('8bmb:3820:mca1.VAL')
except:
time_idx = -1
temp_a_arr = ['8bmb:3820:mca10.VAL', '8bmb:3820:mca11.VAL', '8bmb:3820:mca12.VAL', '8bmb:3820:mca13.VAL',
'8bmb:3820:mca14.VAL', '8bmb:3820:mca15.VAL', '8bmb:3820:mca16.VAL', '8bmb:3820:mca17.VAL',
'8bmb:3820:mca18.VAL', '8bmb:3820:mca19.VAL', '8bmb:3820:mca2.VAL', '8bmb:3820:mca3.VAL',
'8bmb:3820:mca1.VAL', '8bmb:3820:mca26.VAL', '8bmb:3820:mca27.VAL', '8bmb:3820:mca28.VAL',
'8bmb:3820:mca29.VAL']
if time_idx == -1:
self.logger.error('an error occured, cannot read dwell time in fly scan; returning')
n_ev, n_rows, n_cols, n_energy, energy, energy_spec, scan_time_stamp, dataset_orig = self.change_xrf_resetvars()
return
time = scan.detector_arr[:, :, time_idx]
time_norm = time / np.mean(time)
for n in range(len(temp_a_arr)):
try:
b_pos[n] = scan.detector_description_arr.index(temp_a_arr[n])
scan.detector_arr[:, :, b_pos[n]] = scan.detector_arr[:, :, b_pos[n]] / time[:, :] # time_norm[*, *]
# dataset_orig(*, *, n) = (detector_arr(*, *, idx[0])-32768.)/3276.8
except:
self.logger.debug('wo = -1; @ %s temp_a_arr[n] = %s', n, temp_a_arr[n])
dataset_orig[:, :, 11] = time[:, :] / 25000
# sum_mcs
dataset_orig[:, :, 2] = scan.detector_arr[:, :, b_pos[1]] + scan.detector_arr[:, :, b_pos[2]] + scan.detector_arr[:, :, b_pos[3]] + scan.detector_arr[:, :, b_pos[4]]
# H_dpc
dataset_orig[:, :, 3] = scan.detector_arr[:, :, b_pos[1]] - scan.detector_arr[:, :, b_pos[2]] - scan.detector_arr[:, :, b_pos[3]] + scan.detector_arr[:, :, b_pos[4]]
dataset_orig[:, :, 3] = dataset_orig[:, :, 3] / dataset_orig[:, :, 2]
# V_dpc
dataset_orig[:, :, 4] = scan.detector_arr[:, :, b_pos[1]] + scan.detector_arr[:, :, b_pos[2]] - scan.detector_arr[:, :, b_pos[3]] - scan.detector_arr[:, :, b_pos[4]]
dataset_orig[:, :, 4] = dataset_orig[:, :, 3] / dataset_orig[:, :, 2]
# dia1_dpc
dataset_orig[:, :, 5] = scan.detector_arr[:, :, b_pos[1]] - scan.detector_arr[:, :, b_pos[3]]
dataset_orig[:, :, 5] = dataset_orig[:, :, 5] / dataset_orig[:, :, 2]
# dia2_dpc
dataset_orig[:, :, 6] = scan.detector_arr[:, :, b_pos[2]] - scan.detector_arr[:, :, b_pos[4]]
dataset_orig[:, :, 6] = dataset_orig[:, :, 6] / dataset_orig[:, :, 2]
# us,ds_ic
dataset_orig[:, :, 0] = scan.detector_arr[:, :, b_pos[10]]
dataset_orig[:, :, 1] = scan.detector_arr[:, :, b_pos[11]]
dataset_orig[:, :, 15] = dataset_orig[:, :, 1] / dataset_orig[:, :, 0]
# scas
dataset_orig[:, :, 7] = scan.detector_arr[:, :, b_pos[13]]
dataset_orig[:, :, 8] = scan.detector_arr[:, :, b_pos[14]]
dataset_orig[:, :, 9] = scan.detector_arr[:, :, b_pos[15]]
dataset_orig[:, :, 10] = scan.detector_arr[:, :, b_pos[16]]
nrml = dataset_orig[:, :, 3]
sz = nrml.shape
nx = sz[0]
ny = sz[1]
if (nx % 2) == 0:
nx -= 1
if (ny % 2) == 0:
ny -= 1
nrml = dataset_orig[2:nx-3, 0:ny-1, 3]
ntmb = dataset_orig[2:nx-3, 0:ny-1, 4]
if self.maps_conf.dmaps[this_detector].name == 'phase':
no_int = 1
else:
no_int = 0
anl = maps_analyze.analyze(self.logger)
nrml, ntmb, rdt = anl.maps_simple_dpc_integration(nrml, ntmb, no_int=no_int)
# notem nrml, ntmb, now normalized (what comes up ust go down)
# first pad dataset with mean value, so that non assigned pixels are not
# zero (for scaling purposes)
dataset_orig[:, :, 12] = np.mean(nrml)
dataset_orig[2:nx - 3, 0:ny - 1, 12] = nrml
dataset_orig[:, :, 13] = np.mean(ntmb)
dataset_orig[2:nx - 3, 0:ny - 1, 13] = ntmb
dataset_orig[:, :, 14] = np.mean(rdt)
dataset_orig[2:nx - 3, 0:ny - 1, 14] = rdt
scan.detector_description_arr = new_detector_description_arr
no_detectors = 1
img_type = 0
n_channels = 2048
n_energy = 2048
XRFmaps_info = self.maps_def.define_xrfmaps_info(scan.x_pixels, scan.y_pixels, 3, n_channels, n_energy, no_detectors, self.maps_conf.n_used_chan, self.maps_conf.n_used_dmaps, self.maps_conf)
XRFmaps_info.img_type = img_type
if scan.extra_pv:
self.maps_generate_img_add_extra(scan.extra_pv, XRFmaps_info)
XRFmaps_info.dataset_names[:] = ['fly', 'dummy', 'dummy']
XRFmaps_info.scan_time_stamp = scan.scan_time_stamp
XRFmaps_info.write_date = datetime.datetime.ctime(datetime.datetime.now())
XRFmaps_info.x_coord_arr = scan.x_coord_arr
XRFmaps_info.y_coord_arr = scan.y_coord_arr
for j in range(len(new_detector_description_arr)):
XRFmaps_info.dmaps_set[:, :, j] = dataset_orig[:, :, j]
XRFmaps_info.dmaps_names[:] = new_detector_description_arr[:]
XRFmaps_info.dmaps_units = ['raw']
XRFmaps_info.chan_names = ['dummy'] # make_maps_conf.chan[wo].name
XRFmaps_info.version = 5L
XRFmaps_info.us_amp.fill(0.) # us_amp
XRFmaps_info.ds_amp.fill(0.) # ds_amp
XRFmaps_info.n_energy = n_energy
dataset_size = len(XRFmaps_info.dataset_names)
filename = os.path.join(self.main_dict['fly_dat_dir'], header + '.fly.dat')
data_file = open_file_with_retry(str(filename), 'wb')
if data_file == None:
self.logger.error('Error: (maps_core_generate_fly_dat) failed to open file to write to : %s', filename)
return None
data_file.write(struct.pack('>iiiii', 0, XRFmaps_info.version, n_channels, scan.x_pixels, scan.y_pixels))
data_file.write(struct.pack('>iiii', len(energy), no_detectors, dataset_size, self.maps_conf.n_used_chan))
data_file.write(struct.pack('>i', self.maps_conf.n_used_dmaps))
XRFmaps_info.dump(data_file)
data_file.close()
# ----------------------------------------------------------------------
def read_and_parse_scan(self, header, mdafilename, beamline, this_detector, total_number_detectors):
    """
    Read the scan file for the given beamline and return the parsed scan.

    Dispatches on the beamline name: mda-based beamlines (2-ID-*, 2-BM,
    Bionanoprobe) are read with maps_mda, with special handling for fly
    scans whose XRF spectra live in companion netCDF / HDF5 files;
    DLS-I08 scans are read from img.dat/*.h5. Also sets self.xanes,
    self.netcdf_fly_scan (via the caller's reset) and self.save_h5 as
    side effects. Returns None for unsupported beamlines / scan flavors
    or on read errors.
    """
    self.logger.info('beamline: %s', beamline)
    # default result: stays None unless one of the branches below reads a scan
    scan = None
    if beamline == 'Bio-CAT':
        self.logger.warning('cannot read biocat scans')
        return None
    if beamline == 'GSE-CARS':
        self.logger.warning('cannot read GSE-CARS scans')
        return None
    if (beamline == '2-ID-E') or (beamline == '2-ID-D') or (beamline == '2-ID-B') or (beamline == '2-BM') or (beamline == 'Bionanoprobe'):
        # read scan info
        mda = maps_mda.mda(self.logger)
        info = mda.read_scan_info(mdafilename)
        if info is None:
            self.logger.warning('Warning: skipping file : %s, due to maps_scan_info error', mdafilename)
            return None
        if np.amin(info.spectrum) == -1:
            self.logger.warning('Warning: skipping file : %s, due to maps_scan_info error', mdafilename)
            return None
        # a rank-2 scan whose last dimension is flagged as spectrum is XANES
        if (info.rank == 2) and (info.spectrum[info.rank - 1] == 1):
            self.xanes = 1
        else:
            self.xanes = 0
        if self.verbose == 1:
            self.logger.debug('info.rank : %s', info.rank)
            self.logger.debug('info.dims : %s', info.dims)
            self.logger.debug('info.spectrum: %s', info.spectrum)
            self.logger.debug('xanes : %s', self.xanes)
        # look for a lookup text file pointing at a combined XRF file
        test_textfile = 0
        combined_file_info = os.path.join(self.main_dict['master_dir'], 'lookup', header + '.txt')
        if os.path.isfile(combined_file_info):
            test_textfile = 1
        self.logger.debug('testing test_textfile %s', test_textfile)
        # determine which kind of companion fly-scan XRF files exist
        test_netcdf = FLYSACN_TYPE_NONE
        file_path = os.path.join(self.main_dict['master_dir'], os.path.join('flyXRF.h5', header))
        hdf_files = glob.glob(file_path + '*.h5')
        num_files_found = len(hdf_files)
        if num_files_found != 1:
            if num_files_found > 1:
                self.logger.error('Error: too many files found, %s', hdf_files)
            else:
                self.logger.info('Could not find hdf5 file associated with mda file: %s', mdafilename)
        else:
            test_netcdf = FLYSCAN_TYPE_HDF
        if test_netcdf == FLYSACN_TYPE_NONE:
            self.logger.info('testing presence of converted flyscans %s', test_netcdf)
            file_path = os.path.join(self.main_dict['master_dir'], os.path.join('flyXRF', header))
            nc_files = glob.glob(file_path + '*.nc')
            num_files_found = len(nc_files)
            if num_files_found < 1:
                self.logger.info('Could not find netcdf files associated with mda: %s', mdafilename)
            else:
                test_netcdf = FLYSCAN_TYPE_NETCDF
        if test_netcdf == FLYSACN_TYPE_NONE:
            # bionanoprobe naming scheme, e.g. bnp_fly_18_001.nc
            try:
                scan_number = header[-4:]
                scan_number = int(scan_number)
                bnpfly_header = 'bnp_fly_' + str(scan_number)
                if os.path.isfile(os.path.join(self.main_dict['master_dir'], 'flyXRF', bnpfly_header + '_001.nc')):
                    # BUGFIX: was assigned to mis-cased 'test_netCDF', so the
                    # bionanoprobe flavor was never flagged and such scans
                    # fell through to the fly-without-XRF branch below
                    test_netcdf = FLYSCAN_TYPE_BIONANOPROBE
            except:
                self.logger.error('This is not a scan file.')
        if (info.rank == 2) and (np.sum(info.spectrum) == 0 and (test_textfile == 0) and (test_netcdf == FLYSACN_TYPE_NONE)):
            self.logger.info('This is a fly scan, without XRF.')
            self.maps_core_generate_fly_dat(header, mdafilename, this_detector, total_number_detectors)
            return None
        elif (info.rank == 2) and (np.sum(info.spectrum) == 0 and (test_textfile > 0)):
            # this is a fly scan, but i found a text file, which should contain
            # the filename of the XRF file
            self.logger.warning('This scan has the combined file with info stored in a text file which is not yet supported - returning.')
            return None
        elif (info.rank == 2) and (np.sum(info.spectrum) == 0 and (test_netcdf == FLYSCAN_TYPE_2ID_NETCDF or test_netcdf == FLYSCAN_TYPE_NETCDF)):
            # this is a fly scan, but i found a netcdf file with matching
            # name. this should be a fly scan with XRF
            self.logger.info('trying to do the combined file')
            nc = maps_nc.nc(self.logger)
            scan = nc.read_combined_nc_scans(mdafilename, self.main_dict['master_dir'], header, this_detector, extra_pvs=True)
            self.logger.info('Finished reading combined nc scan')
            self.netcdf_fly_scan = 1
        elif (info.rank == 2) and (np.sum(info.spectrum) == 0 and (test_netcdf == FLYSCAN_TYPE_8BM_NETCDF)):
            # this is a fly scan, but i found a netcdf file with matching
            # name. this should be a fly scan with XRF from 8bm
            self.logger.warning('This scan with XRF from 8bm which is not yet supported - returning.')
            return None
        elif (info.rank == 2) and (np.sum(info.spectrum) == 0 and (test_netcdf == FLYSCAN_TYPE_BIONANOPROBE)):
            # this is a fly scan, but i found a netcdf file with matching
            # name. this should be a fly scan with XRF from the bionanoprobe
            self.logger.warning('This scan with with XRF from the bionanoprobe which is not yet supported - returning.')
            return None
        elif (info.rank == 2) and (np.sum(info.spectrum) == 0 and (test_netcdf == FLYSCAN_TYPE_HDF)):
            # this is a fly scan, but i found an hdf5 file with matching
            # name. this should be a fly scan with XRF
            self.logger.warning('trying to do the combined file')
            scan = mda.read_combined_flyscan(self.main_dict['master_dir'], mdafilename, this_detector)
            self.netcdf_fly_scan = 1
        elif self.xanes == 1:
            self.logger.warning('xanes scans not supported - returning')
            return None
        else:
            # plain step scan: read the mda file directly
            self.logger.info('Reading scan from %s', mdafilename)
            try:
                scan = mda.read_scan(mdafilename, extra_pvs=True)
            except:
                scan = None
            self.logger.info('Finished reading scan from %s', mdafilename)
    if beamline == 'DLS-I08':
        self.logger.info('beamline: %s', beamline)
        self.logger.info('reading DLS-I08 scan from /img.dat/*.h5')
        filenameh5 = os.path.basename(str(mdafilename))
        h5filename = os.path.join(os.path.join(self.main_dict['master_dir'], 'img.dat'), filenameh5)
        self.logger.debug('filename= %s', h5filename)
        h5 = maps_hdf5.h5(self.logger)
        scan = h5.read_scan(h5filename)
        self.save_h5 = 0
        self.xanes = 0
    return scan
# ----------------------------------------------------------------------
def generate_img_dat_threaded(self, header, mdafilename, this_detector, total_number_detectors, quick_dirty, nnls,
no_processors_to_use, xrf_bin, xrf_bin_ext=''):
info_elements = self.info_elements
beamline = self.beamline
make_maps_conf = self.maps_conf
self.save_h5 = 1
self.xanes = 0
# assume by default this is not a fly scan based on netcdf
self.netcdf_fly_scan = 0
suffix = ''
xrfflyscan = 0
maps_overridefile = os.path.join(self.main_dict['master_dir'], 'maps_fit_parameters_override.txt')
maps_intermediate_solution_file = 'maps_intermediate_solution.tmp'
xmin = 0L
xmax = 0L
nnls = 0L
#Look for override files in main.master_dir
if total_number_detectors > 1:
overide_files_found = 0
suffix = str(this_detector)
self.logger.debug('suff= %s', suffix)
maps_overridefile = os.path.join(self.main_dict['master_dir'], 'maps_fit_parameters_override.txt') + suffix
try:
f = open(maps_overridefile, 'rt')
self.logger.debug('maps override file %s exists',maps_overridefile)
f.close()
except :
# if i cannot find an override file specific per detector, assuming
# there is a single overall file.
maps_overridefile = os.path.join(self.main_dict['master_dir'], 'maps_fit_parameters_override.txt')
if xrf_bin > 0:
xrf_bin_ext = '.avg3'
self.version = make_maps_conf.version
extra_pv = 0
scan = self.read_and_parse_scan(header, mdafilename, beamline, this_detector, total_number_detectors)
if scan == None:
self.logger.error("Could not read and parse scan " + mdafilename)
return
extra_pv = scan.extra_pv
# Get scan date
scan_date = datetime.date(0001, 01, 01)
month_list = ['jan', 'feb', 'mar', 'apr', 'mai', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
if scan.scan_time_stamp != '':
monthstr = scan.scan_time_stamp[0:3].lower()
month = month_list.index(monthstr) + 1
day = int(scan.scan_time_stamp[4:6])
year = int(scan.scan_time_stamp[8:12])
scan_date = datetime.date(year, month, day)
# Read in detector calibration
maps_detector.get_detector_calibration(make_maps_conf, beamline, info_elements, scan, maps_overridefile, self.logger)
amp = np.zeros((8, 3), dtype=np.float)
if scan.mca_calib_description_arr:
for i in range(8):
mca_calib = [self.crate, ':A', str(i + 1), 'sens_num.VAL']
mca_calib = string.join(mca_calib, '')
if mca_calib in scan.mca_calib_description_arr:
amp[i, 0] = float(scan.mca_calib_arr[scan.mca_calib_description_arr.index(mca_calib)])
mca_calib = [self.crate, ':A', str(i + 1), 'sens_unit.VAL']
mca_calib = string.join(mca_calib, '')
if mca_calib in scan.mca_calib_description_arr:
amp[i, 1] = float(scan.mca_calib_arr[scan.mca_calib_description_arr.index(mca_calib)])
# if all amp values are 0, it is likely, amps were not found. then try this
# could not find amplifier sensitivity in data file. will try to look up
# in a user editable file
if amp.sum() == 0.0:
try:
f = open_file_with_retry(maps_overridefile, 'rt')
for line in f:
if ':' in line :
slist = line.split(':')
tag = slist[0]
value = ''.join(slist[1:])
if tag == 'US_AMP_SENS_NUM':
amp[0, 0] = value
elif tag == 'US_AMP_SENS_UNIT':
amp[0, 1] = value
elif tag == 'DS_AMP_SENS_NUM':
amp[1, 0] = value
elif tag == 'DS_AMP_SENS_UNIT':
amp[1, 1] = value
f.close()
if beamline == '2-ID-D':
amp[3, :] = amp[1, :]
amp[1, :] = amp[0, :]
except:
self.logger.warning('Warning: amp[] is 0 - could not read override file')
for i in range(8):
if amp[i, 0] == 0.0: amp[i, 2] = 1.
if amp[i, 0] == 1.0: amp[i, 2] = 2.
if amp[i, 0] == 2.0: amp[i, 2] = 5.
if amp[i, 0] == 3.0: amp[i, 2] = 10.
if amp[i, 0] == 4.0: amp[i, 2] = 20.
if amp[i, 0] == 5.0: amp[i, 2] = 50.
if amp[i, 0] == 6.0: amp[i, 2] = 100.
if amp[i, 0] == 7.0: amp[i, 2] = 200.
if amp[i, 0] == 8.0: amp[i, 2] = 500.
if amp[i, 1] == 0.0: amp[i, 2] = amp[i, 2] / 1000.0 # pA/V
if amp[i, 1] == 1.0: amp[i, 2] = amp[i, 2] # nA/V
if amp[i, 1] == 2.0: amp[i, 2] = amp[i, 2] * 1000.0 #uA/V
if amp[i, 1] == 3.0: amp[i, 2] = amp[i, 2] * 1000.0 * 1000.0 #mA/V
us_amp = np.zeros(3)
ds_amp = np.zeros(3)
if beamline == '2-ID-D':
us_amp[:] = amp[1, :]
ds_amp[:] = amp[3, :]
if beamline == '2-ID-E':
us_amp[:] = amp[0, :]
ds_amp[:] = amp[1, :]
if beamline == 'Bio-CAT':
us_amp[:] = amp[0, :]
ds_amp[:] = amp[1, :]
try:
if scan.mca_arr.size < 1:
self.logger.warning( 'Skipping file : %s, was not able to read a valid 3D array', scan.scan_name)
return
except:
self.logger.warning('skipping file : %s, was not able to read a valid 3D array', scan.scan_name)
return
if scan.mca_arr.sum() == 0:
self.logger.warning('skipping file : %s, contains 3D array with all elements EQ 0', scan.scan_name)
return
mca_arr_dimensions = scan.mca_arr.shape
n_cols = mca_arr_dimensions[0]
n_rows = mca_arr_dimensions[1]
n_channels = 2048
n_mca_channels = mca_arr_dimensions[2]
if total_number_detectors > 1 or (total_number_detectors == 1 and len(scan.mca_arr.shape) == 4):
if (self.netcdf_fly_scan != 1) and (beamline != 'GSE-CARS') and (beamline != 'Bio-CAT'):
# if it is a fly scan, ie netcdf_fly_scan is 1, the even a multi
# element detector scan is read in as a single element
old_mca_arr = scan.mca_arr.copy()
scan.mca_arr = np.zeros((n_cols, n_rows, n_mca_channels))
scan.mca_arr[:, :, :] = old_mca_arr[:, :, :, this_detector]
del old_mca_arr
mca_arr_dimensions = scan.mca_arr.shape
# IF quick_dirty is set, just sum up all detector elements and treat
# them as a single detector, to speed initial analysis up
if quick_dirty > 0:
if self.netcdf_fly_scan != 1:
# if it is a fly scan, ie netcdf_fly_scan is 1, the even a multi
# element detector scan is read in as a single element
scan_shape = scan.mca_arr.shape
old_mca_arr = scan.mca_arr.copy()
scan.mca_arr = np.zeros((n_cols, n_rows, scan_shape[2]))
old_mca_no_dets = old_mca_arr.shape
if len(old_mca_no_dets) == 4 :
old_mca_no_dets = old_mca_no_dets[3]
for ii in range(old_mca_no_dets):
scan.mca_arr[:, :, :] = scan.mca_arr[:, :, :] + old_mca_arr[:, :, :, ii]
del old_mca_arr
mca_arr_dimensions = scan.mca_arr.shape
#dataset = np.zeros((n_cols, n_rows, make_maps_conf.n_used_chan, 3))
# These are n_mca_channels - why n < 20?
if len(mca_arr_dimensions) == 4:
for n in range(mca_arr_dimensions[3]):
if n < 20 :
if make_maps_conf.use_det[n] == 0:
scan.mca_arr[:, :, :, n] = 0
if len(mca_arr_dimensions) == 4:
no_detectors = mca_arr_dimensions[3]
else:
no_detectors = 1
h5 = maps_hdf5.h5(self.logger)
if self.save_h5 == 1:
# Save full spectra to HDF5 file
h5file = os.path.join(self.main_dict['img_dat_dir'], header + xrf_bin_ext + '.h5' + suffix)
self.logger.info('now trying to write the mca spectra into the HDF5 file %s', h5file)
h5.write_mca_hdf5(h5file, scan.mca_arr)
max_chan_spec = np.zeros((n_channels, 5))
if no_detectors > 1:
for kk in range(n_mca_channels):
temp = scan.mca_arr[:, :, kk, :].flatten()
sortind = temp.argsort()
sortind = sortind[::-1]
max_chan_spec[kk, 1] = np.sum(temp[sortind[0:np.amin([11, sortind.size])]])
max_chan_spec[kk, 0] = np.amax(temp)
del temp
else:
for kk in range(n_mca_channels):
temp = scan.mca_arr[:, :, kk].flatten()
sortind = temp.argsort()
sortind = sortind[::-1]
max_chan_spec[kk, 1] = np.sum(temp[sortind[0:np.amin([11, sortind.size])]])
max_chan_spec[kk, 0] = np.amax(temp)
del temp
temp = 0
raw_spec = scan.mca_arr.sum(axis=0)
raw_spec = raw_spec.sum(axis=0)
if no_detectors > 1:
spec_all = raw_spec.sum(axis=1)
else:
spec_all = raw_spec
if no_detectors > make_maps_conf.use_det.sum():
no_detectors = make_maps_conf.use_det.sum()
dataset_size = 3
thisdata = self.maps_def.define_xrfmaps_info(scan.x_pixels, scan.y_pixels, dataset_size,
n_channels, n_channels, no_detectors,
make_maps_conf.n_used_chan,
make_maps_conf.n_used_dmaps,
make_maps_conf,
version = 9)
if scan.extra_pv:
self.maps_def.xrfmaps_add_extra(scan.extra_pv, extra_pv_order = scan.extra_pv_key_list)
wo = np.where(make_maps_conf.use_det == 1)
self.logger.debug('wo len %s raw_spec len %s no_detectors %s', len(wo), len(thisdata.raw_spec), no_detectors)
for ii in range(no_detectors-1):
thisdata.raw_spec[0:len(spec_all), ii] = raw_spec[:, wo[ii]]
if beamline == '2-BM':
det_descr = ['srcurrent', 'us_ic', 'ds_ic', 'ELT1', 'ERT1']
if beamline =='2-ID-B':
det_descr = ['srcurrent', 'us_ic', 'ds_ic', 'dpc1_ic', 'dpc2_ic',
'cfg_1', 'cfg_2', 'cfg_3', 'cfg_4', 'cfg_5', 'cfg_6', 'cfg_7', 'cfg_8',
'cfg_9', 'ELT1', 'ERT1']
if beamline == '2-ID-D':
det_descr = ['srcurrent', 'us_ic', 'ds_ic', 'ELT1', 'ERT1']
if scan_date > datetime.date(2009, 9, 01) :
det_descr = ['srcurrent', 'us_ic', 'ds_ic', 'ELT1', 'ERT1', 'ICR1', 'OCR1']
if scan_date > datetime.date(2009, 01, 01) :
scan_date = ['srcurrent', 'us_ic', 'ds_ic',
'cfg_1', 'cfg_2', 'cfg_3', 'cfg_4', 'cfg_5', 'cfg_6', 'cfg_7', 'cfg_8',
'cfg_9', 'cfg_10', 'ELT1', 'ERT1', 'ICR1', 'OCR1']
if beamline == '2-ID-E':
det_descr = ['srcurrent', 'us_ic', 'ds_ic', 'dpc1_ic', 'dpc2_ic',
'cfg_1', 'cfg_2', 'cfg_3', 'cfg_4', 'cfg_5', 'cfg_6', 'cfg_7', 'cfg_8',
'ELT1', 'ERT1', 'ELT2', 'ERT2', 'ELT3', 'ERT3']
if scan_date > datetime.date(2007, 9, 01):
det_descr = ['srcurrent', 'us_ic', 'ds_ic', 'dpc1_ic', 'dpc2_ic',
'cfg_1', 'cfg_2', 'cfg_3', 'cfg_4', 'cfg_5', 'cfg_6', 'cfg_7', 'cfg_8',
'cfg_9', 'ELT1', 'ERT1', 'ELT2', 'ERT2', 'ELT3', 'ERT3', 'ICR1', 'OCR1']
if beamline == 'Bio-CAT':
det_descr = ['srcurrent', 'us_ic', 'ds_ic', 'ELT1', 'ERT1']
if beamline == 'GSE-CARS':
det_descr = ['srcurrent', 'us_ic', 'ds_ic', 'ELT1', 'ERT1']
if beamline == 'Bionanoprobe':
det_descr = ['srcurrent', 'us_ic', 'ds_ic',
'cfg_1', 'cfg_2', 'cfg_3', 'cfg_4', 'ELT1', 'ERT1', 'ICR1', 'OCR1']
if (beamline == 'DLS-I08'):
det_descr = []
dmaps_set = np.zeros((n_cols, n_rows, make_maps_conf.n_used_dmaps))
# generate direct maps, such as SR current, ICs, life time in subroutine
dmaps_set = maps_detector.find_detector_name(det_descr, scan_date, scan.detector_arr, scan.detector_description_arr,
make_maps_conf, scan.x_coord_arr, scan.y_coord_arr, beamline,
n_cols, n_rows, maps_overridefile, self.logger)
dmaps_names = []
for item in make_maps_conf.dmaps:
dmaps_names.append(item.name)
# elt1_ = dmaps_set[:, :, dmaps_names.index('ELT1')]
ert1_ = dmaps_set[:, :, dmaps_names.index('ERT1')]
icr1_ = dmaps_set[:, :, dmaps_names.index('ICR1')]
ocr1_ = dmaps_set[:, :, dmaps_names.index('OCR1')]
if ert1_.sum() > 0 and icr1_.sum() > 0 and ocr1_.sum() > 0:
#dmaps_set[:, :, dmaps_names.index('ELT1')] = dmaps_set[:, :, dmaps_names.index('ERT1')] * dmaps_set[:, :, dmaps_names.index('OCR1')] / dmaps_set[:, :, dmaps_names.index('ICR1')]
elt1_ = ert1_ * ocr1_ / icr1_
dmaps_set[:, :, dmaps_names.index('ELT1')] = elt1_[:]
# # ICR = Input Counts/Trigger Filter Livetime OCR = Output Counts / Real Time Energy Filter Livetime = Real Time * OCR/ICR.
elt1_arr = []
if 'ELT1' in dmaps_names:
elt1_arr = dmaps_set[:, :, dmaps_names.index('ELT1')]
elt1_arr = np.array(elt1_arr)
if np.sum(elt1_arr) == 0.0:
self.logger.warning('WARNING: did not find elapsed life time. Will continue assuming ELT1 was 1s, but this is just an ARBITRARY value')
elt1_arr[:, :] = 1.
elt2_arr = []
if 'ELT2' in dmaps_names:
elt2_arr = dmaps_set[:, :, dmaps_names.index('ELT2')]
elt2_arr = np.array(elt2_arr)
if np.sum(elt2_arr) == 0.0:
self.logger.warning('WARNING: did not find elapsed life time. Will continue assuming ELT2 was 1s, but this is just an ARBITRARY value')
elt2_arr[:, :] = 1.
elt3_arr = []
if 'ELT3' in dmaps_names:
elt3_arr = dmaps_set[:, :, dmaps_names.index('ELT3')]
elt3_arr = np.array(elt3_arr)
if np.sum(elt3_arr) == 0.0:
self.logger.warning('WARNING: did not find elapsed life time. Will continue assuming ELT3 was 1s, but this is just an ARBITRARY value')
elt3_arr[:, :] = 1.
# Bin the data if required
if xrf_bin > 0:
elt1_arr = self.__binning__(scan, xrf_bin, n_cols, n_rows, mca_arr_dimensions, elt1_arr)
if 'ELT2' in dmaps_names:
elt2_arr = dmaps_set[:, :, dmaps_names.index('ELT2')]
if 'ELT3' in dmaps_names:
elt3_arr = dmaps_set[:, :, dmaps_names.index('ELT3')]
# calculate elemental maps using XRF
temp_elementsuse = []
for item in make_maps_conf.chan:
temp_elementsuse.append(item.use)
elements_to_use = np.where(np.array(temp_elementsuse) == 1)
elements_to_use = elements_to_use[0]
if elements_to_use.size == 0:
return
spectra = self.maps_def.define_spectra(self.max_spec_channels, self.main_max_spectra, self.max_ICs, mode='plot_spec')
fp = maps_fit_parameters.maps_fit_parameters(self.logger)
fitp = fp.define_fitp(beamline, info_elements)
text = ' spec_name, inner_coord, outer_coord, '
for i in range(fitp.g.n_fitp):
text = text + str(fitp.s.name[i]) + ', '
text = text + ' live_time, ' + ' total_counts, ' + ' status, ' + ' niter, ' + \
' total_perror, ' + ' abs_error, ' + ' relative_error, ' + 'fit_time_per_pix, ' + \
' srcurrent, ' + ' us_ic, ' + ' ds_ic, '
for i in range(np.amin(fitp.keywords.kele_pos), np.amax(fitp.keywords.mele_pos)):
text = text + 'perror_' + str(fitp.s.name[i]) + ', '
roi_start = datetime.datetime.now()
# below is the routine for straight ROI mapping
for jj in range(len(elements_to_use)):
counts = 0.
for kk in range(no_detectors):
if kk == 0 : elt_arr = elt1_arr
if kk == 1 : elt_arr = elt2_arr
if kk == 2 : elt_arr = elt3_arr
wo = elements_to_use[jj]
# note: center position for peaks/rois is in keV, widths of ROIs is in eV
left_roi = int(((make_maps_conf.chan[wo].center-make_maps_conf.chan[wo].width/2./1000.) - make_maps_conf.calibration.offset[kk])/make_maps_conf.calibration.slope[kk])
right_roi = int(((make_maps_conf.chan[wo].center+make_maps_conf.chan[wo].width/2./1000.) - make_maps_conf.calibration.offset[kk])/make_maps_conf.calibration.slope[kk])
if right_roi >= n_mca_channels:
right_roi = n_mca_channels - 2
if left_roi > right_roi:
left_roi = right_roi - 1
if left_roi < 0:
left_roi = 1
if right_roi < 0:
right_roi = n_mca_channels - 2
make_maps_conf.chan[wo].left_roi[kk] = left_roi
make_maps_conf.chan[wo].right_roi[kk] = right_roi
roi_width = right_roi - left_roi + 1
if no_detectors == 1:
if (roi_width < 200) or (n_cols < 50) or (n_rows < 50) :
# note: when looking for tfy, or other elements with a large
# number of channels (big difference between left and right
# roi, the line below can gobble up a lot of memory (eg, a
# total of 1/3 of content.
these_counts = scan.mca_arr[:, :, left_roi:right_roi + 1]
else:
these_counts = scan.mca_arr[:, :, left_roi]
for ii_temp in range(left_roi + 1, right_roi + 1):
these_counts = these_counts + scan.mca_arr[:, :, ii_temp]
else:
if (roi_width < 200) or (n_cols < 50) or (n_rows < 50) :
# note: when looking for tfy, or other elements with a large
# number of channels (big difference between left and right
# roi, the line below can gobble up a lot of memory (eg, a
# total of 1/3 of content.
these_counts = scan.mca_arr[:, :, left_roi:right_roi + 1, kk]
else:
these_counts = scan.mca_arr[:, :, left_roi, kk]
for ii_temp in range(left_roi + 1, right_roi + 1):
these_counts = these_counts + scan.mca_arr[:, :, ii_temp, kk]
if len(these_counts.shape) >= 3:
these_counts = these_counts.sum(axis=2)
these_counts = these_counts / elt_arr
counts = counts + these_counts
thisdata.dataset_orig[:, :, jj, 0] = counts
roi_end = datetime.datetime.now()
total_roi_time = roi_end - roi_start
total_time_str = '\n\n %%%%%%%% roi total time = '+str(total_roi_time.total_seconds())+' %%%%%%% \n\n'
self.logger.info(total_time_str)
# below is the routine for using matrix math to calculate elemental
# content with overlap removal
self.logger.info('now using matrix math for analysis')
kk = 0
#element_pos = [fitp.keywords.kele_pos, fitp.keywords.lele_pos, fitp.keywords.mele_pos]
det = kk
pileup_string = ''
test_string = ''
fitp, test_string, pileup_string = fp.read_fitp(maps_overridefile, info_elements, det)
for jj in range(fitp.g.n_fitp) :
if fitp.s.name[jj] in test_string :
fitp.s.val[jj] = 1.
fitp.s.use[jj] = 5
else:
fitp.s.use[jj] = 1
n_pars = fitp.g.n_fitp
parinfo_value = np.zeros((n_pars))
parinfo_fixed = np.zeros((n_pars), dtype=np.int)
parinfo_limited = np.zeros((n_pars, 2), dtype=np.int)
parinfo_limits = np.zeros((n_pars, 2))
parinfo_relstep = np.zeros((n_pars))
parinfo_mpmaxstep = np.zeros((n_pars))
parinfo_mpminstep = np.zeros((n_pars))
for i in range(n_pars) :
parinfo_value[i] = float(fitp.s.val[i])
if fitp.s.use[i] == 1 :
parinfo_fixed[i] = 1
else:
parinfo_fixed[i] = 0
wo = np.where(fitp.keywords.peaks == i)
if wo[0].size > 0 :
parinfo_value[i] = np.log10(fitp.s.val[i])
thisdata.dataset_orig[:, :, 0, 2] = 0.
x = np.arange(float(n_channels))
parinfo_prime_val = parinfo_value[np.amin(fitp.keywords.kele_pos):np.amax(fitp.keywords.mele_pos)+1]
parinfo_prime_val = np.concatenate((parinfo_prime_val, [parinfo_value[fitp.keywords.coherent_pos[1]], parinfo_value[fitp.keywords.compton_pos[2]]],
parinfo_value[fitp.keywords.added_params[4:13]], parinfo_value[fitp.keywords.added_params[1:4]]), axis=0)
parinfo_prime_fixed = parinfo_fixed[np.amin(fitp.keywords.kele_pos):np.amax(fitp.keywords.mele_pos)+1]
parinfo_prime_fixed = np.concatenate((parinfo_prime_fixed, [parinfo_fixed[fitp.keywords.coherent_pos[1]], parinfo_fixed[fitp.keywords.compton_pos[2]]],
parinfo_fixed[fitp.keywords.added_params[4:13]], parinfo_fixed[fitp.keywords.added_params[1:4]]), axis=0)
parinfo_prime_limited = parinfo_limited[np.amin(fitp.keywords.kele_pos):np.amax(fitp.keywords.mele_pos)+1,:]
parinfo_prime_limited = np.concatenate((parinfo_prime_limited, [parinfo_limited[fitp.keywords.coherent_pos[1],:], parinfo_limited[fitp.keywords.compton_pos[2],:]],
parinfo_limited[fitp.keywords.added_params[4:13],:], parinfo_limited[fitp.keywords.added_params[1:4],:]), axis=0)
parinfo_prime_limits = parinfo_limits[np.amin(fitp.keywords.kele_pos):np.amax(fitp.keywords.mele_pos)+1,:]
parinfo_prime_limits = np.concatenate((parinfo_prime_limits, [parinfo_limits[fitp.keywords.coherent_pos[1],:], parinfo_limits[fitp.keywords.compton_pos[2],:]],
parinfo_limits[fitp.keywords.added_params[4:13],:], parinfo_limits[fitp.keywords.added_params[1:4],:]), axis=0)
fitp.keywords.use_this_par[:] = 0
fitp.keywords.use_this_par[np.where(parinfo_prime_fixed != 1)] = 1
# force the last three to be 0, to make sure they do NOT get fitted as peaks.
fitp.keywords.use_this_par[parinfo_prime_val.size-3:parinfo_prime_val.size] = 0
wo_use_this_par = (np.nonzero(fitp.keywords.use_this_par[0:(np.max(fitp.keywords.mele_pos)-np.min(fitp.keywords.kele_pos)+1)] == 1))[0]
no_use_pars = wo_use_this_par.size+2
sol_intermediate = np.zeros((no_use_pars, self.main_dict['max_spec_channels']))
fitmatrix_reduced = np.zeros((self.main_dict['max_spec_channels'], no_use_pars))
# Read in intermediate solution
filepath = os.path.join(self.main_dict['output_dir'], maps_intermediate_solution_file) + suffix
#saveddatafile = np.load(filepath)
saveddatafile = call_function_with_retry(np.load, 5, 0.1, 1.1, (filepath,))
if saveddatafile == None:
self.logger.error('Error opening %s', filepath)
else:
sol_intermediate = saveddatafile['sol_intermediate']
fitmatrix_reduced = saveddatafile['fitmatrix_reduced']
saveddatafile.close()
self.logger.info('elements to use as per make_maps_conf')
maps_conf_chan_elstouse_names = []
for iel in range(len(elements_to_use)):
maps_conf_chan_elstouse_names.append(make_maps_conf.chan[elements_to_use[iel]].name)
self.logger.debug('maps_conf_chan_elstouse_names %s', maps_conf_chan_elstouse_names)
temp_fitp_use = fitp.s.use[np.amin(fitp.keywords.kele_pos):np.amax(fitp.keywords.mele_pos)+1]
temp_fitp_name = fitp.s.name[np.amin(fitp.keywords.kele_pos):np.amax(fitp.keywords.mele_pos)+1]
which_elements_to_fit = (np.nonzero(temp_fitp_use != 1))[0]
self.logger.info('elements to fit as per fitp: %s', temp_fitp_name[which_elements_to_fit])
element_lookup_in_reduced = np.zeros((len(elements_to_use)), dtype=int)
element_lookup_in_reduced[:] = -1
j_temp = 0
for i_temp in range(len(elements_to_use)):
if make_maps_conf.chan[elements_to_use[i_temp]].name in fitp.s.name[min(fitp.keywords.kele_pos) + which_elements_to_fit]:
wo_temp = np.where(fitp.s.name[np.amin(fitp.keywords.kele_pos)+which_elements_to_fit] == make_maps_conf.chan[elements_to_use[i_temp]].name)
element_lookup_in_reduced[i_temp] = wo_temp[0]
if 's_i' == make_maps_conf.chan[elements_to_use[i_temp]].name:
element_lookup_in_reduced[i_temp] = len(wo_use_this_par) + 1
if 's_e' == make_maps_conf.chan[elements_to_use[i_temp]].name:
element_lookup_in_reduced[i_temp] = len(wo_use_this_par) + 0
self.logger.debug('c %s', tm.time())
if nnls == 0:
fitting_start = datetime.datetime.now()
for i_temp in range(n_cols):
for j_temp in range(n_rows):
these_counts = np.zeros((len(x)))
n_relev_channels = min(len(x), n_mca_channels)
these_counts[0:n_relev_channels] = scan.mca_arr[i_temp, j_temp, 0:n_relev_channels]
# mca_arr is the 4d array, where pixel_x, pixel_y,spectrum at
# this point (eg 2000), adetector number (legacy), typically 1
solution = np.dot(sol_intermediate[:, 0:len(x)], these_counts)
solution = solution/elt_arr[i_temp, j_temp]
for mm in range(len(elements_to_use)):
if element_lookup_in_reduced[mm] != -1:
thisdata.dataset_orig[i_temp, j_temp, mm, 2] = solution[element_lookup_in_reduced[mm]]
fitting_end = datetime.datetime.now()
total_fitting_time = fitting_end - fitting_start
total_time_str = '\n\n %%%%%%%% SVD total time = '+str(total_fitting_time.total_seconds())+' %%%%%%% \n\n'
self.logger.info(total_time_str)
if nnls > 0:
results_line = np.zeros((n_rows, len(elements_to_use)))
if (no_processors_to_use > 1):
self.logger.info('no_processors_to_use = %s', no_processors_to_use)
self.logger.info('cpu_count() = %d\n', multiprocessing.cpu_count())
self.logger.info('Creating pool with %d processes\n', no_processors_to_use)
pool = multiprocessing.Pool(no_processors_to_use)
count = n_cols
data_lines = np.zeros((self.main_dict['max_spec_channels'], n_rows, n_cols))
for i_fit in range(count):
for jj in range(n_rows):
data_lines[0:scan.mca_arr[i_fit, jj, :].size, jj, i_fit] = scan.mca_arr[i_fit, jj, :]
# Single processor version for debugging
# for i_fit in range(count):
# self.logger.debug( 'Doing line ', i_fit, ' of ', count
# results_line = maps_tools.maps_nnls_line(data_lines[:, :, i_fit], n_channels, fitmatrix_reduced, n_mca_channels,
# elements_to_use, element_lookup_in_reduced, n_rows)
results_pool = [pool.apply_async(maps_tools.maps_nnls_line, (data_lines[:, :, i_fit], n_channels, fitmatrix_reduced, n_mca_channels,
elements_to_use, element_lookup_in_reduced, n_rows)) for i_fit in range(count)]
self.logger.info('Ordered results using pool.apply_async():')
results = []
for r in results_pool:
results.append(r.get())
pool.terminate()
pool.join()
results = np.array(results)
for iline in range(count):
results_line = results[iline, :, :]
for mm in range(len(elements_to_use)):
if element_lookup_in_reduced[mm] != -1:
thisdata.dataset_orig[iline, :, mm, 2] = results_line[:, mm]
else:
data_line = np.zeros((self.main_dict['max_spec_channels'], n_rows))
count = n_cols
for i_fit in range(count):
data_line[:, :] = 0.
for jj in range(n_rows):
data_line[0:scan.mca_arr[i_fit, jj, :].size, jj] = scan.mca_arr[i_fit, jj, :]
results_line = maps_tools.maps_nnls_line(data_line, n_channels, fitmatrix_reduced, n_mca_channels,
elements_to_use, element_lookup_in_reduced, n_rows)
for mm in range(len(elements_to_use)):
if element_lookup_in_reduced[mm] != -1:
thisdata.dataset_orig[i_fit, :, mm, 2] = results_line[:, mm]
self.logger.debug('d %s', tm.time())
if ('s_a' in maps_conf_chan_elstouse_names) and \
('s_i' in maps_conf_chan_elstouse_names) and \
('s_e' in maps_conf_chan_elstouse_names) :
wo = maps_conf_chan_elstouse_names.index('s_a')
wo_i = maps_conf_chan_elstouse_names.index('s_i')
wo_e = maps_conf_chan_elstouse_names.index('s_e')
thisdata.dataset_orig[:, :, wo, 2] = thisdata.dataset_orig[:, :, wo_i, 2] + thisdata.dataset_orig[:, :, wo_e, 2]
if 'TFY' in maps_conf_chan_elstouse_names:
wo_tfy = maps_conf_chan_elstouse_names.index('TFY')
thisdata.dataset_orig[:, :, wo_tfy, 2] = thisdata.dataset_orig[:, :, wo_tfy, 0]
fitp = fp.define_fitp(beamline, info_elements)
self.logger.debug('make_maps_conf.use_fit = %s', make_maps_conf.use_fit)
# Spectrum fitting goes here if enabled
if (make_maps_conf.use_fit > 0) or (self.pca > 0):
spectral_binning = 2
if spectral_binning > 0:
t = scan.mca_arr.shape
mca_arr_dim = len(t)
# the statement below is the one that causes trouble with large arrays in IDL
if mca_arr_dim == 3:
scan.mca_arr = rebin(scan.mca_arr[:, :, 0:int(t[2] / spectral_binning)*spectral_binning], (t[0], t[1], int(t[2] / spectral_binning)))
if mca_arr_dim == 4:
scan.mca_arr = rebin(scan.mca_arr[:, :, 0:int(t[2] / spectral_binning)*spectral_binning, :], (t[0], t[2], int(t[2] / spectral_binning), t[3]))
mca_arr_dimensions = scan.mca_arr.shape
n_cols = mca_arr_dimensions[0]
n_rows = mca_arr_dimensions[1]
n_channels = mca_arr_dimensions[2]
keywords = fitp.keywords
if (make_maps_conf.use_fit > 0) and (xrfflyscan == 0):
fit = maps_analyze.analyze(self.logger)
seconds_fit_start = tm.time()
# note: spectral binning needs to be even !!
#data_temp = np.zeros((self.max_spec_channels))
data_line = np.zeros((self.max_spec_channels, n_rows))
fitted_line = np.zeros((self.max_spec_channels, n_rows))
ka_line = np.zeros((self.max_spec_channels, n_rows))
l_line = np.zeros((self.max_spec_channels, n_rows))
bkground_line = np.zeros((self.max_spec_channels, n_rows))
'''
fitted_line2 = np.zeros((self.max_spec_channels, n_rows))
ka_line2 = np.zeros((self.max_spec_channels, n_rows))
l_line2 = np.zeros((self.max_spec_channels, n_rows))
bkground_line2 = np.zeros((self.max_spec_channels, n_rows))
'''
fitted_temp = np.zeros((self.max_spec_channels, no_detectors + 1))
Ka_temp = np.zeros((self.max_spec_channels, no_detectors + 1))
l_temp = np.zeros((self.max_spec_channels, no_detectors + 1))
bkground_temp = np.zeros((self.max_spec_channels, no_detectors + 1))
raw_temp = np.zeros((self.max_spec_channels, no_detectors + 1))
add_plot_spectra = np.zeros((self.max_spec_channels, 12, n_rows), dtype=np.float32)
#temp_add_plot_spectra = np.zeros((self.max_spec_channels, 12, n_rows), dtype=np.float32)
add_plot_names = ['fitted', 'K alpha', 'background', 'K beta', 'L lines', 'M lines', 'step', 'tail', 'elastic', 'compton', 'pileup', 'escape']
values = np.zeros((n_cols, n_rows, fitp.g.n_fitp), dtype=np.float32)
values_line = np.zeros((n_rows, fitp.g.n_fitp), dtype=np.float32)
bkgnd = np.zeros((n_cols, n_rows), dtype=np.float32)
bkgnd_line = np.zeros((n_rows), dtype=np.float32)
tfy = np.zeros((n_cols, n_rows), dtype=np.float32)
tfy_line = np.zeros((n_rows), dtype=np.float32)
#sigma = np.zeros((n_cols, n_rows, fitp.g.n_fitp), dtype=np.float32)
elt_line = np.zeros((n_rows), dtype=np.float32)
'''
values_line2 = np.zeros((n_rows, fitp.g.n_fitp), dtype=np.float32)
bkgnd_line2 = np.zeros((n_rows), dtype=np.float32)
tfy_line2 = np.zeros((n_rows), dtype=np.float32)
'''
#test = np.zeros(self.max_spec_channels)
which_dets_to_use = np.where(make_maps_conf.use_det == 1)
for bb in range(no_detectors):
kk = which_dets_to_use[0][bb]
if kk == 0:
elt_arr = elt1_arr
if kk == 1:
elt_arr = elt2_arr
if kk == 2:
elt_arr = elt3_arr
matrix = 1
temp = 0
if NO_MATRIX:
matrix = 0
temp = 1
fitp.g.no_iters = 4
fitp.s.use[:] = 1
fitp.s.val[min(fitp.keywords.kele_pos):max(fitp.keywords.mele_pos)-1] = 1e-10
# execute below if do fixed fit per pixel
if make_maps_conf.use_fit == 1 :
for j in range(fitp.keywords.kele_pos[0]):
fitp.s.use[j] = fitp.s.batch[j,1]
# if matrix is not 1, then global variable NO_MATRIX is used to
# override, and will keep energy calibration floating. at every pixel
if matrix == 0 :
for j in range(fitp.keywords.kele_pos[0]):
fitp.s.use[j] = fitp.s.batch[j,4]
det = kk
pileup_string = ''
test_string = ''
self.logger.debug('maps_overridefile %s', maps_overridefile)
#for ie in range(len(info_elements)): self.logger.debug( info_elements[ie].xrf_abs_yield
fitp, test_string, pileup_string = fp.read_fitp(maps_overridefile, info_elements, det)
for jj in range(fitp.g.n_fitp):
if fitp.s.name[jj] in test_string :
fitp.s.val[jj] = 1.
fitp.s.use[jj] = 5
if temp == 0:
temp = jj
else:
temp = [temp, jj]
calib = {'off':0., 'lin':0., 'quad':0.}
calib['off'] = fitp.s.val[fitp.keywords.energy_pos[0]]
calib['lin'] = fitp.s.val[fitp.keywords.energy_pos[1]]
calib['quad'] = fitp.s.val[fitp.keywords.energy_pos[2]]
fp.parse_pileupdef(fitp, pileup_string, info_elements)
add_matrixfit_pars = np.zeros((6))
add_matrixfit_pars[0] = fitp.s.val[fitp.keywords.energy_pos[0]]
add_matrixfit_pars[1] = fitp.s.val[fitp.keywords.energy_pos[1]]
add_matrixfit_pars[2] = fitp.s.val[fitp.keywords.energy_pos[2]]
add_matrixfit_pars[3] = fitp.s.val[fitp.keywords.added_params[1]]
add_matrixfit_pars[4] = fitp.s.val[fitp.keywords.added_params[2]]
add_matrixfit_pars[5] = fitp.s.val[fitp.keywords.added_params[3]]
#if len(which_par_str) : text = [which_par_str, text]
old_fitp = fp.define_fitp(beamline, info_elements)
old_fitp.s.val[:]=fitp.s.val[:]
if no_processors_to_use > 1:
self.logger.info('Multi-threaded fitting started')
self.logger.info('no_processors_to_use = %s', no_processors_to_use)
self.logger.info('cpu_count() = %s\n', multiprocessing.cpu_count())
self.logger.info('Creating pool with %s processes\n', no_processors_to_use)
pool = multiprocessing.Pool(no_processors_to_use)
count = n_cols
data_lines = np.zeros((self.main_dict['max_spec_channels'], n_rows, n_cols))
for i_fit in range(n_cols):
for jj in range(n_rows):
data_lines[0:scan.mca_arr[i_fit, jj, :].size, jj, i_fit] = scan.mca_arr[i_fit, jj, :]
self.logger.info('Started fitting')
fitting_start = datetime.datetime.now()
results_pool = []
start = 0
for i_fit in range(count):
elt_line[:] = elt1_arr[i_fit, :]
fitp.s.val[:]=old_fitp.s.val[:]
if (xrf_bin > 0) and (i_fit < count -2) :
if (xrf_bin == 2) and (n_cols > 5) and (n_rows > 5) :
if i_fit % 2 != 0 :
continue
if (xrf_bin == 4) and (n_cols > 5) and (n_rows > 5) :
if i_fit % 3 != 0:
continue
for jj in range(n_rows):
raw_temp[:, kk] = raw_temp[:, kk] + data_lines[:, jj, i_fit]
results_pool.append(pool.apply_async(fit_line_threaded, (self.logger.name, i_fit, data_lines[:,:,i_fit],
n_rows, matrix, spectral_binning, elt1_arr[i_fit, :], fitp, old_fitp, keywords, xrf_bin, calib)) )
#self.logger.info( '------ Waiting for fitting to finish ------')
#del data_lines
pool.close()
pool.join()
results = []
for r in results_pool:
results.append(r.get())
fitting_end = datetime.datetime.now()
total_fitting_time = fitting_end - fitting_start
total_time_str = '\n\n %%%%%%%% fitting total time = '+str(total_fitting_time.total_seconds())+' %%%%%%% \n\n'
self.logger.info(total_time_str)
for iline in range(count):
results_line = results[iline]
#self.logger.info( 'results_line=', results_line)
fitted_line[...] = results_line[0][...]
ka_line[...] = results_line[1][...]
l_line[...] = results_line[2][...]
bkground_line[...] = results_line[3][...]
values_line[...] = results_line[4][...]
bkgnd_line[...] = results_line[5][...]
tfy_line[...] = results_line[6][...]
xmin = results_line[7]
xmax = results_line[8]
values[start+iline, :, :] = values_line[:, :]
bkgnd[start+iline, :] = bkgnd_line[:]
tfy[start+iline, :] = tfy_line[:]
if fitted_line == None:
continue
for jj in range(n_rows):
fitted_temp[xmin:xmax+1, kk] = fitted_temp[xmin:xmax+1, kk] + fitted_line[xmin:xmax+1, jj]
Ka_temp[xmin:xmax+1, kk] = Ka_temp[xmin:xmax+1, kk] + ka_line[xmin:xmax+1, jj]
l_temp[xmin:xmax+1, kk] = l_temp[xmin:xmax+1, kk] + l_line[xmin:xmax+1, jj]
bkground_temp[xmin:xmax+1, kk] = bkground_temp[xmin:xmax+1, kk] + bkground_line[xmin:xmax+1, jj]
#import matplotlib.pyplot as plt
#plt.plot(range(0,2048), ka_line[:,0])
##plt.semilogy(range(70,500), ka_line[70:500])
#plt.show()
#exit(1)
self.logger.debug('before %s', thisdata.energy_fit[0, kk])
thisdata.energy_fit[0, kk] = calib['off']
thisdata.energy_fit[1, kk] = calib['lin']
thisdata.energy_fit[2, kk] = calib['quad']
self.logger.debug('after %s', thisdata.energy_fit[0, kk])
else:
count = n_cols
fitting_start = datetime.datetime.now()
for i_fit in range(count):
self.logger.info('fitting row number %s of %s', i_fit, count)
if (xrf_bin > 0) and (i_fit < count -2) :
if (xrf_bin == 2) and (n_cols > 5) and (n_rows > 5) :
if i_fit % 2 != 0 :
continue
if (xrf_bin == 4) and (n_cols > 5) and (n_rows > 5) :
if i_fit % 3 != 0:
continue
data_line[:, :] = 0.
for jj in range(n_rows):
data_line[0:scan.mca_arr[i_fit, jj, :].size, jj] = scan.mca_arr[i_fit, jj, :]
elt_line[:] = elt1_arr[i_fit, :]
fitp.s.val[:]=old_fitp.s.val[:]
fitted_line, ka_line, l_line, bkground_line, values_line, bkgnd_line, tfy_line, xmin, xmax = fit.fit_line(data_line,
n_rows, matrix, spectral_binning, elt_line, fitp, old_fitp, keywords, xrf_bin, calib)
if fitted_line == None:
continue
for jj in range(n_rows):
fitted_temp[xmin:xmax + 1, kk] = fitted_temp[xmin:xmax + 1, kk] + fitted_line[xmin:xmax + 1, jj]
Ka_temp[xmin:xmax + 1, kk] = Ka_temp[xmin:xmax + 1, kk] + ka_line[xmin:xmax + 1, jj]
l_temp[xmin:xmax + 1, kk] = l_temp[xmin:xmax + 1, kk] + l_line[xmin:xmax + 1, jj]
bkground_temp[xmin:xmax + 1, kk] = bkground_temp[xmin:xmax + 1, kk] + bkground_line[xmin:xmax + 1, jj]
raw_temp[:, kk] = raw_temp[:, kk] + data_line[:, jj]
values[i_fit, :, :] = values_line[:, :]
bkgnd[i_fit, :] = bkgnd_line[:]
tfy[i_fit, :] = tfy_line[:]
fitting_end = datetime.datetime.now()
total_fitting_time = fitting_end - fitting_start
total_time_str = '\n\n %%%%%%%% fitting total time = '+str(total_fitting_time.total_seconds())+' %%%%%%% \n\n'
self.logger.info(total_time_str)
self.logger.debug('before %s', thisdata.energy_fit[0, kk])
thisdata.energy_fit[0, kk] = calib['off']
thisdata.energy_fit[1, kk] = calib['lin']
thisdata.energy_fit[2, kk] = calib['quad']
self.logger.info('after %s', thisdata.energy_fit[0, kk])
for i_fit in range(n_cols):
for j_fit in range(n_rows):
for jj in range(len(elements_to_use)):
if make_maps_conf.chan[elements_to_use[jj]].name in fitp.s.name:
wo= np.where( fitp.s.name == make_maps_conf.chan[elements_to_use[jj]].name)[0]
else:
if make_maps_conf.chan[elements_to_use[jj]].name == 's_e':
wo = np.where(fitp.s.name == 'coherent_sct_amplitude')[0]
if make_maps_conf.chan[elements_to_use[jj]].name == 's_i':
wo = np.where(fitp.s.name == 'compton_amplitude')[0]
if make_maps_conf.chan[elements_to_use[jj]].name == 's_a':
wo = np.concatenate((np.where(fitp.s.name == 'compton_amplitude')[0], np.where(fitp.s.name == 'coherent_sct_amplitude')[0]), axis=0)
if len(wo) == 0:
continue
thisdata.dataset_orig[i_fit, j_fit, jj, 1] = np.sum(values[i_fit, j_fit, wo])
for ie in range(len(elements_to_use[:])):
if 'TFY' == make_maps_conf.chan[elements_to_use[ie]].name:
thisdata.dataset_orig[i_fit, j_fit, ie, 1] = tfy[i_fit, j_fit]
if 'Bkgnd' == make_maps_conf.chan[elements_to_use[ie]].name:
thisdata.dataset_orig[i_fit, j_fit, ie, 1] = bkgnd[i_fit, j_fit]
seconds_fit_end = tm.time()
self.logger.info('fitting of this scan finished in %s seconds', seconds_fit_end-seconds_fit_start)
if make_maps_conf.use_fit == 2:
kk_loop_length = no_detectors + 1
else:
kk_loop_length = no_detectors
for kk in range(kk_loop_length):
name_pre = 'fit_'
if kk == no_detectors:
name_after = '_integrated'
else:
name_after = '_det'+str(kk).strip()
spectra[self.main_max_spectra-8].data[:] = fitted_temp[:, kk]
spectra[self.main_max_spectra-7].data[:] = Ka_temp[:, kk]
spectra[self.main_max_spectra-4].data[:] = bkground_temp[:, kk]
spectra[0].data[:] = raw_temp[:, kk]
spectra[0].name = name_pre+header+name_after
# need to be in here for B station, for files w/o standards
if beamline == '2-ID-B' :
spectra[self.main_max_spectra-8].name = 'fitted'
spectra[self.main_max_spectra-7].name = 'alpha'
spectra[self.main_max_spectra-4].name = 'background'
spectra[0].used_chan = raw_temp[:, 0].size
spectra[0].calib['off'] = calib['off']
spectra[0].calib['lin'] = calib['lin']
if spectral_binning > 0:
spectra[0].calib['lin'] = spectra[0].calib['lin'] * spectral_binning
spectra[0].calib['quad'] = calib['quad']
for isp in range(self.main_max_spectra-8,self.main_max_spectra-3):
spectra[isp].used_chan = spectra[0].used_chan
spectra[isp].calib['off'] = spectra[0].calib['off']
spectra[isp].calib['lin'] = spectra[0].calib['lin']
spectra[isp].calib['quad'] = spectra[0].calib['quad']
# need to be in here for B station, for files w/o standards
if beamline == '2-ID-B':
names = spectra[np.where(spectra.name != '')].name
names.insert(0, 'none')
n_names = len(names)
temp_this_max = max_chan_spec[:, 0].size
temp = np.repeat(fitted_temp[:, 0], 2)
max_chan_spec[0:temp_this_max, 2] = temp[0:temp_this_max]
temp = np.repeat(Ka_temp[:, 0], 2)
max_chan_spec[0:temp_this_max, 3] = temp[0:temp_this_max]
temp = np.repeat(bkground_temp[:, 0], 2)
max_chan_spec[0:temp_this_max, 4] = temp[0:temp_this_max]
add_plot_spectra[:, 0, kk] = fitted_temp[:, kk]
add_plot_spectra[:, 1, kk] = Ka_temp[:, kk]
add_plot_spectra[:, 2, kk] = bkground_temp[:, kk]
add_plot_spectra[:, 4, kk] = l_temp[:, kk]
this_add_plot_spectra = np.zeros((self.max_spec_channels, 12))
this_add_plot_spectra[:, :] = add_plot_spectra[:, :, kk]
self.plot_fit_spec(info_elements, spectra=spectra, add_plot_spectra=this_add_plot_spectra, add_plot_names=add_plot_names, fitp=fitp)
if xrf_bin > 0:
if (xrf_bin == 2) and (n_cols > 5) and (n_rows > 5):
for i_bin in range(n_cols-2):
if i_bin % 2 == 0 :
for jj in range(n_rows-2):
if jj % 2 == 0:
thisdata.dataset_orig[i_bin+1, jj, :, :] = (thisdata.dataset_orig[i_bin, jj, :, :] + thisdata.dataset_orig[i_bin+2, jj, :, :])/2.
thisdata.dataset_orig[i_bin, jj+1, :, :] = (thisdata.dataset_orig[i_bin, jj, :, :] + thisdata.dataset_orig[i_bin, jj+2, :, :])/2.
thisdata.dataset_orig[i_bin+1, jj+1, :, :] = (thisdata.dataset_orig[i_bin, jj, :, :] + thisdata.dataset_orig[i_bin+2, jj+2, :, :])/2.
if (xrf_bin == 4) and (n_cols > 5) and (n_rows > 5) :
this_dimensions = thisdata.dataset_orig.shape
congrid_arr = np.zeros((np.floor(n_cols/3.), np.floor(n_rows/3.), this_dimensions[2], this_dimensions[3]))
for i_bin in range(n_cols-3):
if i_bin % 3 == 0 :
for jj in range(n_rows-3):
if jj % 3 == 0 :
congrid_arr[i_bin/3, jj/3, :, :] = thisdata.dataset_orig[i_bin, jj, :, :]
for i_a in range(this_dimensions[2]):
for i_b in range(this_dimensions[3]):
temp_congrid = congrid_arr[:, :, i_a, i_b]
temp_congrid = maps_tools.congrid(temp_congrid, (n_cols, n_rows), self.logger)
thisdata.dataset_orig[:, :, i_a, i_b] = temp_congrid[:, :]
#begin pca part:
if self.pca > 0:
seconds_PCA_start = tm.time()
input_arr = np.zeros((n_cols * n_rows, n_channels))
l = 0
for i_x_pixels in range(n_cols):
for i_y_pixels in range(n_rows - 1):
input_arr[l, :] = scan.mca_arr[i_x_pixels, i_y_pixels, :]
l = l + 1
input_arr = np.transpose(input_arr)
U, eigen_values_vec, V = np.linalg.svd(input_arr, full_matrices=False)
temp_filename = os.path.basename(mdafilename)
basename, extension = os.path.splitext(temp_filename)
filename = os.path.join(self.main_dict['pca_dir'], basename) + '.pca.h5'
gzip = 7
f = call_function_with_retry(h5py.File, 5, 0.1, 1.1, (filename, 'w'))
if f == None:
self.logger.error('maps_generate_img_dat: Error opening file %s', filename)
else:
# create a group for maps to hold the data
pcaGrp = f.create_group('PCA')
ds_data = pcaGrp.create_dataset('n_channels', data=n_channels)
ds_data = pcaGrp.create_dataset('n_cols', data=n_cols)
ds_data = pcaGrp.create_dataset('n_rows', data=n_rows)
data = long(len(scan.detector_description_arr))
ds_data = pcaGrp.create_dataset('n_detector_description', data=data)
ds_data = pcaGrp.create_dataset('eigen_vec', data=eigen_values_vec)
ds_data = pcaGrp.create_dataset('U', data=U, compression='gzip', compression_opts=7)
ds_data = pcaGrp.create_dataset('V', data=V, compression='gzip', compression_opts=7)
ds_data = pcaGrp.create_dataset('input_arr', data=input_arr, compression='gzip', compression_opts=7)
ds_data = pcaGrp.create_dataset('scan_time_stamp', data=scan.scan_time_stamp)
ds_data = pcaGrp.create_dataset('y_coord_arr', data=scan.y_coord_arr)
ds_data = pcaGrp.create_dataset('x_coord_arr', data=scan.x_coord_arr)
ds_data = pcaGrp.create_dataset('x_pixels', data=scan.x_pixels)
ds_data = pcaGrp.create_dataset('y_pixels', data=scan.y_pixels)
ds_data = pcaGrp.create_dataset('detector_description_arr', data=scan.detector_description_arr)
ds_data = pcaGrp.create_dataset('detector_arr', data=scan.detector_arr)
f.close()
seconds_PCA_end = tm.time()
delta_sec = str(seconds_PCA_end-seconds_PCA_start)
delta_min = str((seconds_PCA_end-seconds_PCA_start) / 60.)
delta_hour = str((seconds_PCA_end-seconds_PCA_start) / 60. / 24.)
self.logger.info('PCA part of the analysis took : %s seconds corresponding to %s minutes corresponding to %s hours', delta_sec, delta_min, delta_hour)
#########################################################################################
thisdata.dataset_names = ['ROI sum', 'fitted', 'lin_fit']
thisdata.scan_time_stamp = scan.scan_time_stamp
thisdata.write_date = datetime.datetime.utcnow()
thisdata.x_coord_arr = scan.x_coord_arr
thisdata.y_coord_arr = scan.y_coord_arr
thisdata.dmaps_set = dmaps_set
for item in make_maps_conf.dmaps:
if item.use == 1:
thisdata.dmaps_names.append(item.name)
thisdata.dmaps_units.append(item.units)
dmaps_use = []
for item in make_maps_conf.dmaps: dmaps_use.append(item.use)
dmaps_use = np.array(dmaps_use)
for i in range(len(make_maps_conf.chan)):
if make_maps_conf.chan[i].use == 1:
thisdata.chan_names.append(make_maps_conf.chan[i].name)
thisdata.chan_units.append([make_maps_conf.chan[i].units[0],make_maps_conf.chan[i].units[1],make_maps_conf.chan[i].units[2]])
thisdata.version = self.version
thisdata.us_amp = us_amp
thisdata.ds_amp = ds_amp
chan_use = []
for ii in range(len(make_maps_conf.chan)):
chan_use.append( make_maps_conf.chan[ii].use)
wo = np.where(np.array(chan_use) == 1)
for i in range(3):
for j in range(3):
thisdata.dataset_calibration[:, i, j] = make_maps_conf.e_cal[wo, i, j]
if (make_maps_conf.use_fit == 0) or (xrfflyscan == 1):
for i in range(no_detectors) : thisdata.energy_fit[0, i] = make_maps_conf.calibration.offset[i]
for i in range(no_detectors) : thisdata.energy_fit[1, i] = make_maps_conf.calibration.slope[i]
for i in range(no_detectors) : thisdata.energy_fit[2, i] = make_maps_conf.calibration.quad[i]
n_channels = 2048
thisdata.n_energy = n_channels
thisdata.energy = np.arange(float(n_channels)) * thisdata.energy_fit[1, 0] + thisdata.energy_fit[0, 0]
thisdata.energy_spec = np.zeros((n_channels))
thisdata.energy_spec[0:len(spec_all)] = spec_all[:]
for j in range(5) :
thisdata.max_chan_spec[0:len(max_chan_spec[:, 0]), j] = max_chan_spec[:, j]
if self.xanes == 0:
h5file = os.path.join(self.main_dict['img_dat_dir'], header + xrf_bin_ext + '.h5' + suffix)
self.logger.info('now trying to write HDF5 file %s', h5file)
energy_channels = spectra[0].calib['off'] + spectra[0].calib['lin'] * np.arange((n_channels), dtype=np.float)
try:
h5.write_hdf5(thisdata, h5file, scan.mca_arr, energy_channels, extra_pv=extra_pv, extra_pv_order=scan.extra_pv_key_list, update=True)
except:
self.logger.exception("Error writing "+h5file)
'''
#Generate average images
if (total_number_detectors > 1):
self.logger.info( ' we are now going to create the maps_generate_average...'
if this_detector == total_number_detectors -1:
self.logger.info( 'now doing maps_generate_average_img_dat, total_number_detectors: ', total_number_detectors, ' this_detector: ', this_detector, ' this_file = ', mdafilename
energy_channels = spectra[0].calib['off'] + spectra[0].calib['lin'] * np.arange((n_channels), dtype=np.float)
self.generate_average_img_dat(total_number_detectors, make_maps_conf, energy_channels, this_file=mdafilename, extra_pv=extra_pv)
'''
return
# ----------------------------------------------------------------------
def generate_average_img_dat(self, main_dict, make_maps_conf, energy_channels, extra_pv=None):
self.logger.info("Generating average image")
total_number_detectors = main_dict['total_number_detectors']
h5p = maps_hdf5.h5(self.logger)
imgdat_filenames = []
if main_dict['dataset_files_to_proc'][0] == 'all':
imgdat_filenames = []
dirList=os.listdir(main_dict['XRFmaps_dir'])
for fname in dirList:
if fname[-4:] == '.h50':
imgdat_filenames.append(fname)
else:
imgdat_filenames = [mdafile.replace('.mda', '.h50') for mdafile in main_dict['dataset_files_to_proc']]
#self.logger.debug('imgdat_filename %s', imgdat_filenames)
no_files = len(imgdat_filenames)
for i_temp in range(no_files):
basename, extension = os.path.splitext(imgdat_filenames[i_temp])
imgdat_filenames[i_temp] = basename
main_XRFmaps_names = imgdat_filenames
for n_filenumber in range(no_files):
# is the avergae .dat file older than the dat0 file ? if so, generate a
# new avg file, otherwise skip it.
valid_read = 0
avg_XRFmaps_info = None
try:
added_number_detectors = 0
for this_detector_element in range(total_number_detectors):
sfile = os.path.join(self.main_dict['XRFmaps_dir'], imgdat_filenames[n_filenumber] + '.h5' + str(this_detector_element).strip())
#self.logger.debug('sfile %s', sfile)
n_ev, n_rows, n_cols, n_energy, energy, energy_spec, scan_time_stamp, dataset_orig = self.change_xrf_resetvars()
#temp = max([sfile.split('/'), sfile.split('\\')])
#if temp == -1:
# temp = 0
if not os.path.isfile(sfile) :
self.logger.warning('WARNING: did not find :%s skipping to next',sfile)
continue
XRFmaps_info, n_cols, n_rows, n_channels, valid_read = h5p.maps_change_xrf_read_hdf5(sfile, make_maps_conf)
if valid_read == 0 and XRFmaps_info == None:
self.logger.error('Error calling h5p.maps_change_xrf_read_hdf5(%s,%s)', sfile, make_maps_conf)
break
f = call_function_with_retry(h5py.File, 5, 0.1, 1.1, (sfile, 'r'))
if f == None:
self.logger.error('Error opening file %s', sfile)
break
if 'MAPS' not in f:
self.logger.error('error, hdf5 file does not contain the required MAPS group. I am aborting this action')
break
maps_group_id = f['MAPS']
entryname = 'mca_arr'
mca_arr, valid_read = h5p.read_hdf5_core(maps_group_id, entryname)
mca_arr = np.transpose(mca_arr)
if valid_read == 0:
self.logger.warning('warning: did not find the valid mca array in dataset. cannot extract spectra')
break
f.close()
if added_number_detectors == 0:
avg_XRFmaps_info, n_cols, n_rows, n_channels, valid_read = h5p.maps_change_xrf_read_hdf5(sfile, make_maps_conf)
if valid_read == 0 and avg_XRFmaps_info == None:
self.logger.error('Error calling h5p.maps_change_xrf_read_hdf5(%s, %s)', sfile, make_maps_conf)
break
avg_mca_arr = mca_arr.copy()
elif added_number_detectors >= 1:
avg_XRFmaps_info.dmaps_set[:, :, :] = avg_XRFmaps_info.dmaps_set[:, :, :] + XRFmaps_info.dmaps_set[:, :, :]
avg_XRFmaps_info.dataset[:, :, :] = avg_XRFmaps_info.dataset[:, :, :] + XRFmaps_info.dataset[:, :, :]
avg_XRFmaps_info.dataset_orig[:, :, :, :] = avg_XRFmaps_info.dataset_orig[:, :, :, :] + XRFmaps_info.dataset_orig[:, :, :, :]
avg_XRFmaps_info.dataset_calibration[:, :, :] = avg_XRFmaps_info.dataset_calibration[:, :, :] + XRFmaps_info.dataset_calibration[:, :, :]
avg_XRFmaps_info.energy_spec[:] = avg_XRFmaps_info.energy_spec[:] + XRFmaps_info.energy_spec[:]
avg_XRFmaps_info.max_chan_spec[:, :] = avg_XRFmaps_info.max_chan_spec[:, :] + XRFmaps_info.max_chan_spec[:, :]
avg_XRFmaps_info.raw_spec[:, :] = avg_XRFmaps_info.raw_spec[:, :] + XRFmaps_info.raw_spec[:, :]
avg_mca_arr = avg_mca_arr + mca_arr
added_number_detectors = added_number_detectors+1
if not os.path.isfile(sfile):
self.logger.warning('WARNING: did not find any of these: %s skipping to next level', sfile)
continue
if avg_XRFmaps_info != None:
avg_XRFmaps_info.dmaps_set[:, :, :] = avg_XRFmaps_info.dmaps_set[:, :, :] / added_number_detectors
avg_XRFmaps_info.dataset[:, :, :] = avg_XRFmaps_info.dataset[:, :, :] / added_number_detectors
avg_XRFmaps_info.dataset_orig[:, :, :, :] = avg_XRFmaps_info.dataset_orig[:, :, :, :] / added_number_detectors
avg_XRFmaps_info.dataset_calibration[:, :, :] = avg_XRFmaps_info.dataset_calibration[:, :, :] / added_number_detectors
avg_XRFmaps_info.energy_spec[:] = avg_XRFmaps_info.energy_spec[:] / added_number_detectors
avg_XRFmaps_info.max_chan_spec[:, :] = avg_XRFmaps_info.max_chan_spec[:, :] / added_number_detectors
avg_XRFmaps_info.raw_spec[:, :] = avg_XRFmaps_info.raw_spec[:, :] / added_number_detectors
h5p.write_hdf5(avg_XRFmaps_info, os.path.join(self.main_dict['XRFmaps_dir'], imgdat_filenames[n_filenumber] + '.h5'), avg_mca_arr, energy_channels, extra_pv=XRFmaps_info.extra_pv, extra_pv_order=XRFmaps_info.extra_pv_as_csv)
except:
self.logger.exception('exception occured')
return
# ----------------------------------------------------------------------
def change_xrf_resetvars(self):
n_ev = 0L
n_rows = 3L
n_cols = 3L
n_energy = 1100L
energy = np.zeros(n_energy)
energy_spec = np.arange(float(n_energy))
scan_time_stamp = ''
dataset_orig = 0
return n_ev, n_rows, n_cols, n_energy, energy, energy_spec, scan_time_stamp, dataset_orig
# ----------------------------------------------------------------------
def plot_fit_spec(self, info_elements, spectra=0, add_plot_spectra=0, add_plot_names=0, ps=0, fitp=0, perpix=0, save_csv=1):
self.logger.info('ploting spectrum')
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
mplot.rcParams['pdf.fonttype'] = 42
fontsize = 9
mplot.rcParams['font.size'] = fontsize
colortable = []
colortable.append((0., 0., 0.)) # ; black
colortable.append((1., 0., 0.)) # ; red
colortable.append((0., 1., 0.)) # ; green
colortable.append((0., 0., 1.)) # ; blue
colortable.append((0., 1., 1.)) # ; turquois
colortable.append((1., 0., 1.)) # ; magenta
colortable.append((1., 1., 0.)) # ; yellow
colortable.append((0.7, 0.7, 0.7)) # ; light grey
colortable.append((1., 0.8, 0.75)) # ; flesh
colortable.append(( 0.35, 0.35, 0.35)) # ; dark grey
colortable.append((0., 0.5, 0.5)) # ; sea green
colortable.append((1., 0., 0.53)) # ; pink-red
colortable.append((0., 1., 0.68)) # ; bluegreen
colortable.append((1., 0.5, 0.)) # ; orange
colortable.append((0., 0.68, 1.)) # ; another blue
colortable.append((0.5, 0., 1.)) # ; violet
colortable.append((1., 1., 1.)) # ; white
#foreground_color = colortable[-1]
#background_color = colortable[0]
droplist_spectrum = 0
#droplist_scale = 0
png = 0
if ps == 0:
png = 2
if spectra == 0:
return
have_name = 0
for isp in range(len(spectra)):
if spectra[isp].name != '':
have_name = 1
if have_name == 0:
return
filename = spectra[0].name
if save_csv == 1:
csvfilename = 'csv_' + filename + '.csv'
file_csv = os.path.join(self.main_dict['output_dir'], csvfilename)
if (png > 0) or (ps > 0):
if png > 0:
dpi = 100
canvas_xsize_in = 900. / dpi
canvas_ysize_in = 700. / dpi
fig = mplot.figure.Figure(figsize=(canvas_xsize_in, canvas_ysize_in), dpi=dpi, edgecolor=None)
canvas = FigureCanvas(fig)
fig.add_axes()
axes = fig.gca()
for child in axes.get_children():
if isinstance(child, mplot.spines.Spine):
child.set_color((0., 0., 0.))
#axes.set_axis_bgcolor(background_color)
ya = axes.yaxis
xa = axes.xaxis
ya.set_tick_params(labelcolor=(0., 0., 0.))
ya.set_tick_params(color=(0., 0., 0.))
xa.set_tick_params(labelcolor=(0., 0., 0.))
xa.set_tick_params(color=(0., 0., 0.))
if ps > 0:
ps_filename = 'ps_' + filename + '.pdf'
if ps_filename == '':
return
eps_plot_xsize = 8.
eps_plot_ysize = 6.
fig = mplot.figure.Figure(figsize=(eps_plot_xsize, eps_plot_ysize))
canvas = FigureCanvas(fig)
fig.add_axes()
axes = fig.gca()
file_ps = os.path.join(self.main_dict['output_dir'], ps_filename)
if spectra[droplist_spectrum].used_chan > 0:
this_axis_calib = droplist_spectrum
xaxis = (np.arange(spectra[this_axis_calib].used_chan))**2*spectra[this_axis_calib].calib['quad'] + \
np.arange(spectra[this_axis_calib].used_chan) * spectra[this_axis_calib].calib['lin'] + \
spectra[this_axis_calib].calib['off']
xtitle = 'energy [keV]'
xmin = fitp.g.xmin * 0.5
xmax = fitp.g.xmax + (fitp.g.xmax - fitp.g.xmin) * 0.10
wo_a = np.where(xaxis > xmax)[0]
if len(wo_a) > 0 :
wo_xmax = np.amin(wo_a)
else:
wo_xmax = spectra[droplist_spectrum].used_chan * 8. / 10.
wo_b = np.where(xaxis < xmin)[0]
if len(wo_b) >0:
wo_xmin = np.amax(wo_b)
else:
wo_xmin = 0
wo = np.where(spectra[droplist_spectrum].data[wo_xmin:wo_xmax + 1] > 0.)
if len(wo[0]) > 0:
ymin = np.amin(spectra[droplist_spectrum].data[wo + wo_xmin]) * 0.9
else:
ymin = 0.1
if perpix > 0:
ymin = 0.001
if len(wo[0]) > 0:
ymax = np.amax(spectra[droplist_spectrum].data[wo+wo_xmin] * 1.1)
else:
ymax = np.amax(spectra[droplist_spectrum].data)
# make sure ymax is larger than ymin, so as to avoid a crash during plotting
if ymax <= ymin:
ymax = ymin + 0.001
'''
yanno = (1.01 + 0.04 * (1 - droplist_scale)) * ymax
yanno_beta = (1.07 + 0.53 * (1 - droplist_scale)) * ymax
if droplist_scale == 0:
yanno_below = 0.8 * ymin
else:
yanno_below = ymin -(ymax - ymin) * .04
yanno_lowest = (0.8 + 0.15 * (1 - (1 - droplist_scale))) * ymin
'''
this_spec = spectra[droplist_spectrum].data[0:spectra[droplist_spectrum].used_chan]
wo = np.where(this_spec <= 0)[0]
if len(wo) > 0:
this_spec[wo] = ymin
plot1 = axes.semilogy(xaxis, this_spec, linewidth=1.0)
axes.set_xlabel(xtitle)
axes.set_ylabel('counts')
axes.set_xlim((xmin, xmax))
axes.set_ylim((ymin, ymax))
axes.set_position([0.10,0.18,0.85,0.75])
self.logger.debug('spectra[droplist_spectrum].name %s', spectra[droplist_spectrum].name)
axes.text(-0.10, -0.12, spectra[droplist_spectrum].name, transform=axes.transAxes)
if add_plot_spectra.any():
size = add_plot_spectra.shape
if len(size) == 2:
#for k = size[2]-1, 0, -1 :
for k in np.arange(size[1] - 1, -1, -1):
plot2 = axes.semilogy(xaxis, add_plot_spectra[:, k], color=colortable[1 + k], linewidth=1.0)
if k <= 2:
axes.text(-0.10 + 0.4 + 0.2 * k, -0.12, add_plot_names[k], color=colortable[1 + k], transform=axes.transAxes)
if (k >= 3) and (k <= 6):
axes.text(-0.10 + 0.2 * (k - 3), -0.15, add_plot_names[k], color=colortable[1 + k], transform=axes.transAxes)
if k >= 7:
axes.text(-0.10 + 0.2 * (k - 7), -0.18, add_plot_names[k], color=colortable[1 + k], transform=axes.transAxes)
# plot background next to last
plot3 = axes.semilogy(xaxis, add_plot_spectra[:, 2], color=colortable[1 + 2], linewidth=1.0)
# plot fit last
plot4 = axes.semilogy(xaxis, add_plot_spectra[:, 0], color=colortable[1 + 0], linewidth=1.0)
# plot xrf ticks
element_list = np.array([11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 22, 24, 25, 26, 27, 28, 29, 30, 32, 33, 35]) - 1
x_positions = []
for i in range(len(info_elements)): x_positions.append(info_elements[i].xrf['ka1'])
color = 2
local_ymax = np.array([1.03, 1.15, 1.3]) * ymax
local_ymin = ymax * 0.9
for k in range(len(element_list)):
i = element_list[k]
line=mplot.lines.Line2D([x_positions[i], x_positions[i]], [local_ymin, local_ymax[(i - int(i / 3) * 3)]], color=colortable[color])
line.set_clip_on(False)
axes.add_line(line)
axes.text(x_positions[i], local_ymax[(i - int(i / 3) * 3)], info_elements[i].name, ha='center', va='bottom', color=colortable[color])
if (png > 0) or (ps > 0):
if png > 0:
axes.text(0.97, -0.23, 'mapspy', transform=axes.transAxes)
if (png == 1) or (png == 2):
image_filename = filename + '.png'
self.logger.info('saving png %s', os.path.join(self.main_dict['output_dir'], image_filename))
fig.savefig(os.path.join(self.main_dict['output_dir'], image_filename), dpi=dpi, edgecolor=None)
if ps > 0:
fig.savefig(file_ps)
if save_csv == 1:
if add_plot_spectra.any():
size = add_plot_spectra.shape
if len(size) == 2:
spectra_names = ['Energy', 'Spectrum']
for i in range(len(add_plot_names)):
spectra_names.append(add_plot_names[i])
dims = add_plot_spectra.shape
allspectra = np.zeros((dims[0], dims[1] + 2))
allspectra[:, 2:] = add_plot_spectra
allspectra[:, 0] = xaxis
allspectra[:, 1] = this_spec
file_ptr = open_file_with_retry(file_csv, 'wb')
if file_ptr == None:
self.logger.error('Error opening file: %s', file_csv)
else:
writer = csv.writer(file_ptr)
writer.writerow(spectra_names)
writer.writerows(allspectra)
return
|
MapsPy/MapsPy
|
maps_generate_img_dat.py
|
Python
|
bsd-2-clause
| 88,104
|
[
"NetCDF"
] |
7c95bc52392c5a257c1b71ed82ad342d036b7d3e5da973a7bddebecd5f135e98
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from abc import ABCMeta
from pyspark import since, keyword_only
from pyspark.ml import Predictor, PredictionModel
from pyspark.ml.base import _PredictorParams
from pyspark.ml.param.shared import *
from pyspark.ml.tree import _DecisionTreeModel, _DecisionTreeParams, \
_TreeEnsembleModel, _TreeEnsembleParams, _RandomForestParams, _GBTParams, \
_HasVarianceImpurity, _TreeRegressorParams
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, \
JavaPredictor, JavaPredictionModel, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
# Public API exported by ``from pyspark.ml.regression import *``.
__all__ = ['AFTSurvivalRegression', 'AFTSurvivalRegressionModel',
           'DecisionTreeRegressor', 'DecisionTreeRegressionModel',
           'GBTRegressor', 'GBTRegressionModel',
           'GeneralizedLinearRegression', 'GeneralizedLinearRegressionModel',
           'GeneralizedLinearRegressionSummary', 'GeneralizedLinearRegressionTrainingSummary',
           'IsotonicRegression', 'IsotonicRegressionModel',
           'LinearRegression', 'LinearRegressionModel',
           'LinearRegressionSummary', 'LinearRegressionTrainingSummary',
           'RandomForestRegressor', 'RandomForestRegressionModel',
           'FMRegressor', 'FMRegressionModel']
class Regressor(Predictor, _PredictorParams, metaclass=ABCMeta):
    """
    Regressor for regression tasks.
    .. versionadded:: 3.0.0
    """
    # Fixed: the Python 2 style ``__metaclass__ = ABCMeta`` class attribute is
    # silently ignored on Python 3, so the class was not actually abstract;
    # ``metaclass=ABCMeta`` in the class header is the Python 3 equivalent.
class RegressionModel(PredictionModel, _PredictorParams, metaclass=ABCMeta):
    """
    Model produced by a ``Regressor``.
    .. versionadded:: 3.0.0
    """
    # Fixed: ``__metaclass__ = ABCMeta`` is Python 2 syntax with no effect on
    # Python 3; use ``metaclass=ABCMeta`` so the class is genuinely abstract.
class _JavaRegressor(Regressor, JavaPredictor, metaclass=ABCMeta):
    """
    Java Regressor for regression tasks.
    .. versionadded:: 3.0.0
    """
    # Fixed: ``__metaclass__ = ABCMeta`` is Python 2 syntax with no effect on
    # Python 3; use ``metaclass=ABCMeta`` so the class is genuinely abstract.
class _JavaRegressionModel(RegressionModel, JavaPredictionModel, metaclass=ABCMeta):
    """
    Java Model produced by a ``_JavaRegressor``.
    To be mixed in with :class:`pyspark.ml.JavaModel`
    .. versionadded:: 3.0.0
    """
    # Fixed: ``__metaclass__ = ABCMeta`` is Python 2 syntax with no effect on
    # Python 3; use ``metaclass=ABCMeta`` so the class is genuinely abstract.
class _LinearRegressionParams(_PredictorParams, HasRegParam, HasElasticNetParam, HasMaxIter,
                              HasTol, HasFitIntercept, HasStandardization, HasWeightCol, HasSolver,
                              HasAggregationDepth, HasLoss, HasBlockSize):
    """
    Params for :py:class:`LinearRegression` and :py:class:`LinearRegressionModel`.
    .. versionadded:: 3.0.0
    """
    # Estimator-specific params; the shared ones come from the Has* mixins.
    solver = Param(
        Params._dummy(), "solver",
        "The solver algorithm for optimization. Supported options: auto, normal, l-bfgs.",
        typeConverter=TypeConverters.toString)
    loss = Param(
        Params._dummy(), "loss",
        "The loss function to be optimized. Supported options: squaredError, huber.",
        typeConverter=TypeConverters.toString)
    epsilon = Param(
        Params._dummy(), "epsilon",
        "The shape parameter to control the amount of robustness. Must be > 1.0. "
        "Only valid when loss is huber",
        typeConverter=TypeConverters.toFloat)
    def __init__(self):
        super(_LinearRegressionParams, self).__init__()
        # Defaults mirror the Scala-side implementation.
        self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, loss="squaredError",
                         epsilon=1.35, blockSize=1)
    @since("2.3.0")
    def getEpsilon(self):
        """
        Gets the value of epsilon or its default value.
        """
        return self.getOrDefault(self.epsilon)
@inherit_doc
class LinearRegression(_JavaRegressor, _LinearRegressionParams, JavaMLWritable, JavaMLReadable):
    """
    Linear regression.
    The learning objective is to minimize the specified loss function, with regularization.
    This supports two kinds of loss:
    * squaredError (a.k.a squared loss)
    * huber (a hybrid of squared error for relatively small errors and absolute error for \
    relatively large ones, and we estimate the scale parameter from training data)
    This supports multiple types of regularization:
    * none (a.k.a. ordinary least squares)
    * L2 (ridge regression)
    * L1 (Lasso)
    * L2 + L1 (elastic net)
    Note: Fitting with huber loss only supports none and L2 regularization.
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, 2.0, Vectors.dense(1.0)),
    ...     (0.0, 2.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
    >>> lr = LinearRegression(regParam=0.0, solver="normal", weightCol="weight")
    >>> lr.setMaxIter(5)
    LinearRegression...
    >>> lr.getMaxIter()
    5
    >>> lr.setRegParam(0.1)
    LinearRegression...
    >>> lr.getRegParam()
    0.1
    >>> lr.setRegParam(0.0)
    LinearRegression...
    >>> model = lr.fit(df)
    >>> model.setFeaturesCol("features")
    LinearRegressionModel...
    >>> model.setPredictionCol("newPrediction")
    LinearRegressionModel...
    >>> model.getMaxIter()
    5
    >>> model.getBlockSize()
    1
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> abs(model.predict(test0.head().features) - (-1.0)) < 0.001
    True
    >>> abs(model.transform(test0).head().newPrediction - (-1.0)) < 0.001
    True
    >>> abs(model.coefficients[0] - 1.0) < 0.001
    True
    >>> abs(model.intercept - 0.0) < 0.001
    True
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> abs(model.transform(test1).head().newPrediction - 1.0) < 0.001
    True
    >>> lr.setParams("vector")
    Traceback (most recent call last):
        ...
    TypeError: Method setParams forces keyword arguments.
    >>> lr_path = temp_path + "/lr"
    >>> lr.save(lr_path)
    >>> lr2 = LinearRegression.load(lr_path)
    >>> lr2.getMaxIter()
    5
    >>> model_path = temp_path + "/lr_model"
    >>> model.save(model_path)
    >>> model2 = LinearRegressionModel.load(model_path)
    >>> model.coefficients[0] == model2.coefficients[0]
    True
    >>> model.intercept == model2.intercept
    True
    >>> model.numFeatures
    1
    >>> model.write().format("pmml").save(model_path + "_2")
    .. versionadded:: 1.4.0
    """
    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                 standardization=True, solver="auto", weightCol=None, aggregationDepth=2,
                 loss="squaredError", epsilon=1.35, blockSize=1):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                 standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \
                 loss="squaredError", epsilon=1.35, blockSize=1)
        """
        super(LinearRegression, self).__init__()
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.LinearRegression", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                  standardization=True, solver="auto", weightCol=None, aggregationDepth=2,
                  loss="squaredError", epsilon=1.35, blockSize=1):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                  standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \
                  loss="squaredError", epsilon=1.35, blockSize=1)
        Sets params for linear regression.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return LinearRegressionModel(java_model)
    @since("2.3.0")
    def setEpsilon(self, value):
        """
        Sets the value of :py:attr:`epsilon`.
        """
        return self._set(epsilon=value)
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)
    def setRegParam(self, value):
        """
        Sets the value of :py:attr:`regParam`.
        """
        return self._set(regParam=value)
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)
    def setElasticNetParam(self, value):
        """
        Sets the value of :py:attr:`elasticNetParam`.
        """
        return self._set(elasticNetParam=value)
    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)
    def setStandardization(self, value):
        """
        Sets the value of :py:attr:`standardization`.
        """
        return self._set(standardization=value)
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
    def setSolver(self, value):
        """
        Sets the value of :py:attr:`solver`.
        """
        return self._set(solver=value)
    def setAggregationDepth(self, value):
        """
        Sets the value of :py:attr:`aggregationDepth`.
        """
        return self._set(aggregationDepth=value)
    def setLoss(self, value):
        """
        Sets the value of :py:attr:`loss`.
        """
        # Fixed: previously passed ``lossType=value``, but this estimator has
        # no ``lossType`` param (that name belongs to the GBT classes), so the
        # call raised AttributeError.  The param defined on
        # _LinearRegressionParams is ``loss``.
        return self._set(loss=value)
    @since("3.1.0")
    def setBlockSize(self, value):
        """
        Sets the value of :py:attr:`blockSize`.
        """
        return self._set(blockSize=value)
class LinearRegressionModel(_JavaRegressionModel, _LinearRegressionParams, GeneralJavaMLWritable,
                            JavaMLReadable, HasTrainingSummary):
    """
    Model fitted by :class:`LinearRegression`.
    Thin wrapper over the JVM model object; ``GeneralJavaMLWritable`` is what
    enables ``model.write().format("pmml")`` export.
    .. versionadded:: 1.4.0
    """
    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")
    @property
    @since("1.4.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")
    @property
    @since("2.3.0")
    def scale(self):
        r"""
        The value by which :math:`\|y - X'w\|` is scaled down when loss is "huber", otherwise 1.0.
        """
        return self._call_java("scale")
    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (e.g. residuals, mse, r-squared ) of model on
        training set. An exception is thrown if
        `trainingSummary is None`.

        :raises RuntimeError: if the model was not trained in this session
            (e.g. it was loaded from disk) and therefore has no summary.
        """
        if self.hasSummary:
            # Wrap the Java summary object in its Python training-summary class.
            return LinearRegressionTrainingSummary(super(LinearRegressionModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)
    @since("2.0.0")
    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.
        :param dataset:
          Test dataset to evaluate model on, where dataset is an
          instance of :py:class:`pyspark.sql.DataFrame`
        :return: :py:class:`LinearRegressionSummary` with metrics computed
          on ``dataset``.
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        java_lr_summary = self._call_java("evaluate", dataset)
        return LinearRegressionSummary(java_lr_summary)
class LinearRegressionSummary(JavaWrapper):
    """
    Linear regression results evaluated on a dataset.
    Thin read-only wrapper around the Java ``LinearRegressionSummary`` object:
    every property forwards to the JVM via ``_call_java``.
    .. versionadded:: 2.0.0
    """
    @property
    @since("2.0.0")
    def predictions(self):
        """
        Dataframe outputted by the model's `transform` method.
        """
        return self._call_java("predictions")
    @property
    @since("2.0.0")
    def predictionCol(self):
        """
        Field in "predictions" which gives the predicted value of
        the label at each instance.
        """
        return self._call_java("predictionCol")
    @property
    @since("2.0.0")
    def labelCol(self):
        """
        Field in "predictions" which gives the true label of each
        instance.
        """
        return self._call_java("labelCol")
    @property
    @since("2.0.0")
    def featuresCol(self):
        """
        Field in "predictions" which gives the features of each instance
        as a vector.
        """
        return self._call_java("featuresCol")
    @property
    @since("2.0.0")
    def explainedVariance(self):
        r"""
        Returns the explained variance regression score.
        explainedVariance = :math:`1 - \frac{variance(y - \hat{y})}{variance(y)}`
        .. seealso:: `Wikipedia explain variation
            <http://en.wikipedia.org/wiki/Explained_variation>`_
        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("explainedVariance")
    @property
    @since("2.0.0")
    def meanAbsoluteError(self):
        """
        Returns the mean absolute error, which is a risk function
        corresponding to the expected value of the absolute error
        loss or l1-norm loss.
        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("meanAbsoluteError")
    @property
    @since("2.0.0")
    def meanSquaredError(self):
        """
        Returns the mean squared error, which is a risk function
        corresponding to the expected value of the squared error
        loss or quadratic loss.
        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("meanSquaredError")
    @property
    @since("2.0.0")
    def rootMeanSquaredError(self):
        """
        Returns the root mean squared error, which is defined as the
        square root of the mean squared error.
        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("rootMeanSquaredError")
    @property
    @since("2.0.0")
    def r2(self):
        """
        Returns R^2, the coefficient of determination.
        .. seealso:: `Wikipedia coefficient of determination
            <http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("r2")
    @property
    @since("2.4.0")
    def r2adj(self):
        """
        Returns Adjusted R^2, the adjusted coefficient of determination.
        .. seealso:: `Wikipedia coefficient of determination, Adjusted R^2
            <https://en.wikipedia.org/wiki/Coefficient_of_determination#Adjusted_R2>`_
        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark versions.
        """
        return self._call_java("r2adj")
    @property
    @since("2.0.0")
    def residuals(self):
        """
        Residuals (label - predicted value)
        """
        return self._call_java("residuals")
    @property
    @since("2.0.0")
    def numInstances(self):
        """
        Number of instances in DataFrame predictions
        """
        return self._call_java("numInstances")
    @property
    @since("2.2.0")
    def degreesOfFreedom(self):
        """
        Degrees of freedom.
        """
        return self._call_java("degreesOfFreedom")
    @property
    @since("2.0.0")
    def devianceResiduals(self):
        """
        The weighted residuals, the usual residuals rescaled by the
        square root of the instance weights.
        """
        return self._call_java("devianceResiduals")
    @property
    @since("2.0.0")
    def coefficientStandardErrors(self):
        """
        Standard error of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.
        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("coefficientStandardErrors")
    @property
    @since("2.0.0")
    def tValues(self):
        """
        T-statistic of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.
        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("tValues")
    @property
    @since("2.0.0")
    def pValues(self):
        """
        Two-sided p-value of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.
        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("pValues")
@inherit_doc
class LinearRegressionTrainingSummary(LinearRegressionSummary):
    """
    Linear regression training results. Currently, the training summary ignores the
    training weights except for the objective trace.
    Extends :py:class:`LinearRegressionSummary` with training-only metrics.
    .. versionadded:: 2.0.0
    """
    @property
    @since("2.0.0")
    def objectiveHistory(self):
        """
        Objective function (scaled loss + regularization) at each
        iteration.
        This value is only available when using the "l-bfgs" solver.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("objectiveHistory")
    @property
    @since("2.0.0")
    def totalIterations(self):
        """
        Number of training iterations until termination.
        This value is only available when using the "l-bfgs" solver.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("totalIterations")
class _IsotonicRegressionParams(HasFeaturesCol, HasLabelCol, HasPredictionCol, HasWeightCol):
    """
    Params for :py:class:`IsotonicRegression` and :py:class:`IsotonicRegressionModel`.
    .. versionadded:: 3.0.0
    """
    # Fixed: the concatenated doc string was missing a space between "or" and
    # "antitonic", so explainParams() printed "...(true) orantitonic/...".
    isotonic = Param(
        Params._dummy(), "isotonic",
        "whether the output sequence should be isotonic/increasing (true) or " +
        "antitonic/decreasing (false).", typeConverter=TypeConverters.toBoolean)
    featureIndex = Param(
        Params._dummy(), "featureIndex",
        "The index of the feature if featuresCol is a vector column, no effect otherwise.",
        typeConverter=TypeConverters.toInt)
    def __init__(self):
        super(_IsotonicRegressionParams, self).__init__()
        self._setDefault(isotonic=True, featureIndex=0)
    def getIsotonic(self):
        """
        Gets the value of isotonic or its default value.
        """
        return self.getOrDefault(self.isotonic)
    def getFeatureIndex(self):
        """
        Gets the value of featureIndex or its default value.
        """
        return self.getOrDefault(self.featureIndex)
@inherit_doc
class IsotonicRegression(JavaEstimator, _IsotonicRegressionParams, HasWeightCol,
                         JavaMLWritable, JavaMLReadable):
    """
    Isotonic regression.
    Currently implemented using parallelized pool adjacent violators algorithm.
    Only univariate (single feature) algorithm supported.
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> ir = IsotonicRegression()
    >>> model = ir.fit(df)
    >>> model.setFeaturesCol("features")
    IsotonicRegressionModel...
    >>> model.numFeatures
    1
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> model.predict(test0.head().features[model.getFeatureIndex()])
    0.0
    >>> model.boundaries
    DenseVector([0.0, 1.0])
    >>> ir_path = temp_path + "/ir"
    >>> ir.save(ir_path)
    >>> ir2 = IsotonicRegression.load(ir_path)
    >>> ir2.getIsotonic()
    True
    >>> model_path = temp_path + "/ir_model"
    >>> model.save(model_path)
    >>> model2 = IsotonicRegressionModel.load(model_path)
    >>> model.boundaries == model2.boundaries
    True
    >>> model.predictions == model2.predictions
    True
    .. versionadded:: 1.6.0
    """
    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 weightCol=None, isotonic=True, featureIndex=0):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 weightCol=None, isotonic=True, featureIndex=0)
        """
        super(IsotonicRegression, self).__init__()
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.IsotonicRegression", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  weightCol=None, isotonic=True, featureIndex=0):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  weightCol=None, isotonic=True, featureIndex=0)
        Set the params for IsotonicRegression.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return IsotonicRegressionModel(java_model)
    def setIsotonic(self, value):
        """
        Sets the value of :py:attr:`isotonic`.
        """
        return self._set(isotonic=value)
    def setFeatureIndex(self, value):
        """
        Sets the value of :py:attr:`featureIndex`.
        """
        return self._set(featureIndex=value)
    @since("1.6.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)
    @since("1.6.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)
    @since("1.6.0")
    def setLabelCol(self, value):
        """
        Sets the value of :py:attr:`labelCol`.
        """
        return self._set(labelCol=value)
    @since("1.6.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
class IsotonicRegressionModel(JavaModel, _IsotonicRegressionParams, JavaMLWritable,
                              JavaMLReadable):
    """
    Model fitted by :class:`IsotonicRegression`.
    Piecewise-linear model defined by ``boundaries`` and ``predictions``.
    .. versionadded:: 1.6.0
    """
    @since("3.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)
    @since("3.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)
    def setFeatureIndex(self, value):
        """
        Sets the value of :py:attr:`featureIndex`.
        """
        return self._set(featureIndex=value)
    @property
    @since("1.6.0")
    def boundaries(self):
        """
        Boundaries in increasing order for which predictions are known.
        """
        return self._call_java("boundaries")
    @property
    @since("1.6.0")
    def predictions(self):
        """
        Predictions associated with the boundaries at the same index, monotone because of isotonic
        regression.
        """
        return self._call_java("predictions")
    @property
    @since("3.0.0")
    def numFeatures(self):
        """
        Returns the number of features the model was trained on. If unknown, returns -1
        """
        return self._call_java("numFeatures")
    @since("3.0.0")
    def predict(self, value):
        """
        Predict label for the given features.
        Note: takes a single scalar feature value, not a vector.
        """
        return self._call_java("predict", value)
class _DecisionTreeRegressorParams(_DecisionTreeParams, _TreeRegressorParams, HasVarianceCol):
    """
    Params for :py:class:`DecisionTreeRegressor` and :py:class:`DecisionTreeRegressionModel`.
    .. versionadded:: 3.0.0
    """
    def __init__(self):
        super(_DecisionTreeRegressorParams, self).__init__()
        # Defaults mirror the Scala-side implementation; "variance" is the only
        # impurity supported for regression trees.
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                         impurity="variance", leafCol="", minWeightFractionPerNode=0.0)
@inherit_doc
class DecisionTreeRegressor(_JavaRegressor, _DecisionTreeRegressorParams, JavaMLWritable,
                            JavaMLReadable):
    """
    `Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> dt = DecisionTreeRegressor(maxDepth=2)
    >>> dt.setVarianceCol("variance")
    DecisionTreeRegressor...
    >>> model = dt.fit(df)
    >>> model.getVarianceCol()
    'variance'
    >>> model.setLeafCol("leafId")
    DecisionTreeRegressionModel...
    >>> model.depth
    1
    >>> model.numNodes
    3
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> model.numFeatures
    1
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.predict(test0.head().features)
    0.0
    >>> result = model.transform(test0).head()
    >>> result.prediction
    0.0
    >>> model.predictLeaf(test0.head().features)
    0.0
    >>> result.leafId
    0.0
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> dtr_path = temp_path + "/dtr"
    >>> dt.save(dtr_path)
    >>> dt2 = DecisionTreeRegressor.load(dtr_path)
    >>> dt2.getMaxDepth()
    2
    >>> model_path = temp_path + "/dtr_model"
    >>> model.save(model_path)
    >>> model2 = DecisionTreeRegressionModel.load(model_path)
    >>> model.numNodes == model2.numNodes
    True
    >>> model.depth == model2.depth
    True
    >>> model.transform(test1).head().variance
    0.0
    >>> df3 = spark.createDataFrame([
    ...     (1.0, 0.2, Vectors.dense(1.0)),
    ...     (1.0, 0.8, Vectors.dense(1.0)),
    ...     (0.0, 1.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
    >>> dt3 = DecisionTreeRegressor(maxDepth=2, weightCol="weight", varianceCol="variance")
    >>> model3 = dt3.fit(df3)
    >>> print(model3.toDebugString)
    DecisionTreeRegressionModel...depth=1, numNodes=3...
    .. versionadded:: 1.4.0
    """
    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance",
                 seed=None, varianceCol=None, weightCol=None, leafCol="",
                 minWeightFractionPerNode=0.0):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                 impurity="variance", seed=None, varianceCol=None, weightCol=None, \
                 leafCol="", minWeightFractionPerNode=0.0)
        """
        super(DecisionTreeRegressor, self).__init__()
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.DecisionTreeRegressor", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                  impurity="variance", seed=None, varianceCol=None, weightCol=None,
                  leafCol="", minWeightFractionPerNode=0.0):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                  impurity="variance", seed=None, varianceCol=None, weightCol=None, \
                  leafCol="", minWeightFractionPerNode=0.0)
        Sets params for the DecisionTreeRegressor.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return DecisionTreeRegressionModel(java_model)
    @since("1.4.0")
    def setMaxDepth(self, value):
        """
        Sets the value of :py:attr:`maxDepth`.
        """
        return self._set(maxDepth=value)
    @since("1.4.0")
    def setMaxBins(self, value):
        """
        Sets the value of :py:attr:`maxBins`.
        """
        return self._set(maxBins=value)
    @since("1.4.0")
    def setMinInstancesPerNode(self, value):
        """
        Sets the value of :py:attr:`minInstancesPerNode`.
        """
        return self._set(minInstancesPerNode=value)
    @since("3.0.0")
    def setMinWeightFractionPerNode(self, value):
        """
        Sets the value of :py:attr:`minWeightFractionPerNode`.
        """
        return self._set(minWeightFractionPerNode=value)
    @since("1.4.0")
    def setMinInfoGain(self, value):
        """
        Sets the value of :py:attr:`minInfoGain`.
        """
        return self._set(minInfoGain=value)
    @since("1.4.0")
    def setMaxMemoryInMB(self, value):
        """
        Sets the value of :py:attr:`maxMemoryInMB`.
        """
        return self._set(maxMemoryInMB=value)
    @since("1.4.0")
    def setCacheNodeIds(self, value):
        """
        Sets the value of :py:attr:`cacheNodeIds`.
        """
        return self._set(cacheNodeIds=value)
    @since("1.4.0")
    def setImpurity(self, value):
        """
        Sets the value of :py:attr:`impurity`.
        """
        return self._set(impurity=value)
    @since("1.4.0")
    def setCheckpointInterval(self, value):
        """
        Sets the value of :py:attr:`checkpointInterval`.
        """
        return self._set(checkpointInterval=value)
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)
    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
    @since("2.0.0")
    def setVarianceCol(self, value):
        """
        Sets the value of :py:attr:`varianceCol`.
        """
        return self._set(varianceCol=value)
@inherit_doc
class DecisionTreeRegressionModel(
    _JavaRegressionModel, _DecisionTreeModel, _DecisionTreeRegressorParams,
    JavaMLWritable, JavaMLReadable
):
    """
    Model produced by fitting a :class:`DecisionTreeRegressor`.
    .. versionadded:: 1.4.0
    """
    @since("3.0.0")
    def setVarianceCol(self, value):
        """Set :py:attr:`varianceCol` to ``value``."""
        return self._set(**{"varianceCol": value})
    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.
        This generalizes the idea of "Gini" importance to other losses,
        following the explanation of Gini importance from "Random Forests" documentation
        by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.
        This feature importance is calculated as follows:
        - importance(feature j) = sum (over nodes which split on feature j) of the gain,
        where gain is scaled by the number of instances passing through node
        - Normalize importances for tree to sum to 1.
        .. note:: Feature importance for single decision trees can have high variance due to
        correlated predictor variables. Consider using a :py:class:`RandomForestRegressor`
        to determine feature importance instead.
        """
        importances = self._call_java("featureImportances")
        return importances
class _RandomForestRegressorParams(_RandomForestParams, _TreeRegressorParams):
    """
    Shared params for :py:class:`RandomForestRegressor` and
    :py:class:`RandomForestRegressionModel`.
    .. versionadded:: 3.0.0
    """
    def __init__(self):
        super(_RandomForestRegressorParams, self).__init__()
        # Defaults mirror the Scala-side RandomForestRegressor defaults.
        defaults = dict(
            maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
            maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
            impurity="variance", subsamplingRate=1.0, numTrees=20,
            featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0,
            bootstrap=True)
        self._setDefault(**defaults)
@inherit_doc
class RandomForestRegressor(_JavaRegressor, _RandomForestRegressorParams, JavaMLWritable,
                            JavaMLReadable):
    """
    `Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.
    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> rf = RandomForestRegressor(numTrees=2, maxDepth=2)
    >>> rf.getMinWeightFractionPerNode()
    0.0
    >>> rf.setSeed(42)
    RandomForestRegressor...
    >>> model = rf.fit(df)
    >>> model.getBootstrap()
    True
    >>> model.getSeed()
    42
    >>> model.setLeafCol("leafId")
    RandomForestRegressionModel...
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> allclose(model.treeWeights, [1.0, 1.0])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.predict(test0.head().features)
    0.0
    >>> model.predictLeaf(test0.head().features)
    DenseVector([0.0, 0.0])
    >>> result = model.transform(test0).head()
    >>> result.prediction
    0.0
    >>> result.leafId
    DenseVector([0.0, 0.0])
    >>> model.numFeatures
    1
    >>> model.trees
    [DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...]
    >>> model.getNumTrees
    2
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    0.5
    >>> rfr_path = temp_path + "/rfr"
    >>> rf.save(rfr_path)
    >>> rf2 = RandomForestRegressor.load(rfr_path)
    >>> rf2.getNumTrees()
    2
    >>> model_path = temp_path + "/rfr_model"
    >>> model.save(model_path)
    >>> model2 = RandomForestRegressionModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True
    .. versionadded:: 1.4.0
    """
    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                 impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
                 featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0,
                 weightCol=None, bootstrap=True):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                 impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
                 featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, \
                 weightCol=None, bootstrap=True)
        """
        super(RandomForestRegressor, self).__init__()
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.RandomForestRegressor", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                  impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
                  featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0,
                  weightCol=None, bootstrap=True):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                  impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
                  featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, \
                  weightCol=None, bootstrap=True)
        Sets params for the RandomForestRegressor.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        return RandomForestRegressionModel(java_model)
    def setMaxDepth(self, value):
        """
        Sets the value of :py:attr:`maxDepth`.
        """
        return self._set(maxDepth=value)
    def setMaxBins(self, value):
        """
        Sets the value of :py:attr:`maxBins`.
        """
        return self._set(maxBins=value)
    def setMinInstancesPerNode(self, value):
        """
        Sets the value of :py:attr:`minInstancesPerNode`.
        """
        return self._set(minInstancesPerNode=value)
    def setMinInfoGain(self, value):
        """
        Sets the value of :py:attr:`minInfoGain`.
        """
        return self._set(minInfoGain=value)
    def setMaxMemoryInMB(self, value):
        """
        Sets the value of :py:attr:`maxMemoryInMB`.
        """
        return self._set(maxMemoryInMB=value)
    def setCacheNodeIds(self, value):
        """
        Sets the value of :py:attr:`cacheNodeIds`.
        """
        return self._set(cacheNodeIds=value)
    @since("1.4.0")
    def setImpurity(self, value):
        """
        Sets the value of :py:attr:`impurity`.
        """
        return self._set(impurity=value)
    @since("1.4.0")
    def setNumTrees(self, value):
        """
        Sets the value of :py:attr:`numTrees`.
        """
        return self._set(numTrees=value)
    @since("3.0.0")
    def setBootstrap(self, value):
        """
        Sets the value of :py:attr:`bootstrap`.
        """
        return self._set(bootstrap=value)
    @since("1.4.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.
        """
        return self._set(subsamplingRate=value)
    @since("2.4.0")
    def setFeatureSubsetStrategy(self, value):
        """
        Sets the value of :py:attr:`featureSubsetStrategy`.
        """
        return self._set(featureSubsetStrategy=value)
    def setCheckpointInterval(self, value):
        """
        Sets the value of :py:attr:`checkpointInterval`.
        """
        return self._set(checkpointInterval=value)
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)
    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
    @since("3.0.0")
    def setMinWeightFractionPerNode(self, value):
        """
        Sets the value of :py:attr:`minWeightFractionPerNode`.
        """
        return self._set(minWeightFractionPerNode=value)
class RandomForestRegressionModel(
    _JavaRegressionModel, _TreeEnsembleModel, _RandomForestRegressorParams,
    JavaMLWritable, JavaMLReadable
):
    """
    Model produced by fitting a :class:`RandomForestRegressor`.
    .. versionadded:: 1.4.0
    """
    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        java_trees = list(self._call_java("trees"))
        return [DecisionTreeRegressionModel(jt) for jt in java_trees]
    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.
        Each feature's importance is the average of its importance across all trees in the ensemble
        The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
        (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
        and follows the implementation from scikit-learn.
        .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
        """
        importances = self._call_java("featureImportances")
        return importances
class _GBTRegressorParams(_GBTParams, _TreeRegressorParams):
    """
    Shared params for :py:class:`GBTRegressor` and :py:class:`GBTRegressorModel`.
    .. versionadded:: 3.0.0
    """
    supportedLossTypes = ["squared", "absolute"]
    lossType = Param(Params._dummy(), "lossType",
                     "Loss function which GBT tries to minimize (case-insensitive). " +
                     "Supported options: " + ", ".join(supportedLossTypes),
                     typeConverter=TypeConverters.toString)
    def __init__(self):
        super(_GBTRegressorParams, self).__init__()
        # Defaults mirror the Scala-side GBTRegressor defaults.
        defaults = dict(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                        maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                        checkpointInterval=10, lossType="squared", maxIter=20,
                        stepSize=0.1, impurity="variance", featureSubsetStrategy="all",
                        validationTol=0.01, leafCol="", minWeightFractionPerNode=0.0)
        self._setDefault(**defaults)
    @since("1.4.0")
    def getLossType(self):
        """Return the value of :py:attr:`lossType`, or its default."""
        return self.getOrDefault(self.lossType)
@inherit_doc
class GBTRegressor(_JavaRegressor, _GBTRegressorParams, JavaMLWritable, JavaMLReadable):
    """
    `Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.
    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> gbt = GBTRegressor(maxDepth=2, seed=42, leafCol="leafId")
    >>> gbt.setMaxIter(5)
    GBTRegressor...
    >>> gbt.setMinWeightFractionPerNode(0.049)
    GBTRegressor...
    >>> gbt.getMaxIter()
    5
    >>> print(gbt.getImpurity())
    variance
    >>> print(gbt.getFeatureSubsetStrategy())
    all
    >>> model = gbt.fit(df)
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> model.numFeatures
    1
    >>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.predict(test0.head().features)
    0.0
    >>> model.predictLeaf(test0.head().features)
    DenseVector([0.0, 0.0, 0.0, 0.0, 0.0])
    >>> result = model.transform(test0).head()
    >>> result.prediction
    0.0
    >>> result.leafId
    DenseVector([0.0, 0.0, 0.0, 0.0, 0.0])
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> gbtr_path = temp_path + "gbtr"
    >>> gbt.save(gbtr_path)
    >>> gbt2 = GBTRegressor.load(gbtr_path)
    >>> gbt2.getMaxDepth()
    2
    >>> model_path = temp_path + "gbtr_model"
    >>> model.save(model_path)
    >>> model2 = GBTRegressionModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True
    >>> model.treeWeights == model2.treeWeights
    True
    >>> model.trees
    [DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...]
    >>> validation = spark.createDataFrame([(0.0, Vectors.dense(-1.0))],
    ...                                    ["label", "features"])
    >>> model.evaluateEachIteration(validation, "squared")
    [0.0, 0.0, 0.0, 0.0, 0.0]
    >>> gbt = gbt.setValidationIndicatorCol("validationIndicator")
    >>> gbt.getValidationIndicatorCol()
    'validationIndicator'
    >>> gbt.getValidationTol()
    0.01
    .. versionadded:: 1.4.0
    """
    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                 checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
                 impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
                 validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
                 weightCol=None):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
                 checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
                 impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
                 validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
                 weightCol=None)
        """
        super(GBTRegressor, self).__init__()
        self._java_obj = self._new_java_obj("org.apache.spark.ml.regression.GBTRegressor", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    # NOTE(review): the `impurity` parameter below was previously misspelled
    # `impuriy`, which made GBTRegressor(impurity=...) raise a TypeError inside
    # keyword_only; fixed to match __init__ and the Params attribute name.
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                  checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
                  impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
                  validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
                  weightCol=None):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
                  checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
                  impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
                  validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
                  weightCol=None)
        Sets params for Gradient Boosted Tree Regression.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        return GBTRegressionModel(java_model)
    @since("1.4.0")
    def setMaxDepth(self, value):
        """
        Sets the value of :py:attr:`maxDepth`.
        """
        return self._set(maxDepth=value)
    @since("1.4.0")
    def setMaxBins(self, value):
        """
        Sets the value of :py:attr:`maxBins`.
        """
        return self._set(maxBins=value)
    @since("1.4.0")
    def setMinInstancesPerNode(self, value):
        """
        Sets the value of :py:attr:`minInstancesPerNode`.
        """
        return self._set(minInstancesPerNode=value)
    @since("1.4.0")
    def setMinInfoGain(self, value):
        """
        Sets the value of :py:attr:`minInfoGain`.
        """
        return self._set(minInfoGain=value)
    @since("1.4.0")
    def setMaxMemoryInMB(self, value):
        """
        Sets the value of :py:attr:`maxMemoryInMB`.
        """
        return self._set(maxMemoryInMB=value)
    @since("1.4.0")
    def setCacheNodeIds(self, value):
        """
        Sets the value of :py:attr:`cacheNodeIds`.
        """
        return self._set(cacheNodeIds=value)
    @since("1.4.0")
    def setImpurity(self, value):
        """
        Sets the value of :py:attr:`impurity`.
        """
        return self._set(impurity=value)
    @since("1.4.0")
    def setLossType(self, value):
        """
        Sets the value of :py:attr:`lossType`.
        """
        return self._set(lossType=value)
    @since("1.4.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.
        """
        return self._set(subsamplingRate=value)
    @since("2.4.0")
    def setFeatureSubsetStrategy(self, value):
        """
        Sets the value of :py:attr:`featureSubsetStrategy`.
        """
        return self._set(featureSubsetStrategy=value)
    @since("3.0.0")
    def setValidationIndicatorCol(self, value):
        """
        Sets the value of :py:attr:`validationIndicatorCol`.
        """
        return self._set(validationIndicatorCol=value)
    @since("1.4.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)
    @since("1.4.0")
    def setCheckpointInterval(self, value):
        """
        Sets the value of :py:attr:`checkpointInterval`.
        """
        return self._set(checkpointInterval=value)
    @since("1.4.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)
    @since("1.4.0")
    def setStepSize(self, value):
        """
        Sets the value of :py:attr:`stepSize`.
        """
        return self._set(stepSize=value)
    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
    @since("3.0.0")
    def setMinWeightFractionPerNode(self, value):
        """
        Sets the value of :py:attr:`minWeightFractionPerNode`.
        """
        return self._set(minWeightFractionPerNode=value)
class GBTRegressionModel(
    _JavaRegressionModel, _TreeEnsembleModel, _GBTRegressorParams,
    JavaMLWritable, JavaMLReadable
):
    """
    Model produced by fitting a :class:`GBTRegressor`.
    .. versionadded:: 1.4.0
    """
    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.
        Each feature's importance is the average of its importance across all trees in the ensemble
        The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
        (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
        and follows the implementation from scikit-learn.
        .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
        """
        importances = self._call_java("featureImportances")
        return importances
    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        java_trees = list(self._call_java("trees"))
        return [DecisionTreeRegressionModel(jt) for jt in java_trees]
    @since("2.4.0")
    def evaluateEachIteration(self, dataset, loss):
        """
        Compute the error or loss for every iteration of gradient boosting.
        :param dataset:
            Test dataset to evaluate model on, where dataset is an
            instance of :py:class:`pyspark.sql.DataFrame`
        :param loss:
            The loss function used to compute error.
            Supported options: squared, absolute
        """
        return self._call_java("evaluateEachIteration", dataset, loss)
class _AFTSurvivalRegressionParams(_PredictorParams, HasMaxIter, HasTol, HasFitIntercept,
                                   HasAggregationDepth, HasBlockSize):
    """
    Shared params for :py:class:`AFTSurvivalRegression` and
    :py:class:`AFTSurvivalRegressionModel`.
    .. versionadded:: 3.0.0
    """
    censorCol = Param(
        Params._dummy(), "censorCol",
        "censor column name. The value of this column could be 0 or 1. " +
        "If the value is 1, it means the event has occurred i.e. " +
        "uncensored; otherwise censored.", typeConverter=TypeConverters.toString)
    quantileProbabilities = Param(
        Params._dummy(), "quantileProbabilities",
        "quantile probabilities array. Values of the quantile probabilities array " +
        "should be in the range (0, 1) and the array should be non-empty.",
        typeConverter=TypeConverters.toListFloat)
    quantilesCol = Param(
        Params._dummy(), "quantilesCol",
        "quantiles column name. This column will output quantiles of " +
        "corresponding quantileProbabilities if it is set.",
        typeConverter=TypeConverters.toString)
    def __init__(self):
        super(_AFTSurvivalRegressionParams, self).__init__()
        # Defaults mirror the Scala-side AFTSurvivalRegression defaults.
        defaults = dict(
            censorCol="censor",
            quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
            maxIter=100, tol=1E-6, blockSize=1)
        self._setDefault(**defaults)
    @since("1.6.0")
    def getCensorCol(self):
        """Return the value of :py:attr:`censorCol`, or its default."""
        return self.getOrDefault(self.censorCol)
    @since("1.6.0")
    def getQuantileProbabilities(self):
        """Return the value of :py:attr:`quantileProbabilities`, or its default."""
        return self.getOrDefault(self.quantileProbabilities)
    @since("1.6.0")
    def getQuantilesCol(self):
        """Return the value of :py:attr:`quantilesCol`, or its default."""
        return self.getOrDefault(self.quantilesCol)
@inherit_doc
class AFTSurvivalRegression(_JavaRegressor, _AFTSurvivalRegressionParams,
                            JavaMLWritable, JavaMLReadable):
    """
    Accelerated Failure Time (AFT) Model Survival Regression
    Fit a parametric AFT survival regression model based on the Weibull distribution
    of the survival time.
    .. seealso:: `AFT Model <https://en.wikipedia.org/wiki/Accelerated_failure_time_model>`_
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0), 1.0),
    ...     (1e-40, Vectors.sparse(1, [], []), 0.0)], ["label", "features", "censor"])
    >>> aftsr = AFTSurvivalRegression()
    >>> aftsr.setMaxIter(10)
    AFTSurvivalRegression...
    >>> aftsr.getMaxIter()
    10
    >>> aftsr.clear(aftsr.maxIter)
    >>> model = aftsr.fit(df)
    >>> model.getBlockSize()
    1
    >>> model.setFeaturesCol("features")
    AFTSurvivalRegressionModel...
    >>> model.predict(Vectors.dense(6.3))
    1.0
    >>> model.predictQuantiles(Vectors.dense(6.3))
    DenseVector([0.0101, 0.0513, 0.1054, 0.2877, 0.6931, 1.3863, 2.3026, 2.9957, 4.6052])
    >>> model.transform(df).show()
    +-------+---------+------+----------+
    |  label| features|censor|prediction|
    +-------+---------+------+----------+
    |    1.0|    [1.0]|   1.0|       1.0|
    |1.0E-40|(1,[],[])|   0.0|       1.0|
    +-------+---------+------+----------+
    ...
    >>> aftsr_path = temp_path + "/aftsr"
    >>> aftsr.save(aftsr_path)
    >>> aftsr2 = AFTSurvivalRegression.load(aftsr_path)
    >>> aftsr2.getMaxIter()
    100
    >>> model_path = temp_path + "/aftsr_model"
    >>> model.save(model_path)
    >>> model2 = AFTSurvivalRegressionModel.load(model_path)
    >>> model.coefficients == model2.coefficients
    True
    >>> model.intercept == model2.intercept
    True
    >>> model.scale == model2.scale
    True
    .. versionadded:: 1.6.0
    """
    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
                 quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]),
                 quantilesCol=None, aggregationDepth=2, blockSize=1):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
                 quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
                 quantilesCol=None, aggregationDepth=2, blockSize=1)
        """
        super(AFTSurvivalRegression, self).__init__()
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.AFTSurvivalRegression", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.6.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
                  quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]),
                  quantilesCol=None, aggregationDepth=2, blockSize=1):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
                  quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
                  quantilesCol=None, aggregationDepth=2, blockSize=1)
        Sets params for AFT survival regression.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        return AFTSurvivalRegressionModel(java_model)
    @since("1.6.0")
    def setCensorCol(self, value):
        """
        Sets the value of :py:attr:`censorCol`.
        """
        return self._set(censorCol=value)
    @since("1.6.0")
    def setQuantileProbabilities(self, value):
        """
        Sets the value of :py:attr:`quantileProbabilities`.
        """
        return self._set(quantileProbabilities=value)
    @since("1.6.0")
    def setQuantilesCol(self, value):
        """
        Sets the value of :py:attr:`quantilesCol`.
        """
        return self._set(quantilesCol=value)
    @since("1.6.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)
    @since("1.6.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)
    @since("1.6.0")
    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)
    @since("2.1.0")
    def setAggregationDepth(self, value):
        """
        Sets the value of :py:attr:`aggregationDepth`.
        """
        return self._set(aggregationDepth=value)
    @since("3.1.0")
    def setBlockSize(self, value):
        """
        Sets the value of :py:attr:`blockSize`.
        """
        return self._set(blockSize=value)
class AFTSurvivalRegressionModel(_JavaRegressionModel, _AFTSurvivalRegressionParams,
                                 JavaMLWritable, JavaMLReadable):
    """
    Model produced by fitting an :class:`AFTSurvivalRegression`.
    .. versionadded:: 1.6.0
    """
    @since("3.0.0")
    def setQuantileProbabilities(self, value):
        """Set :py:attr:`quantileProbabilities` to ``value``."""
        return self._set(**{"quantileProbabilities": value})
    @since("3.0.0")
    def setQuantilesCol(self, value):
        """Set :py:attr:`quantilesCol` to ``value``."""
        return self._set(**{"quantilesCol": value})
    @property
    @since("2.0.0")
    def coefficients(self):
        """Model coefficients."""
        return self._call_java("coefficients")
    @property
    @since("1.6.0")
    def intercept(self):
        """Model intercept."""
        return self._call_java("intercept")
    @property
    @since("1.6.0")
    def scale(self):
        """Model scale parameter."""
        return self._call_java("scale")
    @since("2.0.0")
    def predictQuantiles(self, features):
        """Return the predicted quantiles for ``features``."""
        quantiles = self._call_java("predictQuantiles", features)
        return quantiles
class _GeneralizedLinearRegressionParams(_PredictorParams, HasFitIntercept, HasMaxIter,
                                         HasTol, HasRegParam, HasWeightCol, HasSolver,
                                         HasAggregationDepth):
    """
    Shared params for :py:class:`GeneralizedLinearRegression` and
    :py:class:`GeneralizedLinearRegressionModel`.
    .. versionadded:: 3.0.0
    """
    family = Param(Params._dummy(), "family", "The name of family which is a description of " +
                   "the error distribution to be used in the model. Supported options: " +
                   "gaussian (default), binomial, poisson, gamma and tweedie.",
                   typeConverter=TypeConverters.toString)
    link = Param(Params._dummy(), "link", "The name of link function which provides the " +
                 "relationship between the linear predictor and the mean of the distribution " +
                 "function. Supported options: identity, log, inverse, logit, probit, cloglog " +
                 "and sqrt.", typeConverter=TypeConverters.toString)
    linkPredictionCol = Param(Params._dummy(), "linkPredictionCol", "link prediction (linear " +
                              "predictor) column name", typeConverter=TypeConverters.toString)
    variancePower = Param(Params._dummy(), "variancePower", "The power in the variance function " +
                          "of the Tweedie distribution which characterizes the relationship " +
                          "between the variance and mean of the distribution. Only applicable " +
                          "for the Tweedie family. Supported values: 0 and [1, Inf).",
                          typeConverter=TypeConverters.toFloat)
    linkPower = Param(Params._dummy(), "linkPower", "The index in the power link function. " +
                      "Only applicable to the Tweedie family.",
                      typeConverter=TypeConverters.toFloat)
    solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
                   "options: irls.", typeConverter=TypeConverters.toString)
    offsetCol = Param(Params._dummy(), "offsetCol", "The offset column name. If this is not set " +
                      "or empty, we treat all instance offsets as 0.0",
                      typeConverter=TypeConverters.toString)
    def __init__(self):
        super(_GeneralizedLinearRegressionParams, self).__init__()
        # Defaults mirror the Scala-side GeneralizedLinearRegression defaults.
        defaults = dict(family="gaussian", maxIter=25, tol=1e-6, regParam=0.0,
                        solver="irls", variancePower=0.0, aggregationDepth=2)
        self._setDefault(**defaults)
    @since("2.0.0")
    def getFamily(self):
        """Return the value of :py:attr:`family`, or its default."""
        return self.getOrDefault(self.family)
    @since("2.0.0")
    def getLinkPredictionCol(self):
        """Return the value of :py:attr:`linkPredictionCol`, or its default."""
        return self.getOrDefault(self.linkPredictionCol)
    @since("2.0.0")
    def getLink(self):
        """Return the value of :py:attr:`link`, or its default."""
        return self.getOrDefault(self.link)
    @since("2.2.0")
    def getVariancePower(self):
        """Return the value of :py:attr:`variancePower`, or its default."""
        return self.getOrDefault(self.variancePower)
    @since("2.2.0")
    def getLinkPower(self):
        """Return the value of :py:attr:`linkPower`, or its default."""
        return self.getOrDefault(self.linkPower)
    @since("2.3.0")
    def getOffsetCol(self):
        """Return the value of :py:attr:`offsetCol`, or its default."""
        return self.getOrDefault(self.offsetCol)
@inherit_doc
class GeneralizedLinearRegression(_JavaRegressor, _GeneralizedLinearRegressionParams,
                                  JavaMLWritable, JavaMLReadable):
    """
    Generalized Linear Regression.
    Fit a Generalized Linear Model specified by giving a symbolic description of the linear
    predictor (link function) and a description of the error distribution (family). It supports
    "gaussian", "binomial", "poisson", "gamma" and "tweedie" as family. Valid link functions for
    each family is listed below. The first link function of each family is the default one.
    * "gaussian" -> "identity", "log", "inverse"
    * "binomial" -> "logit", "probit", "cloglog"
    * "poisson" -> "log", "identity", "sqrt"
    * "gamma" -> "inverse", "identity", "log"
    * "tweedie" -> power link function specified through "linkPower". \
        The default link power in the tweedie family is 1 - variancePower.
    .. seealso:: `GLM <https://en.wikipedia.org/wiki/Generalized_linear_model>`_
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(0.0, 0.0)),
    ...     (1.0, Vectors.dense(1.0, 2.0)),
    ...     (2.0, Vectors.dense(0.0, 0.0)),
    ...     (2.0, Vectors.dense(1.0, 1.0)),], ["label", "features"])
    >>> glr = GeneralizedLinearRegression(family="gaussian", link="identity", linkPredictionCol="p")
    >>> glr.setRegParam(0.1)
    GeneralizedLinearRegression...
    >>> glr.getRegParam()
    0.1
    >>> glr.clear(glr.regParam)
    >>> glr.setMaxIter(10)
    GeneralizedLinearRegression...
    >>> glr.getMaxIter()
    10
    >>> glr.clear(glr.maxIter)
    >>> model = glr.fit(df)
    >>> model.setFeaturesCol("features")
    GeneralizedLinearRegressionModel...
    >>> model.getMaxIter()
    25
    >>> model.getAggregationDepth()
    2
    >>> transformed = model.transform(df)
    >>> abs(transformed.head().prediction - 1.5) < 0.001
    True
    >>> abs(transformed.head().p - 1.5) < 0.001
    True
    >>> model.coefficients
    DenseVector([1.5..., -1.0...])
    >>> model.numFeatures
    2
    >>> abs(model.intercept - 1.5) < 0.001
    True
    >>> glr_path = temp_path + "/glr"
    >>> glr.save(glr_path)
    >>> glr2 = GeneralizedLinearRegression.load(glr_path)
    >>> glr.getFamily() == glr2.getFamily()
    True
    >>> model_path = temp_path + "/glr_model"
    >>> model.save(model_path)
    >>> model2 = GeneralizedLinearRegressionModel.load(model_path)
    >>> model.intercept == model2.intercept
    True
    >>> model.coefficients[0] == model2.coefficients[0]
    True
    .. versionadded:: 2.0.0
    """
    @keyword_only
    def __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction",
                 family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
                 regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
                 variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2):
        """
        __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction", \
                 family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
                 regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
                 variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2)
        """
        super(GeneralizedLinearRegression, self).__init__()
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.GeneralizedLinearRegression", self.uid)
        self.setParams(**self._input_kwargs)
    @keyword_only
    @since("2.0.0")
    def setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction",
                  family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
                  regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
                  variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2):
        """
        setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction", \
                  family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
                  regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
                  variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2)
        Sets params for generalized linear regression.
        """
        return self._set(**self._input_kwargs)
    def _create_model(self, java_model):
        """Wrap the fitted Java model in a :class:`GeneralizedLinearRegressionModel`."""
        fitted = GeneralizedLinearRegressionModel(java_model)
        return fitted
    @since("2.0.0")
    def setFamily(self, value):
        """Set :py:attr:`family` to ``value``."""
        return self._set(**{"family": value})
    @since("2.0.0")
    def setLinkPredictionCol(self, value):
        """Set :py:attr:`linkPredictionCol` to ``value``."""
        return self._set(**{"linkPredictionCol": value})
    @since("2.0.0")
    def setLink(self, value):
        """Set :py:attr:`link` to ``value``."""
        return self._set(**{"link": value})
    @since("2.2.0")
    def setVariancePower(self, value):
        """Set :py:attr:`variancePower` to ``value``."""
        return self._set(**{"variancePower": value})
    @since("2.2.0")
    def setLinkPower(self, value):
        """Set :py:attr:`linkPower` to ``value``."""
        return self._set(**{"linkPower": value})
    @since("2.3.0")
    def setOffsetCol(self, value):
        """Set :py:attr:`offsetCol` to ``value``."""
        return self._set(**{"offsetCol": value})
    @since("2.0.0")
    def setMaxIter(self, value):
        """Set :py:attr:`maxIter` to ``value``."""
        return self._set(**{"maxIter": value})
    @since("2.0.0")
    def setRegParam(self, value):
        """Set :py:attr:`regParam` to ``value``."""
        return self._set(**{"regParam": value})
    @since("2.0.0")
    def setTol(self, value):
        """Set :py:attr:`tol` to ``value``."""
        return self._set(**{"tol": value})
    @since("2.0.0")
    def setFitIntercept(self, value):
        """Set :py:attr:`fitIntercept` to ``value``."""
        return self._set(**{"fitIntercept": value})
    @since("2.0.0")
    def setWeightCol(self, value):
        """Set :py:attr:`weightCol` to ``value``."""
        return self._set(**{"weightCol": value})
    @since("2.0.0")
    def setSolver(self, value):
        """Set :py:attr:`solver` to ``value``."""
        return self._set(**{"solver": value})
    @since("3.0.0")
    def setAggregationDepth(self, value):
        """Set :py:attr:`aggregationDepth` to ``value``."""
        return self._set(**{"aggregationDepth": value})
class GeneralizedLinearRegressionModel(_JavaRegressionModel, _GeneralizedLinearRegressionParams,
                                       JavaMLWritable, JavaMLReadable, HasTrainingSummary):
    """
    Model fitted by :class:`GeneralizedLinearRegression`.

    Properties delegate to the wrapped JVM model via ``_call_java``.
    .. versionadded:: 2.0.0
    """
    @since("3.0.0")
    def setLinkPredictionCol(self, value):
        """
        Sets the value of :py:attr:`linkPredictionCol`.
        """
        return self._set(linkPredictionCol=value)
    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")
    @property
    @since("2.0.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")
    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (e.g. residuals, deviance, pValues) of model on
        training set. An exception is thrown if
        `trainingSummary is None`.
        """
        # hasSummary is False e.g. for models loaded from disk, so raise a
        # clear error instead of returning a broken wrapper.
        if self.hasSummary:
            return GeneralizedLinearRegressionTrainingSummary(
                super(GeneralizedLinearRegressionModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)
    @since("2.0.0")
    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.
        :param dataset:
          Test dataset to evaluate model on, where dataset is an
          instance of :py:class:`pyspark.sql.DataFrame`
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        java_glr_summary = self._call_java("evaluate", dataset)
        return GeneralizedLinearRegressionSummary(java_glr_summary)
class GeneralizedLinearRegressionSummary(JavaWrapper):
    """
    Generalized linear regression results evaluated on a dataset.

    All accessors below delegate to the wrapped JVM summary object via
    ``_call_java``.
    .. versionadded:: 2.0.0
    """
    @property
    @since("2.0.0")
    def predictions(self):
        """
        Predictions output by the model's `transform` method.
        """
        return self._call_java("predictions")
    @property
    @since("2.0.0")
    def predictionCol(self):
        """
        Field in :py:attr:`predictions` which gives the predicted value of each instance.
        This is set to a new column name if the original model's `predictionCol` is not set.
        """
        return self._call_java("predictionCol")
    @property
    @since("2.2.0")
    def numInstances(self):
        """
        Number of instances in DataFrame predictions.
        """
        return self._call_java("numInstances")
    @property
    @since("2.0.0")
    def rank(self):
        """
        The numeric rank of the fitted linear model.
        """
        return self._call_java("rank")
    @property
    @since("2.0.0")
    def degreesOfFreedom(self):
        """
        Degrees of freedom.
        """
        return self._call_java("degreesOfFreedom")
    @property
    @since("2.0.0")
    def residualDegreeOfFreedom(self):
        """
        The residual degrees of freedom.
        """
        return self._call_java("residualDegreeOfFreedom")
    @property
    @since("2.0.0")
    def residualDegreeOfFreedomNull(self):
        """
        The residual degrees of freedom for the null model.
        """
        return self._call_java("residualDegreeOfFreedomNull")
    @since("2.0.0")
    def residuals(self, residualsType="deviance"):
        """
        Get the residuals of the fitted model by type.
        :param residualsType: The type of residuals which should be returned.
                              Supported options: deviance (default), pearson, working, and response.
        """
        return self._call_java("residuals", residualsType)
    @property
    @since("2.0.0")
    def nullDeviance(self):
        """
        The deviance for the null model.
        """
        return self._call_java("nullDeviance")
    @property
    @since("2.0.0")
    def deviance(self):
        """
        The deviance for the fitted model.
        """
        return self._call_java("deviance")
    @property
    @since("2.0.0")
    def dispersion(self):
        """
        The dispersion of the fitted model.
        It is taken as 1.0 for the "binomial" and "poisson" families, and otherwise
        estimated by the residual Pearson's Chi-Squared statistic (which is defined as
        sum of the squares of the Pearson residuals) divided by the residual degrees of freedom.
        """
        return self._call_java("dispersion")
    @property
    @since("2.0.0")
    def aic(self):
        """
        Akaike's "An Information Criterion"(AIC) for the fitted model.
        """
        return self._call_java("aic")
@inherit_doc
class GeneralizedLinearRegressionTrainingSummary(GeneralizedLinearRegressionSummary):
    """
    Generalized linear regression training results.

    Extends the evaluation summary with training-only statistics
    (iterations, solver, standard errors, t- and p-values).
    .. versionadded:: 2.0.0
    """
    @property
    @since("2.0.0")
    def numIterations(self):
        """
        Number of training iterations.
        """
        return self._call_java("numIterations")
    @property
    @since("2.0.0")
    def solver(self):
        """
        The numeric solver used for training.
        """
        return self._call_java("solver")
    @property
    @since("2.0.0")
    def coefficientStandardErrors(self):
        """
        Standard error of estimated coefficients and intercept.
        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("coefficientStandardErrors")
    @property
    @since("2.0.0")
    def tValues(self):
        """
        T-statistic of estimated coefficients and intercept.
        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("tValues")
    @property
    @since("2.0.0")
    def pValues(self):
        """
        Two-sided p-value of estimated coefficients and intercept.
        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("pValues")
    def __repr__(self):
        # Delegate the string form to the JVM summary.
        return self._call_java("toString")
class _FactorizationMachinesParams(_PredictorParams, HasMaxIter, HasStepSize, HasTol,
                                   HasSolver, HasSeed, HasFitIntercept, HasRegParam):
    """
    Params for :py:class:`FMRegressor`, :py:class:`FMRegressionModel`, :py:class:`FMClassifier`
    and :py:class:`FMClassifierModel`.
    .. versionadded:: 3.0.0
    """
    # Shared Param declarations: (parent placeholder, name, doc, converter).
    factorSize = Param(Params._dummy(), "factorSize", "Dimensionality of the factor vectors, " +
                       "which are used to get pairwise interactions between variables",
                       typeConverter=TypeConverters.toInt)
    fitLinear = Param(Params._dummy(), "fitLinear", "whether to fit linear term (aka 1-way term)",
                      typeConverter=TypeConverters.toBoolean)
    miniBatchFraction = Param(Params._dummy(), "miniBatchFraction", "fraction of the input data " +
                              "set that should be used for one iteration of gradient descent",
                              typeConverter=TypeConverters.toFloat)
    initStd = Param(Params._dummy(), "initStd", "standard deviation of initial coefficients",
                    typeConverter=TypeConverters.toFloat)
    solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
                   "options: gd, adamW. (Default adamW)", typeConverter=TypeConverters.toString)
    def __init__(self):
        super(_FactorizationMachinesParams, self).__init__()
        # Defaults mirror the Scala-side FactorizationMachines defaults.
        self._setDefault(factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
                         miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
                         tol=1e-6, solver="adamW")
    @since("3.0.0")
    def getFactorSize(self):
        """
        Gets the value of factorSize or its default value.
        """
        return self.getOrDefault(self.factorSize)
    @since("3.0.0")
    def getFitLinear(self):
        """
        Gets the value of fitLinear or its default value.
        """
        return self.getOrDefault(self.fitLinear)
    @since("3.0.0")
    def getMiniBatchFraction(self):
        """
        Gets the value of miniBatchFraction or its default value.
        """
        return self.getOrDefault(self.miniBatchFraction)
    @since("3.0.0")
    def getInitStd(self):
        """
        Gets the value of initStd or its default value.
        """
        return self.getOrDefault(self.initStd)
@inherit_doc
class FMRegressor(_JavaRegressor, _FactorizationMachinesParams, JavaMLWritable, JavaMLReadable):
    """
    Factorization Machines learning algorithm for regression.
    solver Supports:
    * gd (normal mini-batch gradient descent)
    * adamW (default)
    >>> from pyspark.ml.linalg import Vectors
    >>> from pyspark.ml.regression import FMRegressor
    >>> df = spark.createDataFrame([
    ...     (2.0, Vectors.dense(2.0)),
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>>
    >>> fm = FMRegressor(factorSize=2)
    >>> fm.setSeed(16)
    FMRegressor...
    >>> model = fm.fit(df)
    >>> model.getMaxIter()
    100
    >>> test0 = spark.createDataFrame([
    ...     (Vectors.dense(-2.0),),
    ...     (Vectors.dense(0.5),),
    ...     (Vectors.dense(1.0),),
    ...     (Vectors.dense(4.0),)], ["features"])
    >>> model.transform(test0).show(10, False)
    +--------+-------------------+
    |features|prediction         |
    +--------+-------------------+
    |[-2.0]  |-1.9989237712341565|
    |[0.5]   |0.4956682219523814 |
    |[1.0]   |0.994586620589689  |
    |[4.0]   |3.9880970124135344 |
    +--------+-------------------+
    ...
    >>> model.intercept
    -0.0032501766849261557
    >>> model.linear
    DenseVector([0.9978])
    >>> model.factors
    DenseMatrix(1, 2, [0.0173, 0.0021], 1)
    .. versionadded:: 3.0.0
    """
    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
                 miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
                 tol=1e-6, solver="adamW", seed=None):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
                 miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
                 tol=1e-6, solver="adamW", seed=None)
        """
        super(FMRegressor, self).__init__()
        # Create the JVM-side estimator this wrapper delegates to.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.FMRegressor", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("3.0.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
                  miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
                  tol=1e-6, solver="adamW", seed=None):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
                  miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
                  tol=1e-6, solver="adamW", seed=None)
        Sets Params for FMRegressor.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python model class.
        return FMRegressionModel(java_model)
    # Thin Param setters below: each one delegates to Params._set.
    @since("3.0.0")
    def setFactorSize(self, value):
        """
        Sets the value of :py:attr:`factorSize`.
        """
        return self._set(factorSize=value)
    @since("3.0.0")
    def setFitLinear(self, value):
        """
        Sets the value of :py:attr:`fitLinear`.
        """
        return self._set(fitLinear=value)
    @since("3.0.0")
    def setMiniBatchFraction(self, value):
        """
        Sets the value of :py:attr:`miniBatchFraction`.
        """
        return self._set(miniBatchFraction=value)
    @since("3.0.0")
    def setInitStd(self, value):
        """
        Sets the value of :py:attr:`initStd`.
        """
        return self._set(initStd=value)
    @since("3.0.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)
    @since("3.0.0")
    def setStepSize(self, value):
        """
        Sets the value of :py:attr:`stepSize`.
        """
        return self._set(stepSize=value)
    @since("3.0.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)
    @since("3.0.0")
    def setSolver(self, value):
        """
        Sets the value of :py:attr:`solver`.
        """
        return self._set(solver=value)
    @since("3.0.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)
    @since("3.0.0")
    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)
    @since("3.0.0")
    def setRegParam(self, value):
        """
        Sets the value of :py:attr:`regParam`.
        """
        return self._set(regParam=value)
class FMRegressionModel(_JavaRegressionModel, _FactorizationMachinesParams, JavaMLWritable,
                        JavaMLReadable):
    """
    Model fitted by :class:`FMRegressor`.

    Read-only accessors delegate to the wrapped JVM model via ``_call_java``.
    .. versionadded:: 3.0.0
    """
    @property
    @since("3.0.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")
    @property
    @since("3.0.0")
    def linear(self):
        """
        Model linear term.
        """
        return self._call_java("linear")
    @property
    @since("3.0.0")
    def factors(self):
        """
        Model factor term.
        """
        return self._call_java("factors")
if __name__ == "__main__":
    # Doctest runner for pyspark.ml.regression: builds a local SparkSession,
    # runs the module doctests against it and exits non-zero on failure.
    import doctest
    import pyspark.ml.regression
    from pyspark.sql import SparkSession
    globs = pyspark.ml.regression.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.regression tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    import tempfile
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    finally:
        # BUG FIX: spark.stop() used to live inside the try body, so an
        # exception from testmod() left the JVM/session running. Stop the
        # session and remove the scratch dir unconditionally.
        spark.stop()
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        sys.exit(-1)
|
dbtsai/spark
|
python/pyspark/ml/regression.py
|
Python
|
apache-2.0
| 89,077
|
[
"Gaussian"
] |
8713d484fe83b5eab74295e2823d5903f0678af6b3e2cd2c0397a5d176626c0d
|
import numpy
import os.path
import sys
try:
import vtk
from vtk.util import numpy_support
except:
pass
class VelocityModel:
    '''Subsurface velocity model object. Used to read, write and manage
    model data.

    Velocities live in ``self.vp`` (a numpy array, km/s); ``self.spacing``
    and ``self.origin`` hold the grid geometry with the slowest axis first
    (reversed relative to the RSF header order n1/n2/n3).
    '''

    def __init__(self, filename=None, vp=None, subsample=1):
        self.filename = filename
        self.vp = vp
        if filename:
            self.read(filename=filename, subsample=subsample)

    def create_model(self, origin, spacing, vp):
        # Populate the model in memory instead of reading it from disk.
        self.vp = vp
        self.spacing = spacing
        self.origin = origin

    def get_critical_dt(self):
        # Stable time-step heuristic: half the grid spacing divided by the
        # fastest velocity in the model.
        if self.vp is None:
            self.read()
        return 0.5 * self.spacing[0] / (numpy.max(self.vp))

    def get_spacing(self):
        if self.vp is None:
            self.read()
        return self.spacing[0]

    def set_origin(self, shift):
        # Shift the origin by `shift` whole grid cells along each axis.
        self.origin = [self.origin[i] - shift * self.spacing[i]
                       for i in range(len(self.origin))]

    def get_model(self):
        if self.vp is None:
            self.read()
        return self.vp

    def get_shape(self):
        return self.vp.shape

    def read(self, filename=None, subsample=1):
        '''Read an RSF model file, optionally subsampling the grid.

        Raises IOError for missing or invalid files and NotImplementedError
        when parsing fails.
        '''
        if filename:
            self.filename = filename
        if not os.path.isfile(self.filename):
            raise IOError('Cannot find model file: ' + self.filename)
        try:
            self.vp = None
            rsf = open(self.filename, 'r')
            header = {}
            data_types = {'real': numpy.float32}
            for line in rsf:
                if '=' in line:
                    # Standard specifies that no spaces are
                    # allowed between the key, value and the = sign.
                    for pairs in line.split():
                        key, value = pairs.split('=')
                        header[key] = value
                elif '\014\014\004' in line[:3]:
                    # Inline data follows the ^L^L^D marker.
                    self.vp = numpy.fromfile(
                        line[3:],
                        dtype=data_types[header['data_type']]
                    )
            rsf.close()
            if self.vp is None and 'in' in header.keys():
                datafile = os.path.join(os.path.dirname(self.filename),
                                        header['in'])
                if not os.path.isfile(datafile):
                    raise IOError('Cannot find model file: ' + datafile)
                self.vp = numpy.fromfile(datafile,
                                         dtype=data_types[header['data_type']])
        except IOError:
            # Missing-data-file errors must propagate unchanged (they were
            # previously swallowed by the bare except below).
            raise
        except Exception:
            # BUG FIX: the exception was constructed but never raised,
            # silently swallowing parse failures.
            raise NotImplementedError("Failed to read model file: " +
                                      self.filename)
        if len(header.keys()) == 0:
            raise IOError('This does not look a valid RSF file: ' +
                          self.filename)
        shape = []
        self.spacing = []
        self.origin = []
        dim = 3
        for i in (1, 2, 3):
            if 'n%d' % i in header.keys():
                if i == 3 and int(header['n%d' % i]) == 1:
                    # A third axis of length 1 means the model is 2D.
                    dim = 2
                    break
                shape.append(int(header['n%d' % i]))
                self.spacing.append(float(header['d%d' % i]))
                self.origin.append(float(header['o%d' % i]))
        # RSF orders axes fastest-first; store them slowest-first.
        shape = list(reversed(shape))
        self.spacing = list(reversed(self.spacing))
        self.origin = list(reversed(self.origin))
        self.vp = numpy.reshape(self.vp, shape)
        if subsample > 1:
            if dim == 2:
                self.vp = self.vp[::subsample, ::subsample]
            else:
                self.vp = self.vp[::subsample, ::subsample, ::subsample]
            for i in range(dim):
                self.spacing[i] = self.spacing[i] * subsample
        # Heuristic to check sanity of velocity model.
        # For example, if velocity is in m/s rather than km/s (want km/s).
        min_vp = self.vp.min()
        if min_vp > 1000.0:
            self.vp = self.vp * 1.0e-3
            # BUG FIX: was ``min_vp ** 1.0e-3`` (exponentiation); the
            # minimum must be rescaled the same way as the model itself.
            min_vp = min_vp * 1.0e-3
        max_vp = self.vp.max()
        if min_vp < 1.0 or max_vp > 12.0:
            # BUG FIX: added the missing space between the concatenated
            # message fragments ("likea" -> "like a").
            raise IOError("Velocity model doesn't look like " +
                          "a velocity model - exotic values...")
        if min(self.spacing) != max(self.spacing):
            raise IOError("Velocity model has anisotropic " +
                          "spacing - not supported.")
        return

    def write(self, filename):
        '''Write the model as an RSF header in `filename` plus a raw data
        file named `filename`@.'''
        rsf = open(filename, 'w')
        if len(self.vp.shape) == 2:
            header = 'n1=%d d1=%f o1=%f\n' % (self.vp.shape[1],
                                              self.spacing[1],
                                              self.origin[1]) + \
                     'n2=%d d2=%f o2=%f\n' % (self.vp.shape[0],
                                              self.spacing[0],
                                              self.origin[0]) + \
                     'n3=1 d3=%f o3=%f\n' % (self.spacing[0],
                                             self.origin[0])
        else:
            header = 'n1=%d d1=%f o1=%f\n' % (self.vp.shape[2],
                                              self.spacing[2],
                                              self.origin[2]) + \
                     'n2=%d d2=%f o2=%f\n' % (self.vp.shape[1],
                                              self.spacing[1],
                                              self.origin[1]) + \
                     'n3=%d d3=%f o3=%f\n' % (self.vp.shape[0],
                                              self.spacing[0],
                                              self.origin[0])
        header = header + "data_format=native_float\n" + \
                          "data_type=real\n" + \
                          "in=%s@\n" % filename
        rsf.write(header)
        rsf.close()
        self.vp.tofile(filename + "@")

    def export_vtk(self, filename):
        '''Export the model as a binary VTK structured-points file (no-op
        warning when the vtk module could not be imported).'''
        if 'vtk' in sys.modules:
            vtp = vtk.vtkStructuredPoints()
            if len(self.vp.shape) == 2:
                vtp.SetDimensions(self.vp.shape + (1, ))
                vtp.SetOrigin(self.origin + [0.0, ])
                vtp.SetSpacing(self.spacing + [self.spacing[0], ])
            else:
                # BUG FIX: list.reverse() reverses in place and returns
                # None (and tuples have no reverse() at all), so the
                # original passed None to VTK here. Use reversed copies.
                vtp.SetDimensions(tuple(reversed(self.vp.shape)))
                vtp.SetOrigin(list(reversed(self.origin)))
                vtp.SetSpacing(list(reversed(self.spacing)))
            flip_vp = numpy.transpose(numpy.fliplr(self.vp)).ravel()
            vp = numpy_support.numpy_to_vtk(num_array=flip_vp, deep=True,
                                            array_type=vtk.VTK_FLOAT)
            vp.SetName("Vp")
            vtp.GetPointData().AddArray(vp)
            writer = vtk.vtkStructuredPointsWriter()
            try:
                # Older VTK releases use SetInput().
                writer.SetInput(vtp)
            except Exception:
                writer.SetInputData(vtp)
            writer.SetFileName(filename)
            writer.SetFileTypeToBinary()
            writer.Write()
        else:
            # BUG FIX: Python 2 print statement -> print() so the module
            # also parses under Python 3 (unchanged output on Python 2).
            print("WARNING: VTK support not available.")
|
opesci/inversion
|
python/VelocityModel.py
|
Python
|
bsd-3-clause
| 6,993
|
[
"VTK"
] |
9ce713d3711fcc734298d5167014a2239c6036635cc5e960addb6063c9eaacff
|
# -*- coding:utf-8 -*-
"""
TINY PORT of https://github.com/paularmstrong/normalizr
"""
class Schema(object):
    """Abstract base for normalizr schemas.

    Concrete schemas must implement :meth:`normalize` and
    :meth:`denormalize`.
    """

    def normalize(self, value, parent, key, schema, addEntity, visit):
        # BUG FIX: ``assert 0`` is stripped when Python runs with -O, making
        # the "not implemented" guard disappear; raise the canonical
        # exception instead.
        raise NotImplementedError('normalize')

    def denormalize(self, inputData, unvisit):
        raise NotImplementedError('denormalize')
class ArraySchema(object):
    """Schema wrapper that applies one child schema to every element of a
    list (or tuple) and returns the transformed elements as a list."""

    def __init__(self, entity):
        # Child schema applied to each element.
        self.entity = entity

    def normalize(self, inputData, parent, key, schema, addEntity, visit):
        assert isinstance(inputData, (list, tuple))
        return [visit(item, parent, key, self.entity, addEntity)
                for item in inputData]

    def denormalize(self, inputData, unvisit):
        assert isinstance(inputData, (list, tuple))
        return [unvisit(item, self.entity) for item in inputData]
class Entity(Schema):
    """Schema describing a single entity type.

    ``key`` names the bucket entities are stored under, ``klass`` is the
    constructor applied to the normalized attribute dict, and ``definition``
    maps attribute names to nested schemas.
    """

    def __init__(self, key, klass, definition=None):
        self.key = key
        self.klass = klass
        self.schema = definition or {}

    def getId(self, inputData):
        # Default identity accessor; override for entities keyed on
        # something other than 'id'.
        return inputData['id']

    def normalize(self, inputData, parent, key, schema, addEntity, visit):
        assert isinstance(inputData, dict) or inputData is None
        if inputData is None:
            return None
        # Work on a copy so the caller's dict is never mutated.
        inputData = inputData.copy()
        for subkey, subschema in self.schema.items():
            if subkey in inputData:
                inputData[subkey] = visit(inputData[subkey], inputData, subkey, subschema, addEntity)
        # Compute the id once (was previously evaluated twice).
        valueId = self.getId(inputData)
        addEntity(schema, valueId, self.klass(**inputData))
        return valueId

    def denormalize(self, inputData, unvisit):
        assert isinstance(inputData, dict)
        # NOTE: mutates inputData in place, filling missing sub-keys with
        # None and recursing into present ones.
        for subkey, subschema in self.schema.items():
            if subkey not in inputData:
                inputData[subkey] = None
            else:
                inputData[subkey] = unvisit(inputData[subkey], subschema)
        return inputData
def normalize(inputData, schema):
    """Normalize ``inputData`` against ``schema``.

    Returns ``(entities, result)`` where ``entities`` maps schema key ->
    entity id -> entity object, and ``result`` is the id (or structure of
    ids) replacing the input. Relies on the module-level ``visit``.
    """
    entities = {}
    def addEntity(schema, valueId, value):
        # Bucket every entity encountered during the walk by its schema key.
        entities.setdefault(schema.key, {})[valueId] = value
    return entities, visit(inputData, None, None, schema, addEntity)
def denormalize(inputData, schema, entities):
    """Rebuild nested data from the normalized ``entities`` table.

    ``inputData`` is an id (or structure of ids) previously produced by
    :func:`normalize`; stored entity objects are expected to expose
    ``to_dict()``.
    """
    def _unvisit(inputData, schema):
        schema = schemaize(schema)
        if isinstance(schema, Entity):
            if inputData is None:
                return None
            # Resolve the id to its stored entity, then recurse into it.
            entity = entities[schema.key][inputData].to_dict()
            return schema.denormalize(entity, _unvisit)
        else:
            return schema.denormalize(inputData, _unvisit)
    return _unvisit(inputData, schema)
def visit(value, parent, key, schema, addEntity):
    """Dispatch normalization of ``value`` to its (shorthand-wrapped) schema."""
    schema = schemaize(schema)
    return schema.normalize(value, parent, key, schema, addEntity, visit)
def schemaize(schema):
    """Wrap shorthand list definitions ([Entity]) in an ArraySchema; real
    schema objects (anything exposing ``normalize``) pass straight through,
    as does anything else."""
    if hasattr(schema, 'normalize'):
        return schema
    if isinstance(schema, list):
        assert len(schema) == 1
        return ArraySchema(schema[0])
    return schema
|
glucoseinc/naumanni-server
|
naumanni/normalizr.py
|
Python
|
agpl-3.0
| 3,134
|
[
"VisIt"
] |
c7453a608122e381476cac6735d6a3d60542ac2959d387cc5df3dc9d6c6718f7
|
"""
Given a list of airline tickets represented by pairs of departure and arrival airports [from, to], reconstruct the itinerary in order. All of the tickets belong to a man who departs from JFK. Thus, the itinerary must begin with JFK.
Note:
If there are multiple valid itineraries, you should return the itinerary that has the smallest lexical order when read as a single string. For example, the itinerary ["JFK", "LGA"] has a smaller lexical order than ["JFK", "LGB"].
All airports are represented by three capital letters (IATA code).
You may assume all tickets form at least one valid itinerary.
One must use all the tickets once and only once.
Example 1:
Input: [["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]]
Output: ["JFK", "MUC", "LHR", "SFO", "SJC"]
Example 2:
Input: [["JFK","SFO"],["JFK","ATL"],["SFO","ATL"],["ATL","JFK"],["ATL","SFO"]]
Output: ["JFK","ATL","JFK","SFO","ATL","SFO"]
Explanation: Another possible reconstruction is ["JFK","SFO","ATL","JFK","ATL","SFO"].
But it is larger in lexical order.
"""
class Solution:
    """Reconstruct the lexically-smallest itinerary via Hierholzer's
    algorithm: always consume the smallest available destination, append
    airports post-order when they dead-end, and reverse at the end."""

    def findItinerary(self, tickets: 'List[List[str]]') -> 'List[str]':
        # BUG FIX: annotations are string literals because this file never
        # imports ``List`` from ``typing``; the bare name raised NameError
        # at class-definition time.
        import collections
        targets = collections.defaultdict(list)
        # Reverse-sorted so that list.pop() yields the smallest destination.
        for a, b in sorted(tickets)[::-1]:
            targets[a].append(b)
        route = []
        def visit(airport):
            while targets[airport]:
                visit(targets[airport].pop())
            route.append(airport)  # appended once this node dead-ends
        visit('JFK')
        return route[::-1]
class Solution:
    """Reconstruct the itinerary by DFS backtracking over lexically-sorted
    edges; the first complete path found is the smallest one. Slower than
    Hierholzer's algorithm, kept as the original alternative."""

    def findItinerary(self, tickets: 'List[List[str]]') -> 'List[str]':
        # BUG FIX: ``defaultdict`` was used without importing it (and
        # ``List`` without importing typing), raising NameError. The import
        # is local and the annotations are string literals.
        from collections import defaultdict
        graph = defaultdict(list)
        for st, ed in tickets:
            graph[st].append(ed)
        for st in graph:
            graph[st].sort()
        final_path = []
        def visit(curr, path):
            if final_path:
                return  # a complete itinerary was already recorded
            path.append(curr)
            if not graph:
                # Every ticket consumed: record the first complete path.
                for p in path:
                    final_path.append(p)
                return
            nes = graph.get(curr, [])
            for ne in nes:
                # Remove the edge, recurse, then restore it on backtrack.
                idx = graph[curr].index(ne)
                graph[curr].remove(ne)
                if len(graph[curr]) == 0:
                    del graph[curr]
                visit(ne, path)
                graph[curr].insert(idx, ne)
                path.pop()
        visit('JFK', [])
        return final_path
|
franklingu/leetcode-solutions
|
questions/reconstruct-itinerary/Solution.py
|
Python
|
mit
| 2,419
|
[
"VisIt"
] |
a825b7dff97a26a1ddeafedf5aca037dfcf32a0717a80ff816c4d50eb7e5dea1
|
# -*- coding: utf-8 -*-
import os
import re
import zipfile
from django import forms
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from mock import Mock, patch
from nose.tools import eq_
from mkt.files.helpers import FileViewer, DiffHelper
from mkt.files.utils import SafeUnzip
from mkt.site.storage_utils import (copy_stored_file, local_storage,
private_storage, storage_is_remote)
from mkt.site.tests import MktPaths, TestCase
root = os.path.join(settings.ROOT, 'mkt/files/fixtures/files')
def get_file(x):
    # Resolve fixture name *x* against the module-level fixture root.
    return '{0}/{1}'.format(root, x)
def make_file(pk, file_path, **kwargs):
    """Build a Mock standing in for a File model instance.

    ``pk`` becomes the id, ``file_path`` the on-disk path; extra kwargs are
    set as attributes. A stub ``version`` object (version number 1) is
    attached.
    """
    obj = Mock()
    obj.id = pk
    for k, v in kwargs.items():
        setattr(obj, k, v)
    obj.file_path = file_path
    obj.__str__ = lambda x: x.pk
    obj.version = Mock()
    obj.version.version = 1
    return obj
# TODO: It'd be nice if these used packaged app examples but these addons still
# flex the code so it wasn't converted.
class TestFileHelper(TestCase):
    """Tests for FileViewer extraction, listing and file-reading behaviour,
    driven by the dictionary-test.xpi fixture."""
    def setUp(self):
        fn = get_file('dictionary-test.xpi')
        if storage_is_remote():
            # Mirror the local fixture into the remote (e.g. S3) storage.
            copy_stored_file(
                fn, fn,
                src_storage=local_storage, dst_storage=private_storage)
        self.viewer = FileViewer(make_file(1, fn))
    def tearDown(self):
        self.viewer.cleanup()
    def test_files_not_extracted(self):
        eq_(self.viewer.is_extracted(), False)
    def test_files_extracted(self):
        self.viewer.extract()
        eq_(self.viewer.is_extracted(), True)
    def test_cleanup(self):
        self.viewer.extract()
        self.viewer.cleanup()
        eq_(self.viewer.is_extracted(), False)
    def test_truncate(self):
        # (input, expected shortened display name) pairs, including
        # unicode file names.
        truncate = self.viewer.truncate
        for x, y in (['foo.rdf', 'foo.rdf'],
                     ['somelongfilename.rdf', 'somelongfilenam...rdf'],
                     [u'unicode삮.txt', u'unicode\uc0ae.txt'],
                     [u'unicodesomelong삮.txt', u'unicodesomelong...txt'],
                     ['somelongfilename.somelongextension',
                      'somelongfilenam...somelonge..'],):
            eq_(truncate(x), y)
    def test_get_files_not_extracted(self):
        assert not self.viewer.get_files()
    def test_get_files_size(self):
        self.viewer.extract()
        files = self.viewer.get_files()
        eq_(len(files), 15)
    def test_get_files_directory(self):
        self.viewer.extract()
        files = self.viewer.get_files()
        eq_(files['install.js']['directory'], False)
        eq_(files['install.js']['binary'], False)
        eq_(files['__MACOSX']['directory'], True)
        eq_(files['__MACOSX']['binary'], False)
    def test_url_file(self):
        self.viewer.extract()
        files = self.viewer.get_files()
        url = reverse('mkt.files.list', args=[self.viewer.file.id, 'file',
                                              'install.js'])
        assert files['install.js']['url'].endswith(url)
    def test_get_files_depth(self):
        self.viewer.extract()
        files = self.viewer.get_files()
        eq_(files['dictionaries/license.txt']['depth'], 1)
    def test_bom(self):
        # A UTF-16 byte-order mark must be stripped when reading.
        dest = os.path.join(settings.TMP_PATH, 'test_bom')
        with private_storage.open(dest, 'w') as f:
            f.write('foo'.encode('utf-16'))
        self.viewer.select('foo')
        self.viewer.selected = {'full': dest, 'size': 1}
        eq_(self.viewer.read_file(), u'foo')
        private_storage.delete(dest)
    def test_syntax(self):
        for filename, syntax in [('foo.rdf', 'xml'),
                                 ('foo.xul', 'xml'),
                                 ('foo.json', 'js'),
                                 ('foo.jsm', 'js'),
                                 ('foo.js', 'js'),
                                 ('manifest.webapp', 'js'),
                                 ('foo.html', 'html'),
                                 ('foo.css', 'css'),
                                 ('foo.bar', 'plain')]:
            eq_(self.viewer.get_syntax(filename), syntax)
    def test_file_order(self):
        self.viewer.extract()
        dest = self.viewer.dest
        private_storage.open(os.path.join(dest, 'manifest.webapp'),
                             'w').close()
        subdir = os.path.join(dest, 'chrome')
        with private_storage.open(os.path.join(subdir, 'foo'), 'w') as f:
            f.write('.')
        if not private_storage.exists(subdir):
            # Might be on S3, which doesn't have directories (and
            # django-storages doesn't support empty files).
            with private_storage.open(subdir, 'w') as f:
                f.write('.')
        cache.clear()
        # Directories must sort next to their contents.
        files = self.viewer.get_files().keys()
        rt = files.index(u'chrome')
        eq_(files[rt:rt + 3], [u'chrome', u'chrome/foo', u'dictionaries'])
    @patch.object(settings, 'FILE_VIEWER_SIZE_LIMIT', 5)
    def test_file_size(self):
        self.viewer.extract()
        self.viewer.get_files()
        self.viewer.select('install.js')
        res = self.viewer.read_file()
        eq_(res, '')
        assert self.viewer.selected['msg'].startswith('File size is')
    @patch.object(settings, 'FILE_VIEWER_SIZE_LIMIT', 5)
    def test_file_size_unicode(self):
        # Same as above but under a non-English locale.
        with self.activate(locale='he'):
            self.viewer.extract()
            self.viewer.get_files()
            self.viewer.select('install.js')
            res = self.viewer.read_file()
            eq_(res, '')
            assert self.viewer.selected['msg'].startswith('File size is')
    @patch.object(settings, 'FILE_UNZIP_SIZE_LIMIT', 5)
    def test_contents_size(self):
        self.assertRaises(forms.ValidationError, self.viewer.extract)
    def test_default(self):
        eq_(self.viewer.get_default(None), 'manifest.webapp')
    def test_delete_mid_read(self):
        self.viewer.extract()
        self.viewer.select('install.js')
        private_storage.delete(os.path.join(self.viewer.dest, 'install.js'))
        res = self.viewer.read_file()
        eq_(res, '')
        assert self.viewer.selected['msg'].startswith('That file no')
    @patch('mkt.files.helpers.get_md5')
    def test_delete_mid_tree(self, get_md5):
        # An IOError while hashing should yield an empty listing, not blow up.
        get_md5.side_effect = IOError('ow')
        self.viewer.extract()
        eq_({}, self.viewer.get_files())
class TestDiffHelper(TestCase, MktPaths):
    """Tests for DiffHelper, diffing two copies of the signed.zip fixture."""
    def setUp(self):
        src = self.packaged_app_path('signed.zip')
        if storage_is_remote():
            # Mirror the local fixture into the remote (e.g. S3) storage.
            copy_stored_file(
                src, src,
                src_storage=local_storage, dst_storage=private_storage)
        self.helper = DiffHelper(make_file(1, src), make_file(2, src))
    def tearDown(self):
        self.helper.cleanup()
        if storage_is_remote():
            private_storage.delete(self.packaged_app_path('signed.zip'))
    def test_files_not_extracted(self):
        eq_(self.helper.is_extracted(), False)
    def test_files_extracted(self):
        self.helper.extract()
        eq_(self.helper.is_extracted(), True)
    def test_get_files(self):
        eq_(self.helper.left.get_files(),
            self.helper.get_files())
    def test_diffable(self):
        self.helper.extract()
        self.helper.select('index.html')
        assert self.helper.is_diffable()
    def test_diffable_one_missing(self):
        # A file deleted on one side is still diffable (shows as removed).
        self.helper.extract()
        private_storage.delete(os.path.join(self.helper.right.dest,
                                            'index.html'))
        self.helper.select('index.html')
        assert self.helper.is_diffable()
    def test_diffable_allow_empty(self):
        self.helper.extract()
        self.assertRaises(AssertionError, self.helper.right.read_file)
        eq_(self.helper.right.read_file(allow_empty=True), '')
    def test_diffable_both_missing(self):
        self.helper.extract()
        self.helper.select('foo.js')
        assert not self.helper.is_diffable()
    def test_diffable_deleted_files(self):
        self.helper.extract()
        private_storage.delete(os.path.join(self.helper.left.dest,
                                            'index.html'))
        eq_('index.html' in self.helper.get_deleted_files(), True)
    def test_diffable_one_binary_same(self):
        self.helper.extract()
        self.helper.select('main.js')
        self.helper.left.selected['binary'] = True
        assert self.helper.is_binary()
    def test_diffable_one_binary_diff(self):
        self.helper.extract()
        self.change(self.helper.left.dest, 'asd')
        cache.clear()
        self.helper.select('main.js')
        self.helper.left.selected['binary'] = True
        assert self.helper.is_binary()
    def test_diffable_two_binary_diff(self):
        self.helper.extract()
        self.change(self.helper.left.dest, 'asd')
        self.change(self.helper.right.dest, 'asd123')
        cache.clear()
        self.helper.select('main.js')
        self.helper.left.selected['binary'] = True
        self.helper.right.selected['binary'] = True
        assert self.helper.is_binary()
    def test_diffable_one_directory(self):
        self.helper.extract()
        self.helper.select('main.js')
        self.helper.left.selected['directory'] = True
        assert not self.helper.is_diffable()
        assert self.helper.left.selected['msg'].startswith('This file')
    def test_diffable_parent(self):
        # Changing a nested file flags both it and its parent directory.
        self.helper.extract()
        self.change(self.helper.left.dest, 'asd',
                    filename='META-INF/ids.json')
        cache.clear()
        files = self.helper.get_files()
        eq_(files['META-INF/ids.json']['diff'], True)
        eq_(files['META-INF']['diff'], True)
    def change(self, file, text, filename='main.js'):
        # Helper: append *text* to *filename* inside extraction dir *file*.
        path = os.path.join(file, filename)
        data = private_storage.open(path, 'r').read()
        data += text
        with private_storage.open(path, 'w') as f:
            f.write(data)
class TestSafeUnzipFile(TestCase, MktPaths):
    """Tests for SafeUnzip validation, extraction and signature detection."""
    # TODO(andym): get full coverage for existing SafeUnzip methods, most
    # is covered in the file viewer tests.
    @patch.object(settings, 'FILE_UNZIP_SIZE_LIMIT', 5)
    def test_unzip_limit(self):
        zip = SafeUnzip(self.packaged_app_path('full-tpa.zip'))
        self.assertRaises(forms.ValidationError, zip.is_valid)
    def test_unzip_fatal(self):
        # A non-zip file raises by default...
        zip = SafeUnzip(self.manifest_path('mozball.webapp'))
        self.assertRaises(zipfile.BadZipfile, zip.is_valid)
    def test_unzip_not_fatal(self):
        # ...but only returns False with fatal=False.
        zip = SafeUnzip(self.manifest_path('mozball.webapp'))
        assert not zip.is_valid(fatal=False)
    def test_extract_path(self):
        zip = SafeUnzip(self.packaged_app_path('mozball.zip'))
        assert zip.is_valid()
        desc_string = '"description": "Exciting Open Web development action!"'
        assert desc_string in zip.extract_path('manifest.webapp')
    def test_not_secure(self):
        zip = SafeUnzip(self.packaged_app_path('mozball.zip'))
        zip.is_valid()
        assert not zip.is_signed()
    def test_is_secure(self):
        zip = SafeUnzip(self.packaged_app_path('signed.zip'))
        zip.is_valid()
        assert zip.is_signed()
    def test_is_broken(self):
        # Renaming the META-INF/*.sf member must invalidate the signature.
        zip = SafeUnzip(self.packaged_app_path('signed.zip'))
        zip.is_valid()
        # NOTE(review): pattern should be a raw string (r'...'); it works
        # here but is fragile.
        sf_re = re.compile('^META\-INF/(\w+)\.sf$')
        for info in zip.info:
            if sf_re.match(info.filename):
                info.filename = 'META-INF/foo.foo'
                break
        assert not zip.is_signed()
|
tsl143/zamboni
|
mkt/files/tests/test_helpers.py
|
Python
|
bsd-3-clause
| 11,592
|
[
"exciting"
] |
2f3c93174703eeab76bbeda06f9fb804e6b07ce3355560353d453cabdf3af5af
|
""" This is a test of using SandboxStoreClient in the WMS
In order to run this test we need the following DBs installed:
- SandboxMetadataDB
And the following services should also be on:
- SandboxStore
And a SandboxSE should be configured, something like:
SandboxStore
{
LocalSE = FedericoSandboxSE
Port = 9196
BasePath = /home/toffo/Rumenta/
Authorization
{
Default = authenticated
FileTransfer
{
Default = all
}
}
}
A user proxy is also needed to submit,
and the Framework/ProxyManager needs to be running with such a user proxy already uploaded.
Suggestion: for local testing, run this with::
python -m pytest -c ../pytest.ini -vv tests/Integration/WorkloadManagementSystem/Test_SandboxStoreClient.py
"""
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC.tests.Utilities.utils import find_all
from DIRAC import gLogger
from DIRAC.WorkloadManagementSystem.Client.SandboxStoreClient import SandboxStoreClient
from DIRAC.WorkloadManagementSystem.DB.SandboxMetadataDB import SandboxMetadataDB
gLogger.setLevel('DEBUG')
def test_SSCChain(self):
    """Full chain test of SandboxStoreClient functionality:
    upload a sandbox, attach it to a job, download it back,
    then query SandboxMetadataDB for unused sandboxes.
    """
    ssc = SandboxStoreClient()
    smDB = SandboxMetadataDB()

    exeScriptLocation = find_all('exe-script.py', '..', '/DIRAC/tests/Integration')[0]
    fileList = [exeScriptLocation]
    res = ssc.uploadFilesAsSandbox(fileList)
    assert res['OK'] is True
    # SEPFN = res['Value'].split( '|' )[1]
    res = ssc.uploadFilesAsSandboxForJob(fileList, 1, 'Input')
    assert res['OK'] is True
    res = ssc.downloadSandboxForJob(1, 'Input')  # to run this we need the RSS on
    # Bug fix: `print res` is Python-2-only syntax; print() works on 2 and 3.
    print(res)  # for debug...
    assert res['OK'] is True

    # only ones needing the DB
    res = smDB.getUnusedSandboxes()
    print(res)
    assert res['OK'] is True
    # smDB.getSandboxId(SEName, SEPFN, requesterName, requesterGroup)
    # # cleaning
    # res = smDB.deleteSandboxes(SBIdList)
    # assert res['OK'] is True
|
andresailer/DIRAC
|
tests/Integration/WorkloadManagementSystem/Test_SandboxStoreClient.py
|
Python
|
gpl-3.0
| 2,056
|
[
"DIRAC"
] |
3a641bb43a31fccf7216ae0f75167a70e9f1ff1a4cc453fe54e8e7e275a86162
|
#!/usr/bin/env python
# Authors: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2011, Regents of the University of California
# All rights reserved.
"""
ttree Ttree is a simple program for recursively composing and generating
large redundant text files from small template files.
By default, the large number of unique template variables generated
in the process are automatically substituted with integers
(or other numeric counters, all of which can be overridden),
rendered, and the rendered templates are written to a file.
ttree was designed to be useful for generating input files for
molecular simulation software like LAMMPS or NAMD.
BasicUI This section of the code contains the user interface for ttree
when run as a stand-alone program, as described above. (This
section of code contains the "if __name__ == __main__:" code block.)
-- Data Types --
StaticObj Static nodes are data structures used to store ttree class definitions.
(Static nodes are useful for defining molecule types or
namespaces in LAMMPS or other molecular simulation programs.)
The nodes themselves are stored in a tree of nested class definitions.
Static variables (such as "@atom:C") are also associated with
StaticObjs.
InstanceObj Instance nodes are created when a user creates one (or many)
copies of a class, using the "new" command.
These classes in turn may instantiate other classes.
(Example: A user may manually instantiate several copies of a
molecule, such as a protein, however each of those
molecules may contain molecular subunits, such as
amino acids, which are automatically instantiated.)
Instance variables (such as "$atom:CA") are also associated with
InstanceObjs.
"""
import sys
from collections import defaultdict
import operator
import random
#import gc
try:
unicode
except NameError:
# Python 3
basestring = unicode = str
# -- ttree_lex.py --
# TtreeShlex is a backwards-compatible version of python's standard shlex module.
# It has the additional member: "self.wordterminators", which overrides
# the "self.wordchars" member. This enables better handling of unicode
# characters by allowing a much larger variety of characters to appear
# in words or tokens parsed by TtreeShlex. Otherwise it is identical to shlex.
from ttree_lex import *
if sys.version < '2.6':
raise InputError('Error: Using python '+sys.version+'\n'
' Alas, you must upgrade to a newer version of python (2.7 or later).')
elif sys.version < '2.7':
sys.stderr.write('--------------------------------------------------------\n'
'----------------- WARNING: OLD PYTHON VERSION ----------\n'
' This program is untested on your python version ('+sys.version+').\n'
' PLEASE LET ME KNOW IF THIS PROGRAM CRASHES (and upgrade python).\n'
' -Andrew 2014-11-28\n'
'--------------------------------------------------------\n'
'--------------------------------------------------------\n')
from ordereddict import OrderedDict
else:
from collections import OrderedDict
if sys.version > '3':
import io
else:
import cStringIO
# We keep track of the program name and version.
# (This is only used for generating error messages.)
#g_filename = 'ttree.py'
g_filename = __file__.split('/')[-1]   # basename of this source file
g_module_name = g_filename
if g_filename.rfind('.py') != -1:
    # Strip the '.py' extension to get the module name used in messages.
    g_module_name = g_filename[:g_filename.rfind('.py')]
g_date_str = '2014-12-19'   # release date
g_version_str = '0.79'      # version string
class ClassReference(object):
    """A reference from one class definition (StaticObj) to another.

    Holds three pieces of information: the string the user typed to name
    the target class (statobj_str), where in the input that string appeared
    (srcloc), and -- once resolved -- a pointer to the target StaticObj
    itself (statobj).  Keeping the original string and source location
    around permanently makes debugging and interactive error reporting
    much easier.  Such references arise from 'new' commands stored in
    "StaticObj.instance_commands[i].class_ref" and from inheritance lists
    stored in "StaticObj.class_parents".
    """
    __slots__ = ["statobj_str", "srcloc", "statobj"]

    def __init__(self, statobj_str=None, srcloc=None, statobj=None):
        self.statobj_str = statobj_str
        # Substitute an "unknown" location when the caller supplies none.
        self.srcloc = OSrcLoc('', -1) if srcloc is None else srcloc
        self.statobj = statobj
# "Command"s are tasks to carry out.
# (...either immediately, or later during instantiation)
# Commands are used to write to files, create new instances, delete instances,
# or custom commands to modify an instance of a class.
# (For example "instance = new Class.move(1,0,0).rot(45,0,0,1)"
# The ".move(1,0,0)" and ".rot(45,0,0,1)" commands are "stackable" and
# have similar syntax to member functions in C++, JAVA, Python.)
class Command(object):
    """Base class for tasks to carry out, either immediately or later
    during instantiation (writing files, creating or deleting instances,
    modifying instances).  Records only where in the user's input the
    command originated.
    """
    __slots__ = ["srcloc"]

    def __init__(self, srcloc=None):
        self.srcloc = srcloc
class WriteFileCommand(Command):
    """Command that writes the contents of a template to a file.

    filename    name of the file that will be written to when the
                command is executed.
    tmpl_list   the contents of what will be written: a template, i.e.
                a list of alternating TextBlocks and VarRefs (with
                additional tags and data identifying where they occur
                in the original user's files).
    """
    __slots__ = ["filename", "tmpl_list"]

    def __init__(self,
                 filename=None,
                 tmpl_list=None,
                 srcloc=None):
        # Bug fix: Command.__init__ was previously only invoked when
        # tmpl_list was supplied, leaving self.srcloc unset for commands
        # created without a template (which broke __copy__).  Invoke the
        # base constructor unconditionally.
        Command.__init__(self, srcloc)
        self.filename = filename
        self.tmpl_list = [] if tmpl_list is None else tmpl_list

    def __str__(self):
        if self.filename:
            return 'WriteFileCommand(\"'+self.filename+'\")'
        else:
            return 'WriteFileCommand(NULL)'

    def __copy__(self):
        tmpl_list = []
        CopyTmplList(self.tmpl_list, tmpl_list)  # CHECK:IS_MEMORY_WASTED_HERE?
        return WriteFileCommand(self.filename, tmpl_list, self.srcloc)
class InstantiateCommand(Command):
    """Tuple-like command pairing an instance name (self.name) with a
    ClassReference (self.class_ref), plus -- once created -- the resulting
    instance node (self.instobj).
    """
    __slots__ = ["name", "class_ref", "instobj"]

    def __init__(self, name=None, class_ref=None, srcloc=None, instobj=None):
        Command.__init__(self, srcloc)
        self.name = name
        self.class_ref = class_ref
        self.instobj = instobj

    def __str__(self):
        return 'InstantiateCommand('+self.name+')'

    def __copy__(self):
        return InstantiateCommand(self.name, self.class_ref,
                                  self.srcloc, self.instobj)
class DeleteCommand(Command):
    """Command requesting the deletion of an instance."""
    __slots__ = []

    def __init__(self, srcloc=None):
        Command.__init__(self, srcloc)

    def __str__(self):
        return 'DeleteCommand()'

    def __copy__(self):
        return DeleteCommand(self.srcloc)
class StackableCommand(Command):
    """ StackableCommand is a class for storing commands
    that effect the environment of the object being created.
    The combined effect of these commands can be thought of as a "stack".
    Commands can be pushed on the stack, or popped off.
    The actual commands themselves are represented by the "contents" member
    (defined in the PushCommand subclass) which is usually a text string.
    ttree.py does not attempt to understand the content of these commands.
    That job is left up to the __main___ module.  (IE. whatever script that
    happens to be importing ttree.py.  If there is no script, and
    ttree.py IS the main module, then it simply ignores these commands.)
    """
    # context_node selects WHICH stack a command modifies when several exist.
    __slots__=["context_node"]
    def __init__(self,
                 srcloc,
                 context_node=None):
        Command.__init__(self, srcloc)
        self.context_node = context_node # if multiple stacks are present, then use "context_node"
                                         # as a key to identify which stack you want
                                         # the command to modify
class PushCommand(StackableCommand):
    """Push an entry (self.contents, usually a text string) onto the
    environment stack identified by context_node.
    """
    __slots__ = ["contents"]

    def __init__(self, contents, srcloc, context_node=None):
        StackableCommand.__init__(self, srcloc, context_node)
        self.contents = contents

    def __copy__(self):
        return PushCommand(self.contents, self.srcloc, self.context_node)

    def __str__(self):
        return 'PushCommand('+str(self.contents)+')'
class PushRightCommand(PushCommand):
    """A PushCommand applied to the right-hand side of the stack."""
    __slots__ = []

    def __init__(self, contents, srcloc, context_node=None):
        PushCommand.__init__(self, contents, srcloc, context_node)

    def __copy__(self):
        return PushRightCommand(self.contents, self.srcloc, self.context_node)

    def __str__(self):
        return 'PushRightCommand('+str(self.contents)+')'
class PushLeftCommand(PushCommand):
    """A PushCommand applied to the left-hand side of the stack."""
    __slots__ = []

    def __init__(self, contents, srcloc, context_node=None):
        PushCommand.__init__(self, contents, srcloc, context_node)

    def __copy__(self):
        return PushLeftCommand(self.contents, self.srcloc, self.context_node)

    def __str__(self):
        return 'PushLeftCommand('+str(self.contents)+')'
class PopCommand(StackableCommand):
    """Pop an entry off the environment stack.  self.partner is the
    PushCommand whose effect this pop undoes.
    """
    __slots__ = ["partner"]

    def __init__(self, partner, srcloc, context_node=None):
        StackableCommand.__init__(self, srcloc, context_node)
        self.partner = partner

    def __copy__(self):
        return PopCommand(self.partner, self.srcloc, self.context_node)

    def __str__(self):
        return 'PopCommand('+str(self.partner.contents)+')'
class PopRightCommand(PopCommand):
    """Pop the entry pushed by a matching PushRightCommand."""
    __slots__ = []

    def __init__(self, partner, srcloc, context_node=None):
        PopCommand.__init__(self, partner, srcloc, context_node)
        # A pop must pair with a push on the same (right) side of the stack.
        assert((partner is None) or isinstance(partner, PushRightCommand))

    def __copy__(self):
        return PopRightCommand(self.partner, self.srcloc, self.context_node)

    def __str__(self):
        return 'PopRightCommand('+str(self.partner.contents)+')'
class PopLeftCommand(PopCommand):
    """Pop the entry pushed by a matching PushLeftCommand."""
    __slots__ = []

    def __init__(self, partner, srcloc, context_node=None):
        PopCommand.__init__(self, partner, srcloc, context_node)
        # A pop must pair with a push on the same (left) side of the stack.
        assert((partner is None) or isinstance(partner, PushLeftCommand))

    def __copy__(self):
        return PopLeftCommand(self.partner, self.srcloc, self.context_node)

    def __str__(self):
        return 'PopLeftCommand('+str(self.partner.contents)+')'
# The ScopeCommand, ScopeBegin, and ScopeEnd commands are useful to designate
# which commands belong to a particular class definition (or class instance).
# (This is useful later on, when a linear list of commands has been created.)
# They are simply markers an do not do anything. These classes can be ignored.
class ScopeCommand(Command):
    """Marker command recording where a class definition (or instance)
    begins or ends in a linearized command list.  It performs no action
    and can be ignored by command processors.
    """
    __slots__ = ["node"]

    def __init__(self, node, srcloc):
        Command.__init__(self, srcloc)
        self.node = node

    def __copy__(self):
        return ScopeCommand(self.node, self.srcloc)

    def __str__(self):
        if not self.node:
            return 'ScopeCommand(None)'
        return 'ScopeCommand('+self.node.name+')'
class ScopeBegin(ScopeCommand):
    """Marker placed where a class definition/instance scope opens."""
    __slots__ = []

    def __init__(self, node, srcloc):
        ScopeCommand.__init__(self, node, srcloc)

    def __copy__(self):
        return ScopeBegin(self.node, self.srcloc)

    def __str__(self):
        if not self.node:
            return 'ScopeBegin(None)'
        return 'ScopeBegin('+NodeToStr(self.node)+')'
class ScopeEnd(ScopeCommand):
    """Marker placed where a class definition/instance scope closes."""
    __slots__ = []

    def __init__(self, node, srcloc):
        ScopeCommand.__init__(self, node, srcloc)

    def __copy__(self):
        return ScopeEnd(self.node, self.srcloc)

    def __str__(self):
        if not self.node:
            return 'ScopeEnd(None)'
        return 'ScopeEnd('+NodeToStr(self.node)+')'
# COMMENTING OUT: NOT NEEDED AT THE MOMENT
#class VarAssignCommand(Command):
# """ VarAssignCommand
#
# This class is used whenever the user makes an explicit request to assign
# a variable to a value (values are text strings).
#
# var_ref The variable name (tecnically speaking, I call this
# a variable descriptor string and it includes at least one of
# the following: the name of a leaf node, a category node name,
# and category name)
# the location in the file where variable appears, and (eventually
# after subsequent lookup), references to the leaf_node, cat_node,
# "Category", and "VarBinding" data structures associated with it.
# text_tmpl Text strings are often simple strings, however more
# generally, they can be strings which include other variables
# (ie templates). In general, templates are lists of alternating
# TextBlocks and VarRefs, (with additional tags and data to
# identify where they occur in in the original user's files).
#
# """
# __slots__=["var_ref","text_tmpl"]
#
# def __init__(self,
# #command_name = '=', <-- ?!?
# var_ref = None,
# text_tmpl=None):
# Command.__init__(self, srcloc)
# self.var_ref = var_ref
# self.text_tmpl = text_tmpl
class ModCommand(object):
    """Wrapper pairing a command with the multi-descriptor string that
    selects which instance(s) the command applies to.
    """
    __slots__ = ["command", "multi_descr_str"]

    def __init__(self, command, multi_descr_str):
        self.command = command
        self.multi_descr_str = multi_descr_str

    def __str__(self):
        return 'ModCommand('+str(self.command)+')'

    def __copy__(self):
        return ModCommand(self.command.__copy__(), self.multi_descr_str)
def CopyTmplList(source_tmpl_list, dest_cpy):
    """Copy a template (a list of TextBlocks and VarRefs) into dest_cpy.

    TextBlocks and static ('@') VarRefs are shared (pointer-copied) because
    they never change during instantiation.  Instance ('$') VarRefs are
    re-created, because each instantiation binds them to new nodes.
    """
    for entry in source_tmpl_list:
        if isinstance(entry, TextBlock):
            dest_cpy.append(entry) # Then make a shallow copy
                                   # (pointer assignment) to the text
                                   # block (Text blocks do not change
                                   # during instantiation.)
        elif isinstance(entry, VarRef):
            assert(len(entry.prefix)>0)
            if entry.prefix[0] == '@': # '@' vars refer to static data
                dest_cpy.append(entry) # Then make a shallow copy
                                       # pointer assignment) to the static
                                       # variable. (Static variables do
                                       # not change during instantiation.)
            elif entry.prefix[0] == '$': # new '$' vars are created
                                         # during every instantiation.
                # var_refs do change when you instantiate them.  So
                # create a new VarRef object, and copy the attributes.
                var_ref = VarRef(entry.prefix,
                                 entry.descr_str,
                                 entry.suffix,
                                 entry.srcloc)
                # Note: for instance variables ('$' vars)
                # "entry.nptr" should not contain
                # any data yet, so we just ignore it.
                # I assert this below:
                assert((entry.nptr.cat_node is None) and
                       (entry.nptr.leaf_node is None))
                dest_cpy.append(var_ref)
            else:
                assert(False) # prefix[0] should be either '@' or '$'
        else:
            assert(False) # type(entry) should be either TextBlock or VarRef
def RecursiveJoin(tokens_expr, delimiter=''):
    """Flatten an arbitrarily nested list/tuple of strings into one string.

    Example:
        ['a ', ('tree', '-', ['like', ' container']), [[' '], 'of'], ' strings']
    becomes:
        'a tree-like container of strings'
    This behaves similarly to "reduce(lambda a, b: a+b, tokens)",
    except that it works with arbitrarily nested lists/tuples.

    Bug fixes relative to the previous version: it appended to the string
    `text` instead of the list `text_lstr`, recursed through an undefined
    helper (TokensToStr), and passed two arguments to str.join.
    """
    try:
        string_types = basestring  # py2, or the module-level py3 shim
    except NameError:
        string_types = str         # running standalone under py3
    if isinstance(tokens_expr, string_types):
        return tokens_expr
    return delimiter.join(RecursiveJoin(t, delimiter) for t in tokens_expr)
#----------------------------------------------------------
#----------------------------------------------------------
# The following code is specific to ttree.
#
# (Up until this point, we have only defined
# a few simple general text parsing routines.)
#----------------------------------------------------------
#----------------------------------------------------------
def PtknsToStr(path_tokens):
    """
    There are three ways to store paths:
      As a single string:  '/Protein/Phe/Ca'      <- the format entered by the user
      As a list of tokens: ['Protein','Phe','Ca'] <- split into tokens
      As a list of nodes in a tree (pointers to nodes in a tree hierarchy)
    This function converts a token list into the single-string form by
    joining the tokens with '/'.  An empty token list yields ''.
    """
    return '/'.join(path_tokens)
def StrToPtkns(path_string):
    """ The inverse of PtknsToStr(): split a string like
    '/usr/local/../bin/awk' into ['', 'usr', 'local', '..', 'bin', 'awk'].
    For illustrative purposes only.  Use text.split('/') directly instead."""
    # Bug fix: previously returned orig_text.split('/'), but no variable
    # named orig_text exists -- the parameter is path_string.
    return path_string.split('/')
def FindChild(name, node, dbg_loc):
    """ FindChild looks over the list of node.children to find a child
    which matches the name given in the first argument.
    If it is not found, it returns None.

    Note: Both StaticObjs and InstanceObjs have self.children and self.parent,
    however only StaticObjs have "self.class_parents".
    ("class_parents" are "parents" in the object-oriented sense.)
    If "node" (2nd argument) happens to be a StaticObj, we must search
    over the children of these class_parents as well.

    Terminology used here differs from Object Oriented Programming:
    entries in node.children are not children in the object-oriented
    sense.  OOP-style inheritance information is stored in
    "node.class_parents", instead of "node.parents".
    Instantiated nodes (InstanceObjs) are different: they have access to
    the attributes of the class_parents of the StaticObjs that define
    them, but that data is merged into their own members (including
    their .children), so no extra search is needed for them.
    """
    # Direct lookup first: the node's own children.
    child = node.children.get(name)
    if child:
        return child
    if isinstance(node, StaticObj):
        # The object-oriented inheritance stuff appears here.
        # If you don't care about OOP or inheritance,
        # then comment out the loop that follows:
        # Search recursively over the "children" (ie attributes or members)
        # belonging to any OOP ancestors of this node.
        for class_parent in node.class_parents:
            child = FindChild(name, class_parent, dbg_loc)
            if child != None:
                return child
        # Also search any namespaces this node has pulled in.
        for namespace_node in node.namespaces:
            child = FindChild(name, namespace_node, dbg_loc)
            if child != None:
                return child
    else:
        assert(isinstance(node, InstanceObjBasic))
    # Otherwise, a child name match was not found
    return None
def FollowPath(path_tokens, starting_node, dbg_loc):
    """ FollowPath() returns the "last_node", a node whose position in the
    tree is indicated by a list of path_tokens, describing the names
    of nodes connecting "starting_node" to "last_node".
    If one of the strings in the list of path_tokens turns out
    not to match the names of classes in the tree, then this function
    returns the last_node that did match before the error occurred,
    as well as an integer which stores the number of tokens in
    the path_tokens list which were successfully processed.
    In other words, the list of node names is not a full path, but the
    relative path that takes you from one node (not necessarily the root)
    to another.
    Ideally, each node in the list should be a parent or a child of the
    previous node. (See comment for PtknsToStr(), for more details.)

    Return Value:
    This function returns the number of path_tokens successfully
    parsed.  Under normal termination, this is len(path_tokens).
    If the path can not be followed (because at some point, a child
    or parent does not exist), then this function returns a number
    smaller than len(path_tokens).
    We let the caller handle undefined paths. """
    #print('    FollowPath() invoked on: ', path_tokens)
    if len(path_tokens) == 0:
        return 0, starting_node
    node = starting_node
    # Is this path a relative path, or a full path?
    # If the path-string began with '/', then it's a full path.  This means
    # that after processing by split('/'), the first token will be ''
    # Example:   path_tokens='/Prot/Alanine'.split('/')
    #        --> path_tokens[0] == ''
    if path_tokens[0] == '':
        # In that case, then take us to the root node:
        while node.parent != None:
            node = node.parent
            #sys.stdout.write('FollowPath(): Retreating to node \"'+node.name+'\"\n')
        i0 = 1 # <- We've just processed the first token.  Skip over it later.
    else:
        i0 = 0
    i = i0
    while i < len(path_tokens):
        if path_tokens[i] == '..':
            # '..' moves one level up, toward the root.
            if node.parent is None:
                return i, node # <-return the index into the token list
                               #   Caller will know that something went awry
                               #   if the return value is not equal to the
                               #   length of the token list
            else:
                node = node.parent
            i += 1
        elif path_tokens[i] == '...':
            # '...' searches upward for an ancestor owning a child whose
            # name matches the NEXT token.
            node_before_ellipsis = node
            if i == len(path_tokens)-1:
                return i, node_before_ellipsis
            search_target = path_tokens[i+1]
            # Now search over the "children" of this node
            # for one who's name matches path_tokens[i].
            # If not found, then move up to the parent node's children.
            # (This is not an exhaustive tree search.  Only the nodes which
            #  are immediate children of this node's parents are searched.)
            while node != None:
                child = FindChild(search_target, node, dbg_loc)
                if child is None:
                    node = node.parent
                else:
                    node = child
                    break
            if node is None:
                # Caller will know that something went awry if the return
                # value is not equal to the length of the token list.
                return i, node_before_ellipsis
            i += 2   # <- consumed both the '...' and the target name
        elif path_tokens[i] in ('','.'): # <-Note we ignore empty tokens from now on.
            # (Same convention is used in specifying a
            #  directory in a filesystem, eg. using /usr/local
            #  or /usr//local or /usr/./local.  These are all equivalent.)
            i += 1
        else:
            # Now search over the "children" of this
            # node for one who's name matches path_tokens[i].
            child = FindChild(path_tokens[i], node, dbg_loc)
            if child is None:
                # In that case, return with the node_list incomplete.
                # Let the caller check to see if something went wrong.
                return i, node # <-return the index into the token list (i)
                               #   Caller will know that something went awry
                               #   if the return value is not equal to the
                               #   length of the token list
            else:
                node = child
            i += 1
        # Stop walking as soon as we land on a deleted node.
        if node.IsDeleted():
            #sys.stderr.write('(debug_msg: encountered deleted node: \"'+node.name+'\")\n')
            break
    return len(path_tokens), node
def PtknsToNode(path_tokens, starting_node, dbg_loc):
    """ PtknsToNode() is identical to FollowPath() except
    that it raises syntax-error (InputError) exceptions if the path is
    undefined, instead of returning a partial result."""
    i_last_ptkn, last_node = FollowPath(path_tokens, starting_node, dbg_loc)
    # FollowPath() returns fewer tokens than it was given when it failed;
    # diagnose exactly why and raise a descriptive error.
    if i_last_ptkn < len(path_tokens):
        #assert(isinstance(last_node,StaticObj)) <--why did I assert this? seems wrong
        if (last_node.parent is None) and (path_tokens[i_last_ptkn] == '..'):
            #In that case, we tried to back out beyond the root of the tree.
            raise InputError('Error('+g_module_name+'.PtknsToNode()):\n'
                             ' Invalid variable/class name:\n'
                             ' \"'+PtknsToStr(path_tokens)+'\" located near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'
                             ' There are too many \"..\" tokens in the path string.')
        elif path_tokens[i_last_ptkn] == '...':
            if i_last_ptkn+1 == len(path_tokens):
                # '...' was the final token; there is nothing to search for.
                raise InputError('Error('+g_module_name+'.PtknsToNode()):\n'
                                 ' Error in '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'
                                 ' Expected name following \"...\"\n')
            else:
                search_target = path_tokens[i_last_ptkn+1]
                #In that case, we were unable to find the node referenced by "..."
                raise InputError('Error('+g_module_name+'.PtknsToNode()):\n'
                                 ' Class or variable \"'+search_target+'\" not found\n'
                                 ' in this context: \"'+PtknsToStr(path_tokens)+'\"\n'
                                 ' located near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno))
        else:
            #Then the reason is: The string in path_tokens[i_last_ptkn]
            #was supposed to be a child of last_node but a child
            #of that name was not found.
            err_msg = 'Error('+g_module_name+'.PtknsToNode()):\n'+\
                      ' Undefined variable/class name:\n'+\
                      ' \"'+PtknsToStr(path_tokens)+'\",\n'+\
                      ' This occured near or before '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'+\
                      ' (Specifically \"'+path_tokens[i_last_ptkn]+\
                      '\" is not a subordinate of \"'+MaxLenStr(last_node.name,'/')+'\".)\n'+\
                      ' This may be due to a typo located here or earlier.\n'+\
                      ' It may also occur if you deleted the object earlier. (Referring to a\n'+\
                      ' deleted object is only forgiven when using [0-9] or [0:10] notation.)\n'+\
                      ' If this object refers to an array you must use brackets []\n'+\
                      ' to explicitly specify the element(s) you want from that array.\n'+\
                      ' (To select multiple elements, you can use [*] or [0-9] or [0:10].)\n'
            if (path_tokens[i_last_ptkn] in NodeToPtkns(last_node)):
                # Heuristic: the "missing" name is actually one of this
                # node's own ancestors -- likely an unclosed } earlier.
                err_msg += '\nIn this case:\n'+\
                           ' It seems like you may have omitted a } character somewhere before:\n'+\
                           ' '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)
            raise InputError(err_msg)
        assert(False) # One of the two conditions above should be true.
    return last_node
def StrToNode(obj_name, starting_node, dbg_loc):
path_tokens = obj_name.split('/')
return PtknsToNode(path_tokens, starting_node, dbg_loc)
def NodeListToPtkns(node_list, dbg_loc=None):
    """Convert a path expressed as a list of tree nodes into path tokens.

    A step from a node to its parent becomes '..'; any other step must lead
    to one of the previous node's children and contributes that child's
    name.  Raises InputError when a step is neither the parent nor a
    registered child of the previous node.
    """
    assert(len(node_list) > 0) #The path must contain at least the starting node
    path_tokens = [node_list[0].name]
    for prev_node, cur_node in zip(node_list, node_list[1:]):
        if cur_node == prev_node.parent:
            path_tokens.append('..')
        else:
            path_tokens.append(cur_node.name)
            # Now check to make sure the user supplied consistent information:
            if (cur_node not in prev_node.children.values()):
                raise InputError('Error('+g_module_name+'.NodeListToPtkns()):\n'
                                 ' Undefined variable/class name:\n'
                                 ' \"'+PtknsToStr(path_tokens)+'\" located near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'
                                 ' (\"'+path_tokens[-1]+'\" is not subordinate to \"'+MaxLenStr(prev_node.name,'/')+'\")\n'
                                 ' This could be an internal error.')
    return path_tokens
def NodeListToStr(node_list, dbg_loc=None):
    """Like NodeListToPtkns() but returns a single '/'-delimited string.

    A step to a parent appends '/..'; a step to a child appends '/<name>'.
    Raises InputError when a step is neither the parent nor a registered
    child of the previous node.
    """
    assert(len(node_list) > 0) #The path must contain at least the starting node
    path_str = node_list[0].name
    for i in range(1, len(node_list)):
        if node_list[i] == node_list[i-1].parent:
            path_str += '/..'
        else:
            path_str += '/' + node_list[i].name
            # Now check to make sure the user supplied consistent information:
            if (node_list[i] not in node_list[i-1].children.values()):
                # Bug fix: the message previously referenced an undefined
                # variable (path_tokens); report the accumulated path_str.
                err_msg = 'Error('+g_module_name+'.NodeListToStr()):\n'+\
                          ' Invalid variable/class name:\n' +\
                          ' \"'+path_str+'\"'
                if dbg_loc != None:
                    err_msg += ' located near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)
                err_msg += '\n' +\
                           ' (\"'+node_list[i].name+'\" is not a subordinate of \"'+MaxLenStr(node_list[i-1].name,'/')+'\")\n'+\
                           ' This could be an internal error.'
                raise InputError(err_msg)
    return path_str
def NodeToPtkns(node):
    """Return the list of node names from the tree root down to `node`
    (the root's name first)."""
    tokens = []
    current = node
    while current is not None:
        tokens.insert(0, current.name)
        current = current.parent
    return tokens
def NodeToStr(node):
    """Return the full '/'-delimited path string of `node`; the root node
    itself is rendered as '/'."""
    if node.parent is None:
        assert(node.name == '')
        return '/'
    # Walk up to the root collecting names, then join them root-first.
    names = []
    nd = node
    while nd is not None:
        names.append(nd.name)
        nd = nd.parent
    names.reverse()
    return '/'.join(names)
def CatLeafNodesToTkns(cat_name, cat_node, leaf_node, dbg_loc):
    """Build the list of path tokens naming a (category, leaf) variable pair:
    the path of cat_node, then 'cat_name:', then the path of leaf_node
    relative to cat_node.  Raises InputError when leaf_node does not lie
    within cat_node's scope; warns to stderr when cat_node == leaf_node.
    Deleted leaf ancestors are marked with a 'DELETED_' name prefix.
    """
    assert((cat_node != None) and (leaf_node != None))
    assert((cat_name != None) and (cat_name != ''))
    # Determine the path of the cat node
    cat_node_ptkns = NodeToPtkns(cat_node)
    cat_node_ptkns.append(cat_name+':')
    # Determine the path of the leaf node (which should inherit from cat)
    deleted = False
    leaf_node_ptkns = []
    if cat_node != leaf_node:
        # Collect leaf-side names walking upward until we reach cat_node.
        node = leaf_node
        while node.parent != None:
            if node.IsDeleted():
                deleted = True
                leaf_node_ptkns.append('DELETED_'+node.name)
                break
            leaf_node_ptkns.append(node.name)
            if node.parent == cat_node:
                break
            node = node.parent
        leaf_node_ptkns.reverse()
        if not deleted:
            # Check that leaf inherits from cat.  If not, print error.
            if ((node.parent != cat_node) and (node != cat_node)):
                err_msg = 'Error('+g_module_name+'.CatLeafNodesToPtkns()):\n'+\
                          ' Invalid variable (category:leaf) pair\n'
                if dbg_loc != None:
                    cat_node_str = NodeToStr(cat_node)
                    leaf_node_str = NodeToStr(leaf_node)
                    err_msg += ' located near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'+\
                               ' (\"'+leaf_node.name+'\" is not in the scope of \"'+cat_node_str+'/'+cat_name+':\")\n'+\
                               ' This will happen if you used the \"category\" command to manually\n'+\
                               ' create a category/counter which is not defined globally.\n'+\
                               '\n'+\
                               ' Note: Using the analogy of a unix style file system, \n'+\
                               ' the problem is that \"'+leaf_node_str+'\"\n'+\
                               ' is not a subdirectory of \"'+cat_node_str+'\".\n'+\
                               '\n'+\
                               ' Note: This often occurs when \".../\" is used.  In that case, you may\n'+\
                               ' be able to avoid this error by referring to your variable explicitly\n'+\
                               ' by using chains of \"../\" tokens in the path instead of \".../\".\n'
                    #' Make sure that your variable you are using is defined in \n'+\
                    #' an environment (currently \"'+leaf_node_str+'\")\n'+\
                    #' which lies WITHIN the environment where the category was defined.\n'+\
                    #' (currently \"'+cat_node_str+'\").\n'
                raise InputError(err_msg)
    else:
        # Degenerate case: the variable's category and leaf are one node.
        err_msg = 'Warning: Strange variable path'
        if dbg_loc != None:
            err_msg += ' near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)
        err_msg += '\n' +\
                   ' The category and leaf nodes for variable \"'+cat_name+':'+leaf_node.name+'\" are the same.\n'+\
                   ' Check to see that this variable is behaving the way you intended.\n'+\
                   ' (It\'s possible this could be an internal error in the program.)\n'
        sys.stderr.write(err_msg)
    # Merge the list of strings together into a single string:
    return cat_node_ptkns + leaf_node_ptkns
def CanonicalCatName(cat_name, cat_node, dbg_loc=None):
    """Return the full (absolute) name of a category as a single string:
    the path of the node where the category is defined, followed by the
    category's name, joined with '/' separators."""
    path_tokens = NodeToPtkns(cat_node)
    path_tokens.append(cat_name)
    return '/'.join(path_tokens)
def CanonicalDescrStr(cat_name, cat_node, leaf_node, dbg_loc=None):
    """Build the canonical descriptor string for a variable from its
    category name, category node, and leaf node.  Tokens are joined with
    '/' separators, except that no separator is inserted immediately
    after a token ending in ':' (the category-name token)."""
    tokens = CatLeafNodesToTkns(cat_name, cat_node, leaf_node, dbg_loc)
    result = tokens[0]
    for tok in tokens[1:]:
        separator = '' if (result and result[-1] == ':') else '/'
        result += separator + tok
    return result
def CollapsePath(path_tokens):
    """
    Take a list of strings representing a directory-like path
    (for example the tokens of '/SUB1A/Sub2A/../Sub2B/sub3b/../sub3c/entry')
    and return an equivalent list containing no '..' tokens.
    (The example above collapses to the tokens of '/SUB1A/Sub2B/sub3c/entry'.)
    Empty tokens (from '//') are discarded along the way.
    If there are more '..' tokens than tokens to cancel them, the number of
    leftover '..' tokens is returned instead (an int, signalling an error).
    """
    kept = []
    pending_deletions = 0
    # Scan right-to-left so each '..' cancels the nearest token to its left.
    for tok in reversed(path_tokens):
        if tok == '..':
            pending_deletions += 1
        elif (pending_deletions > 0) and (tok != ''):
            pending_deletions -= 1   # this token is cancelled by a '..'
        elif tok:
            kept.append(tok)         # empty tokens ("/a//b") are dropped
    kept.reverse()
    if pending_deletions > 0:
        return pending_deletions     # <-- lets the caller know an error occurred
    return kept
def FindCatNode(category_name, current_node, srcloc):
    """ Search upwards (toward the ancestor nodes), looking for a node
    whose "categories" dict contains an entry matching category_name
    (the first argument).  Useful when the user specifies a category name,
    but neglects to specify which node it was defined in.
    If no ancestor defines the category, the global (root) node is
    returned instead.
    Note: there is no guarantee that the node returned by this function
    contains an entry in its "categories" list corresponding to this
    category name.  You must check for this condition and handle it."""
    node = current_node
    # Climb toward the root until the category is found (or we run out
    # of ancestors, in which case node is the global/root node).
    while (category_name not in node.categories) and (node.parent is not None):
        node = node.parent
    return node
def RemoveNullTokens(in_ptkns):
    """Discard useless tokens from a path-token list: every '.' token,
    and every '' token except one appearing at the very beginning
    (a leading '' marks an absolute path, so it is preserved)."""
    return [tok for i, tok in enumerate(in_ptkns)
            if (tok != '.') and ((tok != '') or (i == 0))]
def DescrToCatLeafPtkns(descr_str, dbg_loc):
    """Split a variable descriptor into its three components.

    Variables in this program have three parts:
    1) a variable category name (designating the type of variable),
    2) a variable category path (a node which should be an ancestor of
       the leaf node in the tree), and
    3) a variable name ("leaf"), which refers to a node in the tree
       (either a static-type tree or instance tree).

    This function takes a descriptor string as it appears in a template
    (ie. stripped of its '$'/'@' prefix and surrounding {} brackets)
    and returns the 3-tuple:
        (cat_name, cat_ptkns, leaf_ptkns)
    where cat_name  is the category name (no path information),
          cat_ptkns is a --suggestion-- for where to find the node
                    containing that category (a path-token list), and
          leaf_ptkns is the path-token list locating the leaf node.

    Examples:
    "AtomType:/Water/O"  ->  ('AtomType', [], ['', 'Water', 'O'])
    "AtomID:H1"          ->  ('AtomID',   [], ['H1'])
    "mol:"               ->  ('mol',      [], [])
    "../mol"             ->  ('mol',      [], ['..'])
    "/peptide[3]/ResID:res[25]"
                         ->  ('ResID', ['', 'peptide[3]'], ['res[25]'])
    """
    parts = descr_str.split(':')
    if len(parts) > 2:
        raise InputError('Error('+g_module_name+'.DescrToCatLeafPtkns())\n'
                         ' Error near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n\n'
                         ' Bad variable descriptor: \"'+descr_str+'\"\n'+
                         ' There can be at most one \':\' character in a variable descriptor.\n')
    elif len(parts) == 2:
        # ---- Colon syntax (example '$atom:H1') ----
        before_colon, after_colon = parts
        # The category name is the text after the last '/' (if present)
        # and before the ':'.  Anything before that is the suggested
        # (category) path.
        cat_path_tokens = before_colon.split('/')
        cat_name = cat_path_tokens[-1]
        cat_ptkns = cat_path_tokens[:-1]
        # The text after the ':' is the path leading to the leaf node.
        leaf_ptkns = after_colon.split('/') if (after_colon != '') else []
        if cat_name == '':
            raise InputError('Error('+g_module_name+'.DescrToCatLeafPtkns()):\n'
                             ' Error near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n\n'
                             ' Bad variable descriptor: \"'+descr_str+'\"\n')
    else:
        # ---- Colon-less syntax (example "$../mol") ----
        tokens = parts[0].split('/')
        cat_name = tokens[-1]      # the last token (eg. "mol") is the cat_name
        leaf_ptkns = tokens[:-1]   # the rest is the leaf's path ("..")
        if len(leaf_ptkns) == 0:
            leaf_ptkns = ['.']
        cat_ptkns = []             # no category-path suggestion in this case
    # NOTE: leaf_ptkns is deliberately NOT passed through RemoveNullTokens()
    # (this behavior was changed on 2012-8-22).
    return cat_name, RemoveNullTokens(cat_ptkns), leaf_ptkns
def DescrToCatLeafNodes(descr_str,
                        context_node,
                        dbg_loc,
                        create_missing_nodes=False):
    """
    Variables in ttree correspond to nodes in a tree
    (and also categories to which they belong).
    DescrToCatLeafNodes() reads the name of a variable
    (its descriptor) and determines where in the tree
    this variable resides, and what its category is.
    This function is the heart of ttree because it is
    the function used to interpret ttree variable syntax.

    Arguments:
    descr_str     The complete name that the user gave
                  to the variable.  (Excluding '$' or '@')
    context_node  The class (node) in which the variable
                  was used.  descr_str is interpreted
                  relative to this context.  (This argument
                  is similar to the current directory
                  in which a command was issued in unix.)
    dbg_loc       The location in the user's input file(s)
                  where this variable is referred to.
    create_missing_nodes
                  If we look up a variable whose leaf node
                  does not exist yet, should we create it?
                  Setting this argument to "True" allows
                  us to augment the tree to add nodes
                  corresponding to variables.

    Returns the 3-tuple (cat_name, cat_node, leaf_node); all three
    identifiers are needed to uniquely identify a ttree variable.

    -- Here is a greatly simplified version of DescrToCatLeafNodes(): --
    def DescrToCatLeafNodes(descr_str, context_node, dbg_loc):
        cat_name, cat_ptkns, leaf_ptkns = DescrToCatLeafPtkns(descr_str, dbg_loc)
        cat_node = PtknsToNode(cat_ptkns, context_node, dbg_loc)
        if len(cat_ptkns) > 0:
            leaf_node = PtknsToNode(leaf_ptkns, cat_node, dbg_loc)
        else:
            leaf_node = PtknsToNode(leaf_ptkns, context_node, dbg_loc)
        return cat_name, cat_node, leaf_node
    (This version works, but it does not handle "..." correctly,
    and it does not create missing nodes when needed.)

    -- A review of terminology: --
    Leaf Node:
        Conceptually, the variable's NAME is the leaf node.  Variables in
        ttree belong to the class in which they are defined; the node's
        location in the tree represents which class that is.  A leaf node
        alone does not uniquely identify a variable: a single node can
        refer to two different variables if they are in different
        categories.
    Category Node/Name:
        Categories are groups of variables that share the same counter
        when numeric values are automatically assigned.  Counters are
        typically global, but can have local scope (ie. only defined
        within a class, or an instantiated class, and whatever other
        classes are nested or instantiated beneath it).  Therefore to
        identify a counter/category you must specify both a name AND a
        node; the node identifies the class where the scope is defined.
        It is assumed that the leaf node lies within this scope.
    """
    cat_name, cat_ptkns, leaf_ptkns = DescrToCatLeafPtkns(descr_str, dbg_loc)

    # ---- ellipsis hack ----
    #
    # Most users expect ttree.py to behave like a standard programming
    # language: if the class they refer to was not defined in this
    # specific location, they expect ttree.py to search for it outwards,
    # first in the parent's environment, then the parent's parent's
    # environment, and so on, until the object is found.  For example,
    # "@atom:Res/CA" used inside class Protein should find "Res" defined
    # immediately outside Protein's environment.  The general way to
    # request this in ttree.py is the ellipsis syntax "@atom:.../Res/CA":
    # the ".../" tells ttree.py to search upwards for the object to the
    # right of it ("Res").  To make ttree.py behave the way most users
    # expect, we artificially insert a ".../" before the class name here.
    # (The code that processes the ".../" symbol later on will take care
    # of actually finding it.)
    #
    # We only do this for variables with path information such as
    # "@atom:Res/CA" (ie. when leaf_ptkns has more than one token).
    # For simple variables like "@atom:CA", we don't automatically look
    # upwards unless the user explicitly requests it.  (That's why we
    # check len(leaf_ptkns) > 1 below before inserting the '...'.)
    if ((descr_str.find(':') != -1) and
        (len(leaf_ptkns) > 1) and
        (len(leaf_ptkns[0]) > 0) and
        (leaf_ptkns[0][0] not in ('.','*','?'))):
        leaf_ptkns.insert(0, '...')
    # ---- Done with "ellipsis hack" -----

    cat_node = None
    cat_start_node = context_node
    leaf_start_node = context_node
    if (len(cat_ptkns) > 0):
        if cat_ptkns[-1] == '...':
            # The "..." in this position means trace the path from the
            # current node (context_node) up to cat_ptkns[:-1].
            cat_start_node = PtknsToNode(cat_ptkns[:-1], context_node, dbg_loc)
            # Later on, we will search upwards until we find an ancestor
            # node containing a category matching cat_name.  This will
            # be taken care of below.  (See "if cat_node is None:".)
        else:
            # In this case, the user supplied an explicit path
            # for the category node.  Find it now.
            cat_node = PtknsToNode(cat_ptkns, context_node, dbg_loc)
            # Whenever the user supplies an explicit path, then
            # the cat node should be the starting location from
            # which the leaf path is interpreted.  This nearly
            # insures that the leaf node will lie within the scope
            # of the category node, which is what we want.
            leaf_start_node = cat_node
    if cat_node is None:
        # Otherwise, the user did not indicate where the category
        # node is defined, but only supplied the category name.
        # (This is the most common scenario.)
        # In this case, climb up the tree to the parent
        # until you find an ancestor with a category whose
        # name matches cat_name.
        cat_node = FindCatNode(cat_name, cat_start_node, dbg_loc)

    if (cat_name not in cat_node.categories):
        if create_missing_nodes:
            # If this is the first time we encountered a variable in this
            # category (ie if it's the first time we encountered a variable
            # with this category's name and node), then we must create a
            # new entry in the cat_node.categories associative container
            # (using cat_name as the dictionary key).
            cat_node.categories[cat_name] = Category(cat_name)
        else:
            raise InputError('Error('+g_module_name+'.DescrToCatLeafNodes()):\n'
                             ' Error near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'
                             ' Category named \"'+cat_name+'\" not found at\n'
                             ' position '+NodeToStr(cat_node)+'\n')

    # ---------- Now look up the leaf node -----------
    if (len(leaf_ptkns) > 0) and (leaf_ptkns[-1] == 'query()'):
        # Special case: "query()"
        # Variables named "query()" are not really variables.
        # (They are a way for users to query a category's counter.)
        # But we treat them as such internally.  Consequently we
        # give them unique names to avoid clashes (just in case
        # "query()" appears multiple times in the same context).
        leaf_ptkns[-1] = '__query__' + str(dbg_loc.order)

    # Lookup the path for the leaf:
    #
    # Often, the leaf that the path refers to does not
    # exist yet.  For example, it is common for a template to
    # contain a reference to "$atom:CA".  If the current context_node
    # is "/protein1/res22", this means that the leaf should be
    # at "/protein1/res22/CA".  (However in this example, "CA"
    # is not a class that has been defined yet.  It is the name
    # of a variable which may not have even been mentioned
    # before.  Think of "CA" as a variable placeholder.)
    #
    # So we follow the path tokens as far as we can:
    i_last_ptkn, last_node = FollowPath(leaf_ptkns,
                                        leaf_start_node,
                                        dbg_loc)
    # Did we find the node?
    if i_last_ptkn == len(leaf_ptkns):
        leaf_node = last_node
    else:
        # If we are here, then we did not find the node.
        # The unrecognized token is stored in
        # leaf_ptkns[i_last_ptkn]
        if leaf_ptkns[i_last_ptkn] == '...':
            # The "..." means different things depending on
            # whether or not it is the last token in leaf_ptkns.
            if i_last_ptkn+1 < len(leaf_ptkns):
                # If "..." is NOT the last token in leaf_ptkns, we
                # should search for an ancestor of this node who has
                # a child whose name matches the requested target
                # string (located in leaf_ptkns[i_last_ptkn+1])
                search_target = leaf_ptkns[i_last_ptkn+1]
                # If such an ancestor exists, then FollowPath()
                # should have already found it for us.
                # This means it was not found.
                # So if there is only one more token in the
                # list of tokens, then create the needed node
                if (create_missing_nodes and
                    (i_last_ptkn+1 == len(leaf_ptkns)-1)):
                    # Create a new leaf node and link it:
                    new_leaf_name = leaf_ptkns[-1]
                    parent_node = last_node
                    # Is this parent_node a StaticObj? (..or inherit from StaticObj?)
                    if isinstance(parent_node, StaticObj):
                        parent_node.children[new_leaf_name] = StaticObj(new_leaf_name, parent_node)
                    elif isinstance(parent_node, InstanceObj):
                        parent_node.children[new_leaf_name] = InstanceObjBasic(new_leaf_name, parent_node)
                    else:
                        assert(False) # (only 2 types of nodes are possible)
                    # Now assign the pointer
                    leaf_node = parent_node.children[new_leaf_name]
                else:
                    # In that case, we were unable to find the node referenced by "..."
                    raise InputError('Error('+g_module_name+'.DescrToCatLeafNodes()):\n'
                                     ' Broken path.\n'
                                     ' class/variable \"'+search_target+'\" not found in this\n'
                                     ' context: \"'
                                     +descr_str+'\"\n'
                                     ' located near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno))
            else: # if i_last_ptkn+1 < len(leaf_ptkns):
                # If "..." IS the last token, then it means
                # we want to search for the CATEGORY NAME.
                # This is very different.  It means we need to
                # search backwards up the ancestor tree until
                # we find an ancestor variable (of last_node)
                # which has the right category (ie until you
                # find an ancestor node with a variable (VarRef)
                # pointing to it belonging to the correct
                # category node and name (determined above)).
                # If not found, then use the current context_node.
                assert(cat_name in cat_node.categories)
                var_bindings = cat_node.categories[cat_name].bindings
                node = last_node
                while (node != None):
                    # Recall that cat_node.categories[cat_name]
                    # is a dictionary whose keys are leaf nodes
                    # corresponding to the variables in this category.
                    if node in var_bindings:
                        # then we found it, and we're done
                        break
                    else:
                        node = node.parent
                if node != None:
                    leaf_node = node
                else:
                    # If not found, have it point to the
                    # current (context) node.
                    leaf_node = context_node
            # -----------------------------------------------
            # -- Finished dealing with '...' in leaf_ptkns --
            # -----------------------------------------------
        elif (create_missing_nodes and
              ((i_last_ptkn == len(leaf_ptkns)-1) or
               HasWildCard('/'.join(leaf_ptkns)))):
            # Again, another reason the leaf node was not found is
            # that it refers to a leaf node which has not yet been
            # created.  If the path was valid up until the last
            # token, then we should create a new node with this name.
            # -- This is a common scenario. --
            # -- This is how all new variables are created. --
            # Anyway, we handle that here:
            # Create a new leaf node and link it.
            # (Bug fix: removed a redundant assignment of leaf_ptkns[-1]
            #  to new_leaf_name which was immediately overwritten by
            #  the '/'.join(...) expression below.)
            new_leaf_name = '/'.join(leaf_ptkns[i_last_ptkn:])
            parent_node = last_node
            # Is this parent_node a StaticObj? (..or does it inherit from StaticObj?)
            if isinstance(parent_node, StaticObj):
                parent_node.children[new_leaf_name] = StaticObj(new_leaf_name, parent_node)
            elif isinstance(parent_node, InstanceObj):
                parent_node.children[new_leaf_name] = InstanceObjBasic(new_leaf_name, parent_node)
            else:
                assert(False) # (only 2 types of nodes are possible)
            # Now assign the pointer
            leaf_node = parent_node.children[new_leaf_name]
        else:
            # Otherwise, the user made a mistake in the path.
            # Figure out which kind of mistake and print an error.
            if (last_node.parent is None) and (leaf_ptkns[i_last_ptkn] == '..'):
                # In that case, we tried to back out beyond the root of the tree.
                raise InputError('Error('+g_module_name+'.DescrToCatLeafNodes()):\n'
                                 ' Broken path in variable:\n'
                                 ' \"'+ descr_str + '\"\n'
                                 ' located near '+
                                 ErrorLeader(dbg_loc.infile,
                                             dbg_loc.lineno)+'\n'
                                 ' There are too many \"..\" tokens in the path string.')
            else:
                # Then the reason is: the string in leaf_ptkns[i_last_ptkn]
                # was supposed to be a child of last_node but a child
                # of that name was not found.
                raise InputError('Error('+g_module_name+'.DescrToCatLeafNodes()):\n'
                                 ' Broken path / Undefined variable:\n'
                                 ' \"'+ descr_str + '\"\n'
                                 ' located near '+
                                 ErrorLeader(dbg_loc.infile,
                                             dbg_loc.lineno)+'\n'
                                 ' Undefined: \"'+PtknsToStr(leaf_ptkns)+'\"\n'
                                 ' (Specifically \"'+leaf_ptkns[i_last_ptkn]+
                                 '\" is not a subordinate of \"'+MaxLenStr(last_node.name,'/')+'\")')
    return cat_name, cat_node, leaf_node
def DescrToVarBinding(descr_str, context_node, dbg_loc):
    """ DescrToVarBinding() is identical to LookupVar(), but it has a name
    that is harder to remember.  See comment for LookupVar() below.

    Returns the VarBinding (variable-name:value pair) for the variable
    described by descr_str (interpreted relative to context_node).
    Raises InputError if the variable's category has not been defined at
    the relevant node, or if the variable itself has no binding yet.
    """
    cat_name, cat_node, leaf_node = DescrToCatLeafNodes(descr_str,
                                                        context_node,
                                                        dbg_loc)
    if cat_name not in cat_node.categories:
        raise InputError('Error('+g_module_name+'.DescrToVarBinding()):\n'
                         ' Error near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'
                         ' Bad variable reference: \"'+descr_str+'\". There is\n'
                         ' no category named \"'+cat_name+'\" defined for "'+NodeToStr(cat_node)+'\".\n')
    var_bindings = cat_node.categories[cat_name].bindings
    if leaf_node not in var_bindings:
        # Bug fix: this branch previously raised an error message which
        # (incorrectly) claimed the *category* was undefined.  The actual
        # problem here is that the variable itself has no binding yet.
        raise InputError('Error('+g_module_name+'.DescrToVarBinding()):\n'
                         ' Error near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'
                         ' Bad variable reference: \"'+descr_str+'\".\n'
                         ' The variable \"'+NodeToStr(leaf_node)+'\" has no binding (yet)\n'
                         ' in category \"'+cat_name+'\".\n')
    return var_bindings[leaf_node]
# Wrappers:
def LookupVar(descr_str, context_node, dbg_loc):
""" LookupVar() looks up a string (a variable descriptor, which is the
variable's name, excluding the '$', '@' prefixes and any '{}' brackets.)
This function returns the variable's "VarBinding" (the variable-name:value
pair). This is useful for querying or changing the value of a variable.
Because nearly all variables are local, you must specify the starting
node (ie. the node corresponding to the class in which this class
or variable was referred to). This is typically the global node.
"""
return DescrToVarBinding(descr_str, context_node, dbg_loc)
def LookupNode(obj_name, starting_node, dbg_loc):
    """Convenience wrapper around StrToNode().

    Parse a path-like string (eg. '../ClassA/NestedClassB' or
    'ClassA/NestedClassB/VariableC') and return the corresponding node.
    Nodes are data types used for representing a class or class instance;
    they are also used for storing variables.
    Because nearly all variables are local, you must specify the starting
    node (ie. the node corresponding to the class in which this class or
    variable was referred to).  This is typically the global node.
    """
    return StrToNode(obj_name, starting_node, dbg_loc)
class SimpleCounter(object):
    """A minimal counter: query() returns the current value, incr()
    advances it by a fixed increment.  The counter starts one increment
    *behind* n0, so the first call to incr() brings it up to n0."""
    __slots__ = ["n", "nincr"]

    def __init__(self, n0=1, nincr=1):
        self.nincr = nincr
        self.n = n0 - nincr

    def query(self):
        """Return the current counter value (without incrementing)."""
        return self.n

    def incr(self):
        """Advance the counter by one increment."""
        self.n += self.nincr

    def __copy__(self):
        """Return an independent (deep) copy of the counter in its
        current state."""
        return SimpleCounter(self.n + self.nincr, self.nincr)
class Category(object):
    """
    Category holds all of the variables which belong to the same
    category, plus supporting bookkeeping.

    Attributes:
    name      The name of the category (a string).
    bindings  An OrderedDict of leaf_node:VarBinding (key:value) pairs.
              Variables are looked up by their leaf node; the actual
              variable name (which refers to the leaf node) and value
              are both stored in the VarBinding data structure.
    counter   A counter object (eg. "SimpleCounter").  Each call to
              counter.incr() should yield a new unique value (typically
              an incremented integer).
    manual_assignments  Values the user assigned explicitly.
    reserved_values     Values the automatic counter must not reuse.
    """
    __slots__ = ["name", "bindings", "counter",
                 "manual_assignments", "reserved_values"]

    def __init__(self,
                 name='',
                 bindings=None,
                 counter=None,
                 manual_assignments=None,
                 reserved_values=None):
        self.name = name
        # For each optional argument, substitute a fresh default when the
        # caller passed None (avoids the shared-mutable-default pitfall).
        self.bindings = OrderedDict() if (bindings is None) else bindings
        self.counter = SimpleCounter(1, 1) if (counter is None) else counter
        self.manual_assignments = (OrderedDict() if (manual_assignments is None)
                                   else manual_assignments)
        self.reserved_values = (OrderedDict() if (reserved_values is None)
                                else reserved_values)
class StaticObj(object):
""" StaticObjs and InstanceObjs:
The state of the system is stored in two different trees:
1) The static tree:
StaticObj trees are similar "class" definitions in an OOP language.
These trees contains class definitions, and their nested classes,
and instructions for how to create new instances (copies) of this class.
Nodes in this tree are stored using StaticObjs:
2) The instance tree:
This tree contains classes that have been instantiated, and any sub-
classes (members or attributes) that are instantiated as a result.
This tree is automatically generated by instantiating the root
StaticObj. Nodes in this tree are stored using InstanceObjs.
StaticObjs and InstanceObjs both contain
"commands" (commands which usually involve instructions
for writing templates)
"categories" (local counters used to assign variables. See below.)
"children" (Nested class definitions -NOT- OOP child classes. See below.)
StaticObjs also contain
"instance_commands"
"instance_categories"
These three members contain information to create a new instance/copy
of this class (how to construct an InstanceObj from an StaticObj).
StaticObj contains the member function Parse() which builds the global static
tree by parsing the contents of a text file supplied by the user.
The external function BuildInstanceTree(), creates the global instance tree
from the global static tree (a tree of StaticObjs).
----- CLASS MEMBERS OF StaticObj: ----
0) Name:
Every class (object type) has a name. It is stored in self.name.
To make it easier to distinguish the names of classes from the names of
individual instances of that class, I recommend using a capital letter
for the name of a class type (and lower-case letters for instances).
1) Commands
Commands are usually instructions for writing templates.
Templates are blocks of ordinary text which contain variables.
(Variables in this program consist of variable names, categories,
and (eventually) bound values (usually generated automatically),
which will be substituted into the template to generate a text file.)
A class can contain multiple templates, each one having a unique name
which also happens to be the name of the file that will be created when
the template is written.
Variants:
self.commands:
Some templates are written immediate after the class is defined
(stored in "self.commands").
Example: The "write_once()" command.
self.instance_commands:
Other templates are written when an instance/copy of the class is created
(stored in "self.instance_commands".
Example: The "write()" command.
2) Children
self.children:
Class definitions can be defined from within the definition of other
("parent") classes. These nested classes are referred to as "children".
These sub-classes are not "children" in the OOP sense of the word at
all (they do not have any of the the traits of their "parents").
However in the source code I refer to them as "children" because here
they are implemented as "child" nodes (branches) in the tree-like
data-structure used to store class definitions (the static tree).
3) Categories
This is a new concept and is difficult to explain.
Recall that each class contains a list of templates containing raw text,
interspersed with variables (whose values will determined later).
In most cases, variables are assigned to integer values which are
automatically generated by incrementing a counter. Simply put,
"categories" are collections of variables which share the same counter.
Within a category, the goal is to assign a unique integer (or other
symbol) to each distinct variable in this category.
To avoid name-clashes, variable names have local "scope".
This scope is the "leaf_token"
Categories can be specific to a particular class (node), and any of the
classes (nodes) which are nested within it, but by default are global.
(This means they "belong" to the global (outermost) node by default.)
All the various categories which are defined within a particular
StaticObj are stored in self.categories.
Static variables (ie. variables with a '@' prefix) are stored this way.
"self.categories"
If a class contains a new category, it means that if any nested
classes defined within that class definition contain (static, '@')
variables whose categories match the category name, their values will
be determined by looking up the couter associated with this category
stored locally (here) in self.categories. All variables belonging
to this category are stored in "self.categories[category_name]".
"self.instance_categories"
Recall that creating a new copy (instance) of a class automatically
creates an InstanceObj in the instance-tree. InstanceObj's have a
".categories" attribute of their own, the contents of which are
copied from this StaticObj's "self.instance_categories" attribute.
Instantiating a new class also spawns the instantiation of any
sub-classes.
If any of these "instance children" contain variables whose category
names match a category stored in the parent InstanceObj's .categories
dictionary, then their values will be determined by that InstanceObj's
counter for that category name.
4) Parent:
A link to the parent StaticObj is stored in self.parent.
"""
__slots__=["name",
"parent",
"children",
"categories",
"commands",
"srcloc_begin",
"srcloc_end",
"deleted",
"class_parents",
"namespaces",
"instname_refs",
"instance_categories",
"instance_commands_push",
"instance_commands",
"instance_commands_pop"]
def __init__(self,
             name='',
             parent=None):
    """
    Initialize an (initially empty) node of the static template tree.
    The meaning of each attribute is described in the StaticObj class
    docstring.

    name   -- the name of this class definition node
    parent -- the enclosing StaticObj (None for the root of the tree)
    """
    # --- attributes shared with InstanceObj ---
    self.name = name
    self.parent = parent            # for traversing the global static tree
    self.children = OrderedDict()   # nested class definitions
    self.categories = OrderedDict() # variable categories defined only in
                                    # the context of this type definition
    self.commands = []              # commands to carry out (only once)
    ##vb##self.var_bindings=[]     # List of variables assigned to this object.
    self.srcloc_begin = None        # location in the user's input files
    self.srcloc_end = None          #  (useful for error reporting)
    self.deleted = False            # users can delete static objects
    # --- attributes NOT shared with InstanceObj ---
    self.class_parents = []         # classes we inherit traits from
                                    # (object-oriented-style inheritance)
    self.namespaces = []            # additional classes searched when
                                    # resolving static nodes or variables
                                    # (like class_parents, searches only)
    self.instname_refs = {}         # debugging aid: detect two instances
                                    # sharing the same name
    self.instance_categories = OrderedDict()  # categories scoped to this
                                              # molecule's instances
    self.instance_commands_push = []  # 1) processed first: prepended to
                                      #    InstanceObj.commands (before
                                      #    class_parents are handled)
    self.instance_commands = []       # 2) then added to InstanceObj.commands
    self.instance_commands_pop = []   # 3) finally these commands are added
def DeleteSelf(self):
    """Mark this node, and every node nested within it, as deleted."""
    for descendant in self.children.values():
        descendant.DeleteSelf()
    self.deleted = True
def IsDeleted(self):
    """Report whether this node has been marked as deleted."""
    return self.deleted
##vb##def AddVarBinding(self, var_binding):
##vb## if self.var_bindings is None:
##vb## self.var_bindings = [var_binding]
##vb## else:
##vb## self.var_bindings.append(var_binding)
def Parse(self, lex):
    """ Parse() builds a static tree of StaticObjs by parsing text file.
    -The "lex" argument is a file or input stream which has been converted
    to a "TemplateLexer" object (similar to the python's built-in shlex lexer).

    Tokens are read from "lex" one at a time.  Each token is either:
      * a built-in command ("write", "write_once", "create_var(s)",
        "delete", "using namespace", "category"),
      * the name of a class being defined or augmented
        ("[class] Name { ... }" or "New = Old.suffix"),
      * the name of an instance being created ("name = new Class ...")
        or modified ("name.command(...)").
    Parsing stops when the enclosing '}' (or the end of file) is reached.
    """
    #sys.stdout.write(' -- Parse() invoked --\n')

    # Keep track of the location in the users' input files where this
    # class object is first defined. (Keep in mind that the user might
    # augment their original class definition, adding new content to an
    # existing class. In that case self.srcloc_begin will have already
    # been assigned. We don't want to overwrite it in that case.)
    if self.srcloc_begin is None:  # <-- not defined yet?
        self.srcloc_begin = lex.GetSrcLoc()

    while True:
        cmd_token = lex.get_token()
        #print('Parse(): token = \"'+cmd_token+'\", '+lex.error_leader())
        if cmd_token == lex.eof:
            #print('Parse(): EOF encountered\n')
            break

        if ((cmd_token == 'write') or
            (cmd_token == 'write_once') or
            (cmd_token == 'create_var') or
            (cmd_token == 'create_vars')):
            open_paren = lex.get_token()
            #print('Parse(): open_paren=\"'+open_paren+'\"')
            if open_paren == '{':
                # ..then the user neglected to specify the "dest" file-name
                # argument. In that case, supply the default, ''.
                # (which is shorthand for the standard out in this case)
                open_curly = open_paren[0]
                open_paren = ''
                close_paren = ''
                tmpl_filename = ''
                srcloc = lex.GetSrcLoc()
            else:
                tmpl_filename = lex.get_token()
                if tmpl_filename == ')':
                    tmpl_filename = ''
                    close_paren = ')'
                else:
                    close_paren = lex.get_token()
                open_curly = lex.get_token()
                srcloc = lex.GetSrcLoc()

            if ((cmd_token == 'create_var') or
                (cmd_token == 'create_vars')):
                tmpl_filename = None
                # This means: define the template without attaching
                # a file name to it. (IE., don't write the contents
                # of what's enclosed in the curly brackets { } to a file.)

            if ((open_curly != '{') or
                ((open_paren == '') and (close_paren != '')) or
                ((open_paren == '(') and (close_paren != ')'))):
                raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                 ' Error in '+lex.error_leader()+'\n\n'
                                 'Syntax error at the beginning of the \"'+cmd_token+'\" command.')
            # (idiom fix: "is not None" instead of "!= None")
            if tmpl_filename is not None:
                tmpl_filename = RemoveOuterQuotes(tmpl_filename, lex.quotes)
            # ( The previous line is similar to:
            #   tmpl_filename = tmpl_filename.strip(lex.quotes) )
            tmpl_contents = lex.ReadTemplate()
            StaticObj.CleanupReadTemplate(tmpl_contents, lex)

            #sys.stdout.write('  Parse() after ReadTemplate, tokens:\n\n')
            #print(tmpl_contents)
            #sys.stdout.write('\n----------------\n')

            if cmd_token == 'write_once':
                # Check for a particular bug:
                #     Ordinary instance variables (preceded by a '$')
                #     should never appear in a write_once() statement.
                for entry in tmpl_contents:
                    if (isinstance(entry, VarRef) and
                        (entry.prefix[0] == '$')):
                        raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                         ' Error near '+ErrorLeader(entry.srcloc.infile,
                                                                    entry.srcloc.lineno)+'\n'
                                         ' Illegal variable: \"'+entry.prefix+entry.descr_str+entry.suffix+'\"\n'
                                         ' All variables in a \"write_once()\" statement must be statically\n'
                                         ' defined, and hence they must begin with a \'@\' prefix character.\n'
                                         ' (not a \'$\' character).\n'
                                         ' Suggestion: Use the \"write()\" command instead.\n')

            # Decide which command list receives the new WriteFileCommand:
            # "write_once" runs once (static); the others run per-instance.
            if cmd_token == 'write':
                commands = self.instance_commands
            elif cmd_token == 'write_once':
                commands = self.commands
            elif ((cmd_token == 'create_var') or
                  (cmd_token == 'create_vars')):
                commands = self.instance_commands
            else:
                assert(False)

            command = WriteFileCommand(tmpl_filename,
                                       tmpl_contents,
                                       srcloc)
            commands.append(command)

        # end of "if (cmd_token == 'write') or (cmd_token == 'write_once'):"

        elif cmd_token == 'delete':
            instobj_descr_str = lex.get_token()
            instobj_srcloc = lex.GetSrcLoc()
            delete_command = DeleteCommand(instobj_srcloc)
            mod_command = ModCommand(delete_command,
                                     instobj_descr_str)
            self.instance_commands.append(mod_command)

        elif cmd_token == 'using':
            namespacecom_str = lex.get_token()
            if namespacecom_str != 'namespace':
                raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                 ' Error near '+lex.error_leader()+'\n'
                                 ' The \"'+cmd_token+'\" command must be followed by the \"namespace\" keyword.')
            namespace_str = lex.get_token()
            stnode = StrToNode(namespace_str,
                               self,
                               lex.GetSrcLoc())
            self.namespaces.append(stnode)

        elif cmd_token == 'category':
            # Syntax:  category @name(start, increment)
            # The parenthesized arguments are optional.
            cat_name = lex.get_token()
            cat_count_start = 1
            cat_count_incr = 1
            open_paren = lex.get_token()
            if (open_paren == '('):
                token = lex.get_token()
                if token == ',':
                    token = lex.get_token()
                if token != ')':
                    # Interpret token as an integer, float, or string
                    try:
                        cat_count_start = int(token)
                    except ValueError:
                        try:
                            cat_count_start = float(token)
                        except ValueError:
                            cat_count_start = RemoveOuterQuotes(token, '\'\"')
                    token = lex.get_token()
                if token == ',':
                    token = lex.get_token()
                if token != ')':
                    # Interpret token as an integer,float,or string
                    try:
                        cat_count_incr = int(token)
                    except ValueError:
                        try:
                            cat_count_incr = float(token)
                        except ValueError:
                            cat_count_incr = RemoveOuterQuotes(token, '\'\"')
                    token = lex.get_token()
                if token != ')':
                    raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                     ' Error near '+lex.error_leader()+'\n'
                                     ' \"'+cmd_token+' '+cat_name+'...\" has too many arguments,\n'
                                     ' or lacks a close-paren \')\'.\n')
            else:
                # No '(': the token belongs to the next statement.
                lex.push_token(open_paren)

            if (isinstance(cat_count_start, basestring) or
                isinstance(cat_count_incr, basestring)):
                raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                 ' Error near '+lex.error_leader()+'\n'
                                 ' \"'+cmd_token+' '+cat_name+'('+
                                 str(cat_count_start)+','+
                                 str(cat_count_incr)+')\"\n'
                                 ' Only numeric counters are currently supported.\n')

            # check for really stupid and unlikely errors:
            if type(cat_count_start) is not type(cat_count_incr):
                if ((isinstance(cat_count_start, int) or
                     isinstance(cat_count_start, float))
                    and
                    (isinstance(cat_count_incr, int) or
                     isinstance(cat_count_incr, float))):
                    # One is an int and the other a float: promote both.
                    cat_count_start = float(cat_count_start)
                    cat_count_incr = float(cat_count_incr)
                else:
                    raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                     ' Error near '+lex.error_leader()+'\n'
                                     ' Problem with \"'+cmd_token+'\" command.\n')

            prefix = cat_name[0]
            cat_name = cat_name[1:]
            # Add this category to the list.
            if prefix == '@':
                self.categories[cat_name] = Category(cat_name)
                self.categories[cat_name].counter = SimpleCounter(cat_count_start,
                                                                  cat_count_incr)
            elif prefix == '$':
                self.instance_categories[cat_name] = Category(cat_name)
                self.instance_categories[cat_name].counter = SimpleCounter(cat_count_start,
                                                                           cat_count_incr)
            else:
                # BUGFIX: this message used to claim the valid prefixes were
                # '$' or '&', but the code accepts '@' (static) or '$' (instance).
                raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                 ' Error near '+lex.error_leader()+'\n'
                                 ' category name = \"'+cat_name+'\" lacks a \'$\' or \'@\' prefix.\n'
                                 ' This one-character prefix indicates whether the variables in this\n'
                                 ' new category will be static or dynamic variables\n')

        elif (cmd_token == '}') or (cmd_token == ''):
            # a '}' character means we have reached the end of our scope.
            # Stop parsing and let the caller deal with the remaining text.
            # (And a '' means we reached the end of the file... I think.)
            break

        #elif (cmd_token == 'include'):
            # "include filename" loads a file (adds it to the file stack)
            # The "TtreeShlex" class (from which "lex" inherits) handles
            # "include" statements (ie. "source" statements) automatically.

        else:
            # Otherwise, 'cmd_token' is not a command at all.
            # Instead it's the name of an object which needs to be
            # defined or instantiated.
            # First, let's figure out which.
            # (small detail: The "class" keyword is optional
            #  and can be skipped.)
            if cmd_token == 'class':
                object_name = lex.get_token()
            else:
                object_name = cmd_token

            next_symbol = lex.get_token()
            #print('Parse(): next_token=\"'+next_symbol+'\"')

            class_parents = []
            if next_symbol == 'inherits':
                # Then read in the list of classes which are parents of
                # of this class.  (Multiple inheritance is allowed.)
                # (We don't yet check to insure that these are valid class
                #  names.  We'll do this later in LookupStaticRefs().)
                syntax_err_inherits = False
                while True:
                    next_symbol = lex.get_token()
                    if ((next_symbol == '{') or
                        (next_symbol == lex.eof)):
                        break
                    elif (next_symbol == '='):
                        syntax_err_inherits = True
                        break
                    else:
                        class_parents.append(StrToNode(next_symbol,
                                                       self,
                                                       lex.GetSrcLoc()))
                if len(class_parents) == 0:
                    syntax_err_inherits = True
                if syntax_err_inherits:
                    raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                     ' Error near '+lex.error_leader()+'\n'
                                     ' \"inherits\" should be followed by one or more class names.\n')

            if next_symbol == '{':
                # ---- class definition: "Name { ... }" ----
                child_name = object_name
                # Check to see if this class has already been defined.
                # (IE. check if it present in the list of children.)
                # If the name (child_name) matches another class (child),
                # then the contents of the new class will be appended to
                # the old.  This way, class definitions can be augmented
                # later.  (This is the way "namespaces" work in C++.)
                child = self.children.get(child_name)
                # If found, we refer to it as "child".
                # If not, then we create a new StaticObj named "child".
                if child is None:
                    child = StaticObj(child_name, self)
                    self.children[child_name] = child
                assert(child.name == child_name)
                # Either way we invoke child.Parse(), to
                # add contents (class commands) to child.
                child.Parse(lex)
                child.class_parents += class_parents

            elif next_symbol == '=':
                next_symbol = lex.get_token()
                if next_symbol == 'new':
                    # ---- instantiation: "name = new Class ..." ----
                    base_name = object_name
                    base_srcloc = lex.GetSrcLoc()
                    array_slice_str = ''
                    if base_name.find('/') != -1:
                        raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                         ' Error near '+ErrorLeader(base_srcloc.infile,
                                                                    base_srcloc.lineno)+'\n'
                                         ' (You can not instantiate some other object\'s members.)\n'
                                         ' Invalid instance name: \"'+base_name+'\"\n')
                    elif base_name in self.instname_refs:
                        ref_srcloc = self.instname_refs[base_name]
                        raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                         ' Duplicate class/array \"'+base_name+'\"\n'
                                         ' This occurs near:\n'
                                         ' '+ErrorLeader(ref_srcloc.infile,
                                                         ref_srcloc.lineno)+'\n'
                                         ' and also near:\n'
                                         ' '+ErrorLeader(base_srcloc.infile,
                                                         base_srcloc.lineno)+'\n')
                    else:
                        self.instname_refs[base_name] = base_srcloc

                    # Check for syntax allowing the user to instantiate
                    # PART of an array.  For example, check for this syntax:
                    # "monomers[20-29] = new ...".  This only fills in a
                    # portion of the array from: monomers[20]...monomers[29]
                    #
                    # We also have to deal with multidimensional syntax
                    # like this: "cells[3][2-3][1][4-7] = new..."
                    # Split the "cells[3][2-3][2][4-7][2]" string into
                    # "cells[3][", "][1][", and "]".
                    # Later, we will instantiate InstanceObjs with names:
                    # "cells[3][2][1][4]"
                    # "cells[3][2][1][5]"
                    # "cells[3][2][1][6]"
                    # "cells[3][2][1][7]"
                    # "cells[3][3][1][4]"
                    # "cells[3][3][1][5]"
                    # "cells[3][3][1][6]"
                    # "cells[3][3][1][7]"
                    p1 = base_name.find('[')
                    if p1 == -1:
                        p1 = len(base_name)
                    else:
                        p1 += 1
                    array_name_tkns = [base_name[0:p1]]
                    array_name_offsets = []
                    p2 = -1
                    p4 = p1
                    while p4 < len(base_name):
                        p3 = base_name.find(']', p1)
                        if p3 == -1:
                            # BUGFIX: these messages previously referenced
                            # "ref_srcloc" which is unbound on this path
                            # (it is only assigned in a branch which raises).
                            raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                             ' Expected a \']\' character following:\n'
                                             ' \"'+base_name[0:p1]+'\", located near:\n'
                                             ' '+ErrorLeader(base_srcloc.infile,
                                                             base_srcloc.lineno)+'\n')
                        # Search for a '-', ':', or '*' character between []
                        # For example "monomers[20-29] = "
                        # If present, the user wants us to fill a range
                        # inside an array.  This could be a multi-dimensional
                        # array, (eg "cells[3][2-6][4-11] = "), so we must
                        # figure out which entries in the array the user
                        # wants us to fill (in this case, "[2-6][4-11]")
                        p2 = base_name.find('-', p1)
                        if p2 == -1:
                            p2 = len(base_name)
                        if p2 > p3:
                            p2 = base_name.find(':', p1)
                            if p2 == -1:
                                p2 = len(base_name)
                        if p2 > p3:
                            p2 = base_name.find('*', p1)
                            if p2 == -1:
                                p2 = len(base_name)
                        p4 = p3 + 1
                        if p4 < len(base_name):
                            if base_name[p4] == '[':
                                p4 += 1  # skip over it
                            else:
                                # BUGFIX: "forllowing"->"following", and
                                # base_srcloc instead of unbound ref_srcloc.
                                raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                                 ' Expected a \'[\' character following a \']\' character in\n'
                                                 ' \"'+base_name[0:p2+1]+'\", located near:\n'
                                                 ' '+ErrorLeader(base_srcloc.infile,
                                                                 base_srcloc.lineno)+'\n')
                        if p2 > p3:
                            # Then no '-', ':', or '*' character was found
                            # between '[' and the subsequent ']' character
                            # In that case, ignore this token
                            token = base_name[p1:p4]
                            # append all this text to the previous token
                            if len(array_name_tkns) == 0:
                                array_name_tkns.append(token)
                            else:
                                array_name_tkns[-1] = array_name_tkns[-1]+token
                            array_slice_str = 'slice '
                        else:
                            assert((p1 < p2) and (p2 < p3))
                            index_offset_str = base_name[p1:p2]
                            if len(index_offset_str) == 0:
                                index_offset = 0
                            elif (not str.isdigit(index_offset_str)):
                                # BUGFIX: base_srcloc instead of unbound ref_srcloc
                                raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                                 ' Expected a nonnegative integer preceding the \''+base_name[p2]+'\' character in:\n'
                                                 ' \"'+base_name[0:p2+1]+'\", located near:\n'
                                                 ' '+ErrorLeader(base_srcloc.infile,
                                                                 base_srcloc.lineno)+'\n')
                            else:
                                index_offset = int(index_offset_str)
                            token = base_name[p3:p4]
                            array_name_tkns.append(token)
                            array_name_offsets.append(index_offset)
                        p1 = p4

                    # If the statobj_str token contains a ']' character
                    # then this means the user wants us to make multiple
                    # copies of this template.  The number of copies
                    # to instantiate is enclosed in the [] characters
                    # (Example wat = new Water[3000] creates
                    # 3000 instantiations of the Water template
                    # named wat[1], wat[2], wat[3], ... wat[3000]).
                    # Note: Here '[' and ']' have a special meaning.
                    # So lex.get_token() should not treat them as
                    # ordinary word characters.  To prevent this:
                    orig_wordterminators = lex.wordterminators
                    lex.wordterminators += '[],'

                    class_name_str = lex.get_token()
                    if ((class_name_str == lex.eof) or
                        (class_name_str == '}')):
                        raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                         ' Error near '+lex.error_leader()+'\n'
                                         'Class ends prematurely. (Incomplete \"new\" statement.)')
                    assert(len(class_name_str) > 0)
                    if (class_name_str[0] == '['):
                        raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                         ' Error near '+lex.error_leader()+'\n'
                                         ' new '+class_name_str+'\n'
                                         'Bracketed number should be preceeded by a class name.')

                    class_names = []
                    weights = []
                    num_by_type = []
                    if class_name_str == 'random':
                        class_names, weights, num_by_type = self._ParseRandom(lex)
                        tmp_token = lex.get_token()
                        if len(tmp_token) > 0:
                            if tmp_token[0] == '.':
                                raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                                 ' Error near '+lex.error_leader()+'\n'
                                                 ' \"'+tmp_token+'\" should not follow random()\n'
                                                 '\n'
                                                 ' Coordinate transformations and other commands (such as \"'+tmp_token+'\")\n'
                                                 ' should appear after each class name inside the random() statement,\n'
                                                 ' not after it. For example, do not use:\n'
                                                 ' \"lipids=new random([DPPC,DLPC],[0.5,0.5]).move(0,0,23.6)\"\n'
                                                 ' Use this instead:\n'
                                                 ' \"lipids=new random([DPPC.move(0,0,23.6),DLPC.move(0,0,23.6)],[0.5,0.5])\"\n')
                            lex.push_token(tmp_token)
                    else:
                        class_name, class_suffix, class_suffix_srcloc = \
                            self._ProcessClassName(class_name_str, lex)

                    array_size = []
                    array_suffixes = []
                    array_srclocs = []

                    # A general "new" statement could look like this:
                    #   "m = new Mol.scale(3) [2].trans(0,4.5,0).rotate(30,0,0,1)
                    #                         [3].trans(0,0,4.5)"
                    # So far we have processed "m = new Mol.scale(3)".
                    # Now, we need to deal with:
                    #   "[2].trans(0,4.5,0).rotate(30,0,0,1) [3].trans(0,0,4.5)"
                    while True:
                        new_token = lex.get_token()
                        #if ((new_token == '') or (new_token == lex.eof)):
                        #    break
                        if new_token == '[':
                            number_str = lex.get_token()
                            close_bracket = lex.get_token()
                            if ((not str.isdigit(number_str)) or
                                (close_bracket != ']')):
                                raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                                 ' Error in \"new\" statement near '+lex.error_leader()+'\n'
                                                 ' A \'[\' character should be followed by a number and a \']\' character.')
                            array_size.append(int(number_str))
                            suffix = lex.get_token()
                            if ((suffix == '') or (suffix == lex.eof)):
                                array_suffixes.append('')
                                array_srclocs.append(base_srcloc)
                                break
                            if suffix[0] == '.':
                                lex.push_token(suffix[1:])
                                suffix_func = lex.GetParenExpr()
                                suffix = '.' + suffix_func
                                array_suffixes.append(suffix)
                                array_srclocs.append(lex.GetSrcLoc())
                            else:
                                array_suffixes.append('')
                                array_srclocs.append(base_srcloc)
                                lex.push_token(suffix)
                                if suffix != '[':
                                    break
                        else:
                            lex.push_token(new_token)
                            break
                    srcloc_final = lex.GetSrcLoc()
                    lex.wordterminators = orig_wordterminators

                    assert(len(array_size) == len(array_suffixes))

                    if len(array_size) > 0:
                        if len(array_name_offsets) == 0:
                            assert(len(array_name_tkns) == 1)
                            array_name_offsets = [0] * len(array_size)
                            array_name_tkns[0] = array_name_tkns[0] + '['
                            for d in range(0, len(array_size)-1):
                                array_name_tkns.append('][')
                            array_name_tkns.append(']')
                        if len(array_name_offsets) != len(array_size):
                            raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                             ' Error in \"new\" statement near/before '+lex.error_leader()+'\n'
                                             ' Array '+array_slice_str+'dimensionality on the left side of the \'=\' character ('+str(len(array_name_offsets))+')\n'
                                             ' does not match the array dimensionality on the right side ('+str(len(array_size))+').\n')

                    # If the user wants us to instantiate a
                    # multidimensional array of class instances
                    # then we must loop through this multidimensional
                    # array and create a new instance for each entry.
                    # For example fill a 3 dimensional volume
                    # with 1000 water molecules
                    # Example 1:
                    #    solvent = new Water [10][10][10]
                    #  (The coordinates must be read separately.)
                    #  In this example array_size = [10,10,10]
                    #                  array_suffixes = ['','','']
                    # Example 2:
                    #    solvent = new Water.transcm(0,0,0)
                    #                    [10].trans(0,0,4)
                    #                    [10].trans(0,4,0).rot(45,0,0,1)
                    #                    [10].trans(4,0,0)
                    #  (This command generates a 10x10x10 lattice
                    #   simple cubic lattice of regularly spaced
                    #   water molecules pointing the same direction.)
                    #  In this example array_size = [10,10,10]
                    #  and class_suffix = 'transcm(0,0,0)'
                    #  and array_suffixes = ['trans(0,0,4)',
                    #                        'trans(0,4,0).rot(45,0,0,1)',
                    #                        'trans(4,0,0)']
                    # Note that ttree ignores the "trans()"
                    # commands, it stores them so that inherited
                    # classes can attempt to process them.
                    D = len(array_size)
                    if D > 0:
                        i_elem = 0  # (used to look up selection_list[])
                        if len(num_by_type) > 0:
                            selection_list = []
                            for i in range(0, len(num_by_type)):
                                selection_list += [i]*num_by_type[i]
                            random.shuffle(selection_list)
                            num_elements = 1
                            for d in range(0, D):
                                num_elements *= array_size[d]
                            err_msg_str = str(array_size[0])
                            for d in range(1, D):
                                err_msg_str += '*'+str(array_size[d])
                            if num_elements != len(selection_list):
                                raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                                 ' Error near or before '+lex.error_leader()+'\n'
                                                 ' The sum of the numbers in the \"new random([],[])\" command ('+str(len(selection_list))+')\n'
                                                 ' does not equal the number of elements in the array ('+err_msg_str+')\n')
                        # Iterate over the D-dimensional array the way one
                        # increments a D-digit counter.
                        digits = [0 for d in range(0, D)]
                        table_filled = False
                        pushed_commands = []
                        while (not table_filled):
                            instance_name = array_name_tkns[0]
                            for d in range(0, D):
                                i = digits[d]
                                instance_name += str(i +
                                                     array_name_offsets[d]) + \
                                                 array_name_tkns[d+1]
                            # Does the user want us to select
                            # a class at random?
                            if len(class_names) > 0:
                                if len(num_by_type) > 0:
                                    class_name_str = class_names[selection_list[i_elem]]
                                else:
                                    class_name_str = RandomSelect(class_names,
                                                                  weights)
                                class_name, class_suffix, class_suffix_srcloc = \
                                    self._ProcessClassName(class_name_str, lex)
                            if class_suffix != '':
                                class_suffix_command = \
                                    PushRightCommand(class_suffix.lstrip('.'),
                                                     class_suffix_srcloc)
                                self.instance_commands.append(class_suffix_command)
                            command = \
                                InstantiateCommand(instance_name,
                                                   ClassReference(class_name,
                                                                  base_srcloc),
                                                   base_srcloc)
                            self.instance_commands.append(command)
                            if class_suffix != '':
                                command = \
                                    PopRightCommand(class_suffix_command,
                                                    srcloc_final)
                                self.instance_commands.append(command)

                            # Now go to the next entry in the table.
                            # The indices of this table are similar to
                            # a D-digit integer.  We increment this d-digit number now.
                            d_carry = D-1
                            while True:
                                digits[d_carry] += 1
                                if digits[d_carry] >= array_size[d_carry]:
                                    digits[d_carry] = 0
                                    if array_suffixes[d_carry] != '':
                                        for i in range(0, array_size[d_carry]-1):
                                            partner = pushed_commands.pop()
                                            command = PopRightCommand(partner,
                                                                      srcloc_final)
                                            self.instance_commands.append(command)
                                    d_carry -= 1
                                else:
                                    if array_suffixes[d_carry] != '':
                                        command = PushRightCommand(array_suffixes[d_carry].lstrip('.'),
                                                                   array_srclocs[d_carry])
                                        pushed_commands.append(command)
                                        self.instance_commands.append(command)
                                    break
                                if d_carry < 0:
                                    table_filled = True
                                    break
                            i_elem += 1  # (used to look up selection_list[])
                    else:
                        # D == 0: a single (non-array) instantiation.
                        if len(class_names) > 0:
                            assert(len(num_by_type) == 0)
                            class_name_str = RandomSelect(class_names,
                                                          weights)
                            class_name, class_suffix, class_suffix_srcloc = \
                                self._ProcessClassName(class_name_str, lex)
                        if class_suffix != '':
                            class_suffix_command = \
                                PushRightCommand(class_suffix.lstrip('.'),
                                                 class_suffix_srcloc)
                            self.instance_commands.append(class_suffix_command)
                        command = \
                            InstantiateCommand(base_name,
                                               ClassReference(class_name,
                                                              base_srcloc),
                                               base_srcloc)
                        self.instance_commands.append(command)
                        if class_suffix != '':
                            command = \
                                PopRightCommand(class_suffix_command,
                                                srcloc_final)
                            self.instance_commands.append(command)

                else:
                    # Now check for commands using this syntax:
                    #
                    # "MolNew = MolOld.rot(45,1,0,0).scale(100.0)"
                    #  /|\      /|\   `-----------.------------'
                    #   |        |                |
                    # child_name parent_name      optional suffix
                    child_name = object_name
                    parent_name_str = next_symbol
                    child = StaticObj(child_name, self)
                    parent_name, suffix, suffix_srcloc = \
                        self._ProcessClassName(parent_name_str, lex)
                    child.class_parents.append(StrToNode(parent_name,
                                                         self,
                                                         lex.GetSrcLoc()))
                    if suffix != '':
                        # Assume the command is a StackableCommand. (This
                        # way it will enclose the commands of the parents.)
                        # Stackable commands come in (Push...Pop) pairs.
                        push_command = PushLeftCommand(suffix,
                                                       suffix_srcloc)
                        pop_command = PopLeftCommand(push_command,
                                                     suffix_srcloc)
                        push_mod_command = ModCommand(push_command, './')
                        pop_mod_command = ModCommand(pop_command, './')
                        child.instance_commands_push.append(push_mod_command)
                        child.instance_commands_pop.insert(0, pop_mod_command)
                    #sys.stderr.write('child.instance_commands_push = '+str(child.instance_commands_push)+'\n')
                    #sys.stderr.write('child.instance_commands_pop = '+str(child.instance_commands_pop)+'\n')

                    # Check to see if this class has already been defined.
                    if self.children.get(child_name) is not None:
                        # BUGFIX: this used to read self.children[i], but
                        # 'i' is undefined (or stale) here; the intended
                        # key is child_name.
                        if self.children[child_name].IsDeleted():
                            del self.children[child_name]
                        else:
                            raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                             ' Error near '+lex.error_leader()+'\n'
                                             ' The name \"'+child_name+'\" is already in use.')
                    self.children[child_name] = child

            else:
                # Otherwise hopefully this is a post-instance command
                # (a command applied to a class which has been instantiated)
                # In that case, the object_name would be followed by
                # a dot and a function-call containing a '(' paren (which
                # would have ended up stored in the next_symbol variable).
                open_paren_encountered = False
                if (next_symbol == '('):
                    open_paren_encountered = True
                    lex.push_token(next_symbol)  # put '(' back in the stream
                i_dot = object_name.rfind('.')
                i_slash = object_name.rfind('/')
                dot_encountered = ((i_dot != -1) and
                                   ((i_slash == -1) or (i_slash < i_dot)))
                if (open_paren_encountered and dot_encountered and
                    (object_name[:1] != '[')):
                    obj_descr_str, suffix, suffix_srcloc = \
                        self._ExtractSuffix(object_name, lex)
                    path_tokens = obj_descr_str.split('/')
                    i_last_ptkn, staticobj = FollowPath(path_tokens,
                                                        self,
                                                        lex.GetSrcLoc())
                    instobj_descr_str = './'+'/'.join(path_tokens[i_last_ptkn:])
                    # I still support the "object_name.delete()" syntax for
                    # backwards compatibility. (However newer input files
                    # use this equivalent syntax: "delete object_name")
                    if suffix == 'delete()':
                        delete_command = DeleteCommand(suffix_srcloc)
                        mod_command = ModCommand(delete_command,
                                                 instobj_descr_str)
                        staticobj.instance_commands.append(mod_command)
                    else:
                        push_command = PushLeftCommand(suffix,
                                                       suffix_srcloc,
                                                       '.')
                        pop_command = PopLeftCommand(push_command,
                                                     suffix_srcloc,
                                                     '.')
                        push_mod_command = ModCommand(push_command,
                                                      instobj_descr_str)
                        pop_mod_command = ModCommand(pop_command,
                                                     instobj_descr_str)
                        if instobj_descr_str != './':
                            #sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+
                            #                 staticobj.name+'.instance_commands\n')
                            staticobj.instance_commands.append(push_mod_command)
                            staticobj.instance_commands.append(pop_mod_command)
                        else:
                            #sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+
                            #                 staticobj.name+'.instance_commands_push\n')
                            # CONTINUEHERE: should I make these PushRight commands and
                            #               append them in the opposite order?
                            #               If so I also have to worry about the case above.
                            staticobj.instance_commands_push.append(push_mod_command)
                            staticobj.instance_commands_pop.insert(0, pop_mod_command)
                else:
                    # Otherwise, the cmd_token is not any of these:
                    # "write", "write_once", "create_vars"
                    # "delete", or "category".
                    # ... and it is ALSO not any of these:
                    # the name of a class (StaticObj), or
                    # the name of an instance (InstanceObj)
                    # followed by either a '.' or "= new"
                    #
                    # In that case, it is a syntax error:
                    raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                                     ' Syntax error at or before '+lex.error_leader()+'\n'
                                     ' \"'+object_name+' '+next_symbol+'\".')

    # Keep track of the location in the user's input files
    # where the definition of this object ends.
    self.srcloc_end = lex.GetSrcLoc()
@staticmethod
def CleanupReadTemplate(tmpl_contents, lex):
    """
    Post-process the list of tokens (TextBlocks and VarRefs) returned by
    lex.ReadTemplate():
      1) strip a single leading newline from the first text block,
      2) remove the trailing '}' terminator (raising InputError if the
         template ended before one was found),
      3) strip trailing whitespace back to the final newline.
    The list is modified in place; nothing is returned.
    """
    #1) Remove any newlines at the beginning of the first text block
    #   in tmpl_content.(Sometimes they cause ugly extra blank lines)
    assert(len(tmpl_contents) > 0)
    if isinstance(tmpl_contents[0], TextBlock):
        first_token_strip = tmpl_contents[0].text.lstrip(' ')
        if ((len(first_token_strip) > 0) and
            (first_token_strip[0] in lex.newline)):
            # Drop exactly one newline and keep line numbers accurate.
            tmpl_contents[0].text = first_token_strip[1:]
            tmpl_contents[0].srcloc.lineno += 1
    #2) Remove any trailing '}' characters, and complain if absent.
    #   The last token
    assert(isinstance(tmpl_contents[-1], TextBlock))
    assert(tmpl_contents[-1].text in ['}',''])
    if tmpl_contents[-1].text == '}':
        # NOTE(review): if '}' was the only token, this leaves the list
        # empty and step 3 below would raise IndexError — presumably
        # ReadTemplate() never produces that; verify against the lexer.
        del tmpl_contents[-1]
    else:
        # The template ran out of input before its closing '}'.
        # Report where the incomplete template began.
        tmpl_begin = None
        if isinstance(tmpl_contents[0], TextBlock):
            tmpl_begin = tmpl_contents[0].srcloc
        elif isinstance(tmpl_contents[0], VarRef):
            tmpl_begin = tmpl_contents[0].srcloc
        else:
            assert(False)
        raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
                         ' Error near '+lex.error_leader()+'\n\n'
                         ' Premature end to template.\n'
                         '(Missing terminator character, usually a \'}\'.) The\n'
                         'incomplete template begins near '+ErrorLeader(tmpl_begin.infile, tmpl_begin.lineno)+'\n')
    #3) Finally, if there is nothing but whitespace between the
    #   last newline and the end, then strip that off too.
    if isinstance(tmpl_contents[-1], TextBlock):
        i = len(tmpl_contents[-1].text)-1
        if i >= 0:
            # Walk backwards over non-newline whitespace only.
            while ((i >= 0) and
                   (tmpl_contents[-1].text[i] in lex.whitespace) and
                   (tmpl_contents[-1].text[i] not in lex.newline)):
                i -= 1
            if (tmpl_contents[-1].text[i] in lex.newline):
                tmpl_contents[-1].text = tmpl_contents[-1].text[0:i+1]
def LookupStaticRefs(self):
    """ Whenever the user requests to instantiate a new copy of a class,
    the name of that class is stored in self.instance_commands.
    This name is stored as a string.  After all of the classes have been
    defined, then we go back through the tree and replace these names
    with pointers to actual StaticObjs which correspond to those classes.
    (This was deferred until all of the classes have been defined so
    that users can refer to classes that they will define later on.)
    """
    # Resolve the class-name string inside every InstantiateCommand
    # belonging to this node.
    for cmd in self.instance_commands:
        if not isinstance(cmd, InstantiateCommand):
            continue
        assert(isinstance(cmd.class_ref.statobj_str, basestring))
        cmd.class_ref.statobj = StrToNode(cmd.class_ref.statobj_str,
                                          self,
                                          cmd.class_ref.srcloc)
    # Recurse into nested class definitions ("children") so their
    # InstantiateCommands get resolved too.
    for child in self.children.values():
        child.LookupStaticRefs()
def _ExtractSuffix(self, class_name_str, lex):
    """
    This ugly function helps process "new" commands such as:
    mola = new ForceFieldA/../MoleculeA.move(30,0,0).rot(45,0,0,1)
    This function expects a string,
    (such as "ForceFieldA/../MoleculeA.move(30,0,0).rot(45,0,0,1)")
    It extracts the class name "ForceFieldA/../MoleculeA"
    and suffix "move(30,0,0).rot(45,0,0,1)"
    Returns a (class_name, suffix, suffix_srcloc) tuple; suffix is ''
    and suffix_srcloc is None when no suffix is present.
    """
    # Dots in class names can appear for 2 reasons:
    # 1) as part of a path like "../" describing the location
    #    where this class was defined relative to the caller.
    #    In that case it will be preceeded or followed by
    #    either another dot '.', or a slash '/'
    # 2) as part of a "suffix" which appears after the name
    #    containing instructions which modify how to
    #    instantiate that class.
    # Case 1 is handled elsewhere.  Case 2 is handled here.
    i_dot = 0
    suffix_found = False
    while i_dot < len(class_name_str):
        i_dot = class_name_str.find('.', i_dot)
        if i_dot == -1:
            break
        if i_dot < len(class_name_str)-1:
            if class_name_str[i_dot+1] == '.':
                # Part of a ".." path component: skip the extra dot.
                i_dot += 1
            elif class_name_str[i_dot+1] != '/':
                # Not followed by '.' or '/': this dot begins the suffix.
                suffix_found = True
                break
        # BUGFIX: always advance past the dot we just examined.
        # Previously, when a '.' was followed by '/', find() was called
        # again at the same position, re-finding the same dot forever
        # (an infinite loop for names containing "../").
        i_dot += 1
    class_suffix = ''
    class_name = class_name_str
    class_suffix_srcloc = None
    if suffix_found:
        class_suffix = class_name_str[i_dot:]
        class_name = class_name_str[:i_dot]
        if class_name_str[-1] != ')':
            # The suffix does not already contain its parenthesized
            # arguments, so read them from the lexer.
            class_suffix += lex.GetParenExpr()
        class_suffix_srcloc = lex.GetSrcLoc()
    #sys.stderr.write('  splitting class name into class_name.suffix\n'
    #                 '  class_name=\"'+class_name+'\"\n'
    #                 '  suffix=\"'+class_suffix+'\"\n')
    return class_name, class_suffix.lstrip('.'), class_suffix_srcloc
def _ProcessClassName(self, class_name_str, lex):
"""
This function does some additional
processing (occasionaly inserting "..." before class_name).
"""
class_name, class_suffix, class_suffix_srcloc = \
self._ExtractSuffix(class_name_str, lex)
# ---- ellipsis hack ----
# (Note-to-self 2012-4-15)
# Most users expect ttree.py to behave like a
# standard programming language: If the class they are
# instantiating was not defined in this specific
# location, they expect ttree.py to search for
# it outwards, first in the parent's environment,
# and then in the parent's parent's environment,
# and so on, until the object is found.
# For example, most users expect this to work:
# class A{
# <definition_of_a_goes_here...>
# }
# class B{
# a = new A
# }
# Notice in the example above we did not have to specify where "A"
# was defined, because it is defined in the parent's
# environment (ie. immediately outside B's environment).
#
# One can obtain the equivalent behavior in ttree.py
# using ellipsis syntax: "a = new .../A" symbol.
# The ellipsis ".../" tells ttree.py to search upwards
# for the object to the right of it ("A")
# In order to make ttree.py behave the way
# most users are expecting, we artificially insert a
# ".../" before the class name here. (Later on, the
# code that processes the ".../" symbol will take
# care of finding A.)
if (len(class_name)>0) and (class_name[0] not in ('.','/','*','?')):
class_name = '.../' + class_name
return class_name, class_suffix, class_suffix_srcloc
def _ParseRandom(self, lex):
bracket1 = lex.get_token()
bracket2 = lex.get_token()
if ((bracket1 != '(') and (bracket1 != '[')):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
'Expected a \"([\" following '+class_name+'.')
class_names = []
token = ''
prev_token = '['
while True:
token = lex.get_token()
if (token == '('):
lex.push_token(token)
token = lex.GetParenExpr()
if (prev_token not in (',','[','(')):
assert(len(class_names) > 0)
class_names[-1] = prev_token + token
prev_token = prev_token + token
else:
class_names.append(token)
prev_token = token
else:
if ((token == ']') or
(token == lex.eof) or
(token == '}') or
((token in lex.wordterminators) and
(token != ','))):
if (prev_token in (',','[','(')):
class_names.append('')
break
if token != ',':
class_names.append(token)
elif (prev_token in (',','[','(')):
class_names.append('')
prev_token = token
token_comma = lex.get_token()
bracket1 = lex.get_token()
if ((token != ']') or
(token_comma != ',') or
(bracket1 != '[')):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
'Expected a list of class names enclosed in [] brackets, followed by\n'
'a comma, and then a list of probabilities also enclosed in [] brackets.\n'
'(A random-seed following another comma is optional.)')
weights = []
while True:
token = lex.get_token()
if ((token == ']') or
(token == lex.eof) or
(token == '}') or
((token in lex.wordterminators) and
(token != ','))):
break
if token != ',':
try:
weight = float(token)
except ValueError:
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' \"'+token+'\"\n'
'Expected a list of numbers enclosed in [] brackets.')
if (weight < 0.0):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' Negative numbers are not allowed in \"random(\" argument list.\n')
weights.append(weight)
bracket2 = lex.get_token()
if ((token != ']') or
(bracket2 not in (')',','))):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
'Expected a \")\" or a \",\" following the list of numeric weights.')
if len(class_names) != len(weights):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
'Unequal number of entries in object list and probability list.\n')
# Are all the entries in the "weights" array integers?
# If they are then, treat them as molecule counters,
# ot probabilities
num_by_type = []
for i in range(0, len(weights)):
# are the weights all positive integers?
n = int(weights[i])
if n == weights[i]:
num_by_type.append(n)
if len(num_by_type) < len(weights):
num_by_type = []
tot_weight = sum(weights)
if (tot_weight <= 0.0):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' The numbers in the \"random(\" argument list can not all be zero.\n')
for i in range(0,len(weights)):
weights[i] /= tot_weight
if bracket2 == ',':
try:
token = lex.get_token()
seed = int(token)
random.seed(seed)
except ValueError:
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' \"'+token+'\"\n'
'Expected an integer (a seed) following the list of weights.')
bracket2 = lex.get_token()
if (bracket2 != ')'):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' \"'+token+'\"\n'
'Expected a \")\".')
else:
random.seed()
return (class_names, weights, num_by_type)
def BuildCommandList(self, command_list):
"""
Search the commands in the tree and make a linear list of commands
in the order they should be carried out.
"""
if self.IsDeleted():
return
# Add a special note to the list of commands to indicate which object
# the commands refer to. (This might be useful one day.)
# Later we can loop through this command list and still be able to tell
# whether or not we are within the scope of a particular class or instance
# (by seeing if we are between a "ScopeBegin" and "ScopeEnd" pair).
command_list.append(ScopeBegin(self, self.srcloc_begin))
# We want to append commands to the command_list in the same order
# that these commands appear in the user's input files.
# Unfortunately the commands may be interspersed with the creation of
# new StaticObjs which have their own commands which we have to explore
# recursively.
# Fortunately each child (StaticObj) has a srcloc_begin member, so we
# can infer the correct order of all the commands belonging to the
# children and correctly insert them into the correct place in between
# the commands of the parent.
srcloc2command_or_child = {}
for command in self.commands:
srcloc2command_or_child[command.srcloc] = command
for child in self.children.values():
srcloc = child.srcloc_begin
# special case: Some children do not have a srcloc because
# they were generated automatically. These children should
# not have any commands either so we can ignore them.
if srcloc != None:
srcloc2command_or_child[srcloc] = child
else:
assert(len(child.commands) == 0)
for srcloc in sorted(srcloc2command_or_child.keys()):
entry = srcloc2command_or_child[srcloc]
if isinstance(entry, StaticObj):
child = entry
child_commands = []
child.BuildCommandList(child_commands)
command_list += child_commands
else:
command_list.append(entry)
command_list.append(ScopeEnd(self, self.srcloc_end))
def __str__(self):
out_str = self.name
if len(self.children) > 0:
out_str += '('
i = 0
for child in self.children.values():
if i+1 < len(self.children):
out_str += str(child)+', '
else:
out_str += str(child)+')'
i += 1
return out_str
def RandomSelect(entries, weights):
    """ Return an entry from a list at random using
    a (normalized) list of probabilities. """
    assert(len(entries) == len(weights))
    # Draw a uniform number in [0,1) and walk the cumulative
    # distribution until it is exceeded.  If no prefix of the weights
    # reaches the draw, the final entry is selected.
    draw = random.random()
    cumulative = 0.0
    chosen = len(weights) - 1
    for k in range(len(weights) - 1):
        cumulative += weights[k]
        if draw <= cumulative:
            chosen = k
            break
    return entries[chosen]
class InstanceObjBasic(object):
    """ A simplified version of InstanceObj.
    See the documentation/comments for InstanceObj for more details.
    (Leaf nodes (variables) are typically stored as InstanceObjBasic objects.
    More general, non-leaf nodes are stored using InstanceObj objects.)
    """
    # __slots__ keeps per-instance memory low; there may be an enormous
    # number of leaf nodes in a large system.
    __slots__ = ["name", "parent"]

    def __init__(self,
                 name = '',
                 parent = None):
        # "parent" is the environment/object which created this object.
        # Example:  Suppose this "molecule" is an amino acid monomer
        # belonging to a protein.  Then "parent" refers to the
        # InstanceObj for the protein.  ".parent" is useful for
        # traversing the global instance tree.  (Use
        # InstanceObj.statobj.parent for traversing the static tree.)
        self.parent = parent
        # "name" is a string uniquely identifying this object within
        # its "parent" environment.  (It is always the same for every
        # instance of the parent object, so storing it per-instance
        # costs memory that could in principle be saved.)
        self.name = name

    def DeleteSelf(self):
        """Flag this node as deleted.  (Nodes are never actually removed
        from the tree, just flagged.)"""
        # The condition parent==self is normally never true, so it is
        # used here as the deletion flag.  (A separate boolean member
        # would work too, but it was eliminated to save space.)
        self.parent = self

    def IsDeleted(self):
        """Return True if this node -- or any of its ancestors -- has
        been flagged as deleted, either via a "deleted" attribute (used
        by the InstanceObj subclass) or via the parent==self convention
        used by this class."""
        node = self
        while node.parent is not None:
            # BUGFIX: check BOTH deletion flags on every node.  (The
            # previous "elif" skipped the parent==node test whenever a
            # node possessed a "deleted" attribute, which could loop
            # forever if such a node were flagged using parent=self.)
            if getattr(node, 'deleted', False):
                return True
            if node.parent is node:
                return True
            node = node.parent
        return False
class InstanceObj(InstanceObjBasic):
    """ InstanceObjs are used to store nodes in the global
    "instance tree", the tree of all classes (molecules) which have
    been instantiated.  Recall that whenever a class is instantiated,
    its members will be instantiated as well.  Since these
    members can also be classes, this relationship is hierarchical,
    and can be represented as a tree.
    "InstanceObjs" are the data type used to store the nodes in that tree."""
    # __slots__ keeps the per-instance memory footprint low (there can
    # be a very large number of InstanceObjs in a big system).
    __slots__=["statobj",
               "children",
               "categories",
               "commands",
               "commands_push",
               "commands_pop",
               "srcloc_begin",
               "srcloc_end",
               "deleted"]
              # (The commented-out names below are methods; methods do
              #  not need to be listed in __slots__.)
              #"LookupMultiDescrStr",
              ##"Dealloc",
              ##"DeleteSelf",
              ##"IsDeleted",
              ##"UndeleteSelf",
              ##"DeleteProgeny",
              #"BuildInstanceTree",
              #"ProcessCommand",
              #"ProcessContextNodes",
              #"BuildCommandList"]
    def __init__(self,
                 name = '',
                 parent = None):
        """Create an empty instance node named "name" created by
        "parent".  (The .statobj member and the command lists are filled
        in later by BuildInstanceTree()/ProcessCommand().)"""
        InstanceObjBasic.__init__(self, name, parent)
        self.statobj = None       # The statobj node referred to by this instance
        self.children = {}        # A list of statobjs corresponding to
                                  # constituent parts (members) of the
                                  # current class instance.
                                  # The typical example is to consider the
                                  # multiple amino acids (child-molecules)
                                  # which must be created in order to create a
                                  # new protein (instance) to which they belong
                                  # (which would be "self" in this example)
        self.categories = {}      # This member stores the same data as the
                                  # Instance variables (ie. variables
                                  # with a '$' prefix) are stored in a
                                  # category belonging to node.categories
                                  # where "node" is of type InstanceObj.
                                  # (There is a long explanation of
                                  #  "categories" in the comments
                                  #  of class StaticObj.)
        self.commands = []        # An ordered list of commands to carry out
                                  # during instantiation
        self.commands_push = []   # Stackable commands to carry out (first, before children)
        self.commands_pop = []    # Stackable commands to carry out (last, after children)
        self.srcloc_begin = None  # Keep track of location in user files
        self.srcloc_end = None    # (useful for error message reporting)
        self.deleted = False      # Set True by DeleteSelf() (see below)
    def LookupMultiDescrStr(self,
                            multi_descr_str,
                            srcloc,
                            null_list_warning=False,
                            null_list_error=False):
        """
        Post-Instance (PI) modifiers/commands are commands which modify
        an instance of a class after it has already been instantiated.
        Simple Example:
           class A {
             ...
           }
           class B {
             a = new A.command_1()
             a.command_2()
           }
        In the example above "command_2()" is a ModCommand, and
        "a" is the multi_descr_str (string describing the corresponding
        InstanceObj).
        The "command_2()" command will be retroactively pushed onto the
        list of commands to execute once "a" is instantiated.
        (This is somewhat counter-intuitive.)
        When array brackets [] and wildcards are used, a single ModCommand
        can modify many different instances, for example suppose:
           a = new A [2][5][3]
        then "a[1][2][*].command_3()" is equivalent to
           a[0][2][0].command_3()
           a[0][2][1].command_3()
           a[0][2][2].command_3()
        In this example "a[1][2][*]" is the multi_descr_str
        "a[*][3][*].command_4()" is equivalent to
           a[0][3][0].command_4()
           a[0][3][1].command_4()
           a[1][3][0].command_4()
           a[1][3][1].command_4()
        In this function, we interpret strings like "a" and "a[*][3][*]"
        in the examples above, and figure out which InstanceObjs they refer
        to, and push the corresponding command into that InstanceObjs
        instance command stack retroactively.
        In addition to [*], you can use [a-b] and [a:b] syntax. For example:
        "a[0][1-2][0-1].command_3()" and
        "a[0][1:3][0:2].command_3()" are both equivalent to:
           a[0][1][0].command_3()
           a[0][1][1].command_3()
           a[0][2][0].command_3()
           a[0][2][1].command_3()

        Returns the (possibly empty) list of matching InstanceObjs.
        "null_list_warning"/"null_list_error" control whether an empty
        result prints a warning or raises InputError.
        """
        pattern_str = multi_descr_str
        # Suppose pattern_str = 'a[1][*][3]/b[**][2]'
        # We want to split this string into a list of string fragments
        # which omits the '*' characters:  [ 'a[', '][3]/b', '][2]' ]
        # However, we only want to do this when * is enclosed in [].
        pattern_fragments = []
        ranges_ab = []
        i_close_prev = 0
        i_close = 0
        i_open = 0
        while True:
            i_open = pattern_str.find('[', i_open+1)
            if i_open == -1:
                pattern_fragments.append(pattern_str[i_close_prev:])
                break
            else:
                i_close = pattern_str.find(']', i_open+1)
                if i_close == -1:
                    pattern_fragments.append(pattern_str[i_close_prev:])
                    break
                # If there is a '*' or a ':' character between
                # the [] brackets, then split the string at '['
                # (at i_open) and resume reading again at ']'
                # (at i_close) (and create a new entry in the
                # pattern_fragments[] and ranges_ab[] lists)
                wildcard_here = True
                # range_ab holds [lower_bound, upper_bound] for this
                # wildcard; -1 means "no explicit upper bound".
                range_ab = [0,-1]
                for j in range(i_open+1, i_close):
                    if ((pattern_str[j] == ':') or
                        ((pattern_str[j] == '-') and (j > i_open+1)) or
                        (pattern_str[j] == '*')):
                        # NOTE(review): "i_wildcard" appears to be unused
                        # after this assignment -- leftover?
                        i_wildcard = len(pattern_fragments)
                        range_a_str = pattern_str[i_open+1 : j]
                        range_b_str = pattern_str[j+1 : i_close]
                        if (range_a_str != ''):
                            if str.isdigit(range_a_str):
                                range_ab[0] = int(range_a_str)
                            else:
                                raise InputError('Error near '+
                                                 ErrorLeader(srcloc.infile,
                                                             srcloc.lineno)+'\n'
                                                 ' Expected colon-separated integers.\n')
                        if (range_b_str != ''):
                            if str.isdigit(range_b_str):
                                range_ab[1] = int(range_b_str)
                                # special case: When [a-b] type syntax is
                                # used, it selects from a to b inclusive.
                                # (IE. b is not a strict upper bound.)
                                if pattern_str[j] == '-':
                                    range_ab[1] += 1
                            else:
                                raise InputError('Error near '+
                                                 ErrorLeader(srcloc.infile,
                                                             srcloc.lineno)+'\n'
                                                 ' Expected colon-separated integers.\n')
                        break
                    elif j == i_close-1:
                        wildcard_here = False
                if wildcard_here:
                    pattern_fragments.append(pattern_str[i_close_prev:i_open+1])
                    ranges_ab.append(range_ab)
                    i_close_prev = i_close
        assert(len(pattern_fragments)-1==len(ranges_ab))
        # Now figure out which InstanceObj or InstanceObjs correspond to
        # the name or set of names suggested by the multi_descr_str,
        # (after wildcard characters have been substituted with integers).
        instobj_list = []
        if len(pattern_fragments) == 1:
            # (No wildcards: the pattern is an ordinary path.)
            # commenting out:
            # instobj_list.append(StrToNode(pattern_str, self, srcloc))
            #
            # Line above will print an error message if the node is not found.
            # However sometimes we don't want this. Use this code instead:
            path_tokens = pattern_str.split('/')
            i_last_ptkn, instobj = FollowPath(path_tokens,
                                              self,
                                              srcloc)
            # If found add to instobj_list
            if ((i_last_ptkn == len(path_tokens))
                and (not instobj.IsDeleted())):
                instobj_list.append(instobj)
        else:
            # num_counters equals the number of bracket-enclosed wildcards
            num_counters= len(pattern_fragments)-1
            # multi_counters is a multi-dimensional odometer; each digit
            # starts at the lower bound of the corresponding range.
            multi_counters = [ranges_ab[i][0] for i in range(0, num_counters)]
            # NOTE(review): "all_matches_found" appears to be unused.
            all_matches_found = False
            # d_carry is the index of the counter (odometer digit)
            # currently being advanced.
            d_carry = 0
            while d_carry < num_counters:
                # Find the next InstanceObj in the set of InstanceObjs which
                # satisfy the wild-card pattern in pattern_fragments.
                while d_carry < num_counters:
                    candidate_descr_str = ''.join([pattern_fragments[i] +
                                                   str(multi_counters[i])
                                                   for i in range(0,num_counters)] \
                                                  + \
                                                  [pattern_fragments[num_counters]])
                    #sys.stderr.write('DEBUG: /'+self.name+
                    #                 '.LookupMultiDescrStr()\n'
                    #                 ' looking up \"'+
                    #                 candidate_descr_str+'\"\n')
                    path_tokens = candidate_descr_str.split('/')
                    i_last_ptkn, instobj = FollowPath(path_tokens,
                                                      self,
                                                      srcloc)
                    # If there is an InstanceObj with that name,
                    # then add it to the list of InstanceObjs to
                    # which we will apply this modifier function,
                    # and increment the counters
                    # If found (and if the counter is within the range)...
                    if ((i_last_ptkn == len(path_tokens)) and
                        ((ranges_ab[d_carry][1] == -1) or
                         (multi_counters[d_carry]<ranges_ab[d_carry][1]))):
                        # (make sure it has not yet been "deleted")
                        if (not instobj.IsDeleted()):
                            instobj_list.append(instobj)
                        d_carry = 0
                        multi_counters[0] += 1
                        #sys.stderr.write('DEBUG: InstanceObj found.\n')
                        break
                    # If there is no InstanceObj with that name,
                    # then perhaps it is because we have incremented
                    # the counter too high. If there are multiple
                    # counters, increment the next most significant
                    # counter, and reset this counter to 0.
                    # Keep looking
                    # (We only do this if the user neglected to explicitly
                    # specify an upper bound --> ranges_ab[d_carry][1]==-1)
                    elif ((ranges_ab[d_carry][1] == -1) or
                          (multi_counters[d_carry]>=ranges_ab[d_carry][1])):
                        #sys.stderr.write('DEBUG: InstanceObj not found.\n')
                        multi_counters[d_carry] = ranges_ab[d_carry][0]
                        d_carry += 1
                        if d_carry >= num_counters:
                            break
                        multi_counters[d_carry] += 1
                    else:
                        # Object was not found but we keep going. Skip
                        # to the next entry in the multi-dimensional list.
                        d_carry = 0
                        multi_counters[0] += 1
                        break
        if (null_list_warning and (len(instobj_list) == 0)):
            sys.stderr.write('WARNING('+g_module_name+'.LookupMultiDescrStr()):\n'
                             ' Potential problem near '+
                             ErrorLeader(srcloc.infile,
                                         srcloc.lineno)+'\n'
                             ' No objects (yet) matching name \"'+pattern_str+'\".\n')
        if (null_list_error and
            (len(instobj_list) == 0)):
            if len(pattern_fragments) == 1:
                raise InputError('Error('+g_module_name+'.LookupMultiDescrStr()):\n'
                                 ' Syntax error near '+
                                 ErrorLeader(srcloc.infile,
                                             srcloc.lineno)+'\n'
                                 ' No objects matching name \"'+pattern_str+'\".')
            else:
                sys.stderr.write('WARNING('+g_module_name+'.LookupMultiDescrStr()):\n'
                                 ' Potential problem near '+
                                 ErrorLeader(srcloc.infile,
                                             srcloc.lineno)+'\n'
                                 ' No objects (yet) matching name \"'+pattern_str+'\".\n')
        return instobj_list
def __str__(self):
out_str = self.name
if len(self.children) > 0:
out_str += '('
i = 0
for child in self.children.values():
if i+1 < len(self.children):
out_str += str(child)+', '
else:
out_str += str(child)+')'
i += 1
return out_str
    def DeleteSelf(self):
        """Flag this instance as deleted.  (The node, its children, and
        its commands all remain in the tree -- see comments below.)"""
        self.deleted = True
        # COMMENT1: Don't get rid of pointers to yourself. Knowing which
        #           objects you instantiated and destroyed might be useful
        #           in case you want to apply multiple delete [*] commands
        # COMMENT2: Don't delete all the child nodes, and commands. These are
        #           needed later (so that text-templates containing references
        #           to these nodes don't cause moltemplate to crash.)

    #def UndeleteSelf(self):
    #    self.deleted = False
    #
    #
    #def DeleteProgeny(self):
    #    for child in self.children.values():
    #        if hasattr(child, 'DeleteProgeny'):
    #            child.DeleteProgeny()
    #        else:
    #            child.DeleteSelf()
    #    self.DeleteSelf();
    def BuildInstanceTree(self,
                          statobj,
                          class_parents_in_use):
        """
        This takes care of the details of copying relevant data from an StaticObj
        into a newly-created InstanceObj.  It allocates space for and performs
        a deep-copy of any instance variables (and new instance categories), but
        it performs a shallow copy of everything else (template text, etc..).
        This is done recursively for every child that this class instantiates.

        "class_parents_in_use" is the (mutable) set of class parents already
        processed during this instantiation; it is shared across the
        recursive calls to avoid visiting the same parent twice.
        """
        if self.IsDeleted():
            return
        #sys.stderr.write(' DEBUG: '+self.name+
        #                 '.BuildInstanceTree('+statobj.name+')\n')
        #instance_refs = {}
        # Keep track of which line in the file (and which file) we were
        # in when we began parsing the class which defines this instance,
        # as well as when we stopped parsing.
        # (Don't do this if you are recursively searching class_parents because
        #  in that case you would be overwriting .statobj with the parent.)
        if len(class_parents_in_use) == 0:
            self.statobj = statobj
            self.srcloc_begin = statobj.srcloc_begin
            self.srcloc_end = statobj.srcloc_end
        # Make copies of the class_parents' StaticObj data.
        # First deal with the "self.instance_commands_push"
        # These commands should be carried out before any of the commands
        # in "self.instance_commands".
        for command in statobj.instance_commands_push:
            #self.commands.append(command)
            self.ProcessCommand(command)
        # Then deal with class parents
        for class_parent in statobj.class_parents:
            # Avoid the "Diamond of Death" multiple inheritance problem
            if class_parent not in class_parents_in_use:
                #sys.stderr.write(' DEBUG: '+self.name+'.class_parent = '+
                #                 class_parent.name+'\n')
                self.BuildInstanceTree(class_parent,
                                       class_parents_in_use)
            class_parents_in_use.add(class_parent)
        # Now, deal with the data in THIS object and its children
        assert((self.commands != None) and (self.categories != None))
        # "instance_categories" contains a list of new "categories" (ie new
        # types of variables) to create whenever this class is instantiated.
        # (This is used whenever we create a local counter variable: Suppose we
        # want to count the residues within a particular protein, when there
        # are multiple copies of the same protein in the simulation.)
        for cat_name, cat in statobj.instance_categories.items():
            assert(len(cat.bindings) == 0)
            self.categories[cat_name] = Category(cat_name)
            self.categories[cat_name].counter = cat.counter.__copy__()
            # Note: Later on we will generate leaf nodes corresponding to
            #       variables, and put references to them in this category.
        # Deal with the "instance_commands",
        for command in statobj.instance_commands:
            #self.commands.append(command)
            self.ProcessCommand(command)
        # Finally deal with the "self.instance_commands_pop"
        # These commands should be carried out after all of the commands
        # in "self.instance_commands".
        for command in statobj.instance_commands_pop:
            #self.commands.append(command)
            self.ProcessCommand(command)
    def ProcessCommand(self, command):
        """Process one command copied from a StaticObj during instantiation:
        - ModCommand: retroactively push the wrapped command onto the
          command stacks of the (already-instantiated) objects matching
          its multi_descr_str (or delete them, for DeleteCommand);
        - InstantiateCommand: create the named child InstanceObj;
        - WriteFileCommand: register the '$'-variable references appearing
          in its template;
        - anything else: store for later processing.
        """
        if isinstance(command, ModCommand):
            sys.stderr.write(' processing command \"'+str(command)+'\"\n')
            mod_command = command
            instobj_list = self.LookupMultiDescrStr(mod_command.multi_descr_str,
                                                    mod_command.command.srcloc)
            if isinstance(mod_command.command, DeleteCommand):
                # Delete any objects we have created so far
                # whose name matches mod_command.multi_descr_str:
                for instobj in instobj_list:
                    instobj.DeleteSelf()
                    #instobj.DeleteProgeny()
            elif len(instobj_list) == 0:
                raise InputError('Error('+g_module_name+'.ProcessCommand()):\n'
                                 ' Syntax error at or before '+
                                 ErrorLeader(mod_command.command.srcloc.infile,
                                             mod_command.command.srcloc.lineno)+'\n'
                                 ' No objects matching name \"'+
                                 mod_command.multi_descr_str+'\"\n'
                                 ' (If the object is an array, include brackets. Eg. \"molecules[*][*][*]\")')
            else:
                for instobj in instobj_list:
                    assert(not isinstance(mod_command.command, DeleteCommand))
                    command = mod_command.command.__copy__()
                    self.ProcessContextNodes(command)
                    if isinstance(command, PushCommand):
                        instobj.commands_push.append(command)
                    # NOTE(review): the next test reads mod_command.command
                    # rather than command; equivalent, since command is a
                    # copy of mod_command.command.
                    elif isinstance(mod_command.command, PopCommand):
                        instobj.commands_pop.insert(0, command)
                    else:
                        # I don't know if any other types commands will ever
                        # occur but I handle them below, just in case...
                        assert(not isinstance(command, InstantiateCommand))
                        instobj.commands.append(command.__copy__())
            return  # ends "if isinstance(command, ModCommand):"
        # Otherwise:
        command = command.__copy__()
        self.ProcessContextNodes(command)
        if isinstance(command, InstantiateCommand):
            sys.stderr.write(' processing command \"'+str(command)+'\"\n')
            self.commands.append(command) #<- useful later to keep track of the
                                          #   order that children were created
            # check to make sure no child of that name was previously defined
            prev_child = self.children.get(command.name)
            if ((prev_child != None) and (not prev_child.IsDeleted())):
                raise InputError('Error near '+
                                 ErrorLeader(command.srcloc.infile,
                                             command.srcloc.lineno)+'\n'
                                 ' Object \"'+command.name+'\" is already defined.\n')
            child = InstanceObj(command.name, self)
            command.instobj = child
            if command.class_ref.statobj_str == '':
                child.DeleteSelf()
                # Why? This if-then check handles the case when the user
                # wants to create an array of molecules with random vacancies.
                # When this happens, some of the instance commands will
                # contain instructions to create a copy of a molecule with
                # an empty molecule-type-string (class_ref.statobj_str).
                # Counter-intuitively, ...
                # ...we DO want to create something here so that the user can
                # safely loop over the array without generating an error.
                # (Such as to delete elements, or move the remaining
                #  members in the array.) We just want to mark it as
                # 'deleted'. (That's what "DeleteSelf()" does.)
            else:
                # This is the heart of "BuildInstanceTree()"
                # (which implements object composition)
                new_class_parents_in_use = set([])
                child.BuildInstanceTree(command.class_ref.statobj,
                                        new_class_parents_in_use)
            self.children[child.name] = child
        elif isinstance(command, WriteFileCommand):
            #sys.stderr.write(' processing command \"'+str(command)+'\"\n')
            self.commands.append(command)
            for var_ref in command.tmpl_list:
                # Process the VarRef entries in the tmpl_list,
                # (and check they have the correct prefix: either '$' or '@')
                # Ignore other entries (for example, ignore TextBlocks).
                if (isinstance(var_ref, VarRef) and (var_ref.prefix[0] == '$')):
                    if (var_ref.descr_str[:4] == 'mol:'):
                        # NOTE(review): this branch has no effect (leftover
                        # debugging hook?) -- left as-is.
                        pass
                    var_ref.nptr.cat_name, var_ref.nptr.cat_node, var_ref.nptr.leaf_node = \
                        DescrToCatLeafNodes(var_ref.descr_str,
                                            self,
                                            var_ref.srcloc,
                                            True)
                    categories = var_ref.nptr.cat_node.categories
                    # "categories" is a dictionary storing "Category" objects
                    # indexed by category names.
                    # Note to self: Always use the ".categories" member,
                    # (never the ".instance_categories" member.
                    #  ".instance_categories" are only used temporarily before
                    #  we instantiate, ie. before we build the tree of InstanceObjs.)
                    category = categories[var_ref.nptr.cat_name]
                    # "category" is a Category object containing a
                    # dictionary of VarBinding objects, and an internal counter.
                    var_bindings = category.bindings
                    # "var_bindings" is a dictionary storing "VarBinding"
                    # objects, indexed by leaf nodes. Each leaf node
                    # corresponds to a unique variable in this category.
                    # --- Now update "var_bindings" ---
                    # Search for the "VarBinding" object that
                    # corresponds to this leaf node.
                    # If not found, then create one.
                    if var_ref.nptr.leaf_node in var_bindings:
                        var_binding = var_bindings[var_ref.nptr.leaf_node]
                        # "var_binding" stores the information for a variable,
                        # including pointers to all of the places the variable
                        # is referenced, the variable's (full) name, and value.
                        #
                        # Keep track of all the places that variable is
                        # referenced by updating the ".refs" member
                        var_binding.refs.append(var_ref)
                    else:
                        # Not found, so we create a new binding.
                        var_binding = VarBinding()
                        # var_binding.refs contains a list of all the places
                        # this variable is referenced. Start with this var_ref:
                        var_binding.refs = [var_ref]
                        # keep track of the cat_node, cat_name, leaf_node:
                        var_binding.nptr = var_ref.nptr
                        # "var_binding.full_name" stores a unique string like
                        # '@/atom:Water/H' or '$/atom:water[1423]/H2',
                        # which contains the full path for the category and leaf
                        # nodes, and uniquely identifies this variable globally.
                        # (Thus these strings correspond uniquely (ie. in a
                        #  one-to-one fashion) with the nodes they represent.)
                        var_binding.full_name = var_ref.prefix[0] + \
                            CanonicalDescrStr(var_ref.nptr.cat_name,
                                              var_ref.nptr.cat_node,
                                              var_ref.nptr.leaf_node,
                                              var_ref.srcloc)
                        # (These names can always be generated later when needed
                        #  but it doesn't hurt to keep track of it here too.)
                        # Now add this binding to the other
                        # bindings in this category:
                        var_bindings[var_ref.nptr.leaf_node] = var_binding
                    ##vb## var_ref.nptr.leaf_node.AddVarBinding(var_binding)
                    var_binding.category = category
                    # It's convenient to add a pointer in the opposite direction
                    # so that later if we find the var_ref, we can find its
                    # binding and vice-versa. (Ie. two-way pointers)
                    var_ref.binding = var_binding
                    assert(var_ref.nptr.leaf_node in var_bindings)
        else:
            # Otherwise, we don't know what this command is yet.
            # Append it to the list of commands and process it/ignore it later.
            self.commands.append(command)
def ProcessContextNodes(self, command):
if hasattr(command, 'context_node'):
# Lookup any nodes pointers to instobjs
if command.context_node != None:
if type(command.context_node) is str:
command.context_node = StrToNode(command.context_node,
self,
command.srcloc)
# (Otherwise, just leave it as None)
def BuildCommandList(self, command_list):
"""
Search the commands in the tree and make a linear list of commands
in the order they should be carried out.
"""
if self.IsDeleted():
return
if (len(self.commands) == 0):
assert(len(self.children) == 0)
# To save memory don't generate any commands
# for trivial (leaf) nodes
return
# Add a special note to the list of commands to indicate which object
# the commands refer to. (This might be useful one day.)
# Later we can loop through this command list and still be able to tell
# whether or not we are within the scope of a particular class or instance
# (by seeing if we are between a "ScopeBegin" and "ScopeEnd" pair).
command_list.append(ScopeBegin(self, self.srcloc_begin))
# Note:
# The previous version looped over all commands in this node, and then
# recursively invoke BuildCommandList() on all the children of this node
# We don't do that anymore because it does not take into account the
# order that various child objects were created/instantiated
# which potentially could occur in-between other commands. Instead,
# now we loop through the command_list and recursively visit child
# nodes only when we encounter them in the command list.
for command in self.commands_push:
assert(isinstance(command, InstantiateCommand) == False)
command_list.append(command)
for command in self.commands:
if isinstance(command, InstantiateCommand):
#child = self.children[command.name]
# the above line does not work because you may have
# deleted that child after you created and then
# replaced it by somebody else. Store the node.
child = command.instobj
child.BuildCommandList(command_list)
else:
command_list.append(command)
for command in self.commands_pop:
assert(isinstance(command, InstantiateCommand) == False)
command_list.append(command)
command_list.append(ScopeEnd(self, self.srcloc_begin))
def AssignTemplateVarPtrs(tmpl_list, context_node):
    """
    Now scan through all the variables within the templates defined
    for this context_node (either static or dynamic depending on var_filter).
    Each reference to a variable in the template has a descriptor which
    indicates the variable's type, and in which molecule it is defined (ie
    where it is located in the molecule instance tree or type definition tree).
    (See comments for "class VarNPtr(object):" above for details.)
    Eventually we want to assign a value to each variable.
    This same variable (node) may appear multiple times in diffent templates.
    So we also create a place to store this variable's value, and also assign
    (two-way) pointers from the VarRef in the template, to this storage area so
    that later on when we write out the contents of the template to a file, we
    can substitute this variable with it's value, in all the places it appears.

    Args:
        tmpl_list:    a template: a list of TextBlock and VarRef entries.
        context_node: the StaticObj or InstanceObjBasic in whose scope the
                      template's variables should be resolved.  Only VarRefs
                      whose prefix matches the node kind ('@' for StaticObj,
                      '$' for InstanceObjBasic) are processed.
    """
    for var_ref in tmpl_list:
        # Process the VarRef entries in the tmpl_list,
        # (and check they have the correct prefix: either '$' or '@')
        # Ignore other entries (for example, ignore TextBlocks).
        if (isinstance(var_ref, VarRef) and
            ((isinstance(context_node, StaticObj) and
              (var_ref.prefix[0] == '@'))
             or
             (isinstance(context_node, InstanceObjBasic) and
              (var_ref.prefix[0] == '$')))):
            var_ref.nptr.cat_name, var_ref.nptr.cat_node, var_ref.nptr.leaf_node = \
                DescrToCatLeafNodes(var_ref.descr_str,
                                    context_node,
                                    var_ref.srcloc,
                                    True)
            categories = var_ref.nptr.cat_node.categories
            # "categories" is a dictionary storing "Category" objects
            # indexed by category names.
            # Note to self: Always use the ".categories" member,
            # (never the ".instance_categories" member.
            # ".instance_categories" are only used temporarilly before
            # we instantiate, ie. before we build the tree of InstanceObjs.)
            category = categories[var_ref.nptr.cat_name]
            # "category" is a Category object containing a
            # dictionary of VarBinding objects, and an internal counter.
            var_bindings = category.bindings
            # "var_bindings" is a dictionary storing "VarBinding"
            # objects, indexed by leaf nodes. Each leaf node
            # corresponds to a unique variable in this category.
            # --- Now update "var_bindings" ---
            # Search for the "VarBinding" object that
            # corresponds to this leaf node.
            # If not found, then create one.
            if var_ref.nptr.leaf_node in var_bindings:
                var_binding = var_bindings[var_ref.nptr.leaf_node]
                # "var_binding" stores the information for a variable,
                # including pointers to all of the places the variable
                # is rerefenced, the variable's (full) name, and value.
                #
                # Keep track of all the places that varible is
                # referenced by updating the ".refs" member
                var_binding.refs.append(var_ref)
            else:
                # Not found, so we create a new binding.
                var_binding = VarBinding()
                # var_binding.refs contains a list of all the places
                # this variable is referenced. Start with this var_ref:
                var_binding.refs = [var_ref]
                # keep track of the cat_node, cat_name, leaf_node:
                var_binding.nptr = var_ref.nptr
                # "var_binding.full_name" stores a unique string like
                # '@/atom:Water/H' or '$/atom:water[1423]/H2',
                # which contains the full path for the category and leaf
                # nodes, and uniquely identifies this variable globally.
                # Thus these strings correspond uniquely (ie. in a
                # one-to-one fashion) with the nodes they represent.
                var_binding.full_name = var_ref.prefix[0] + \
                    CanonicalDescrStr(var_ref.nptr.cat_name,
                                      var_ref.nptr.cat_node,
                                      var_ref.nptr.leaf_node,
                                      var_ref.srcloc)
                # (These names can always be generated later when needed
                # but it doesn't hurt to keep track of it here too.)
                # Now add this binding to the other
                # bindings in this category:
                var_bindings[var_ref.nptr.leaf_node] = var_binding
                ##vb## var_ref.nptr.leaf_node.AddVarBinding(var_binding)
                var_binding.category = category
            # It's convenient to add a pointer in the opposite direction
            # so that later if we find the var_ref, we can find its
            # binding and visa-versa. (Ie. two-way pointers)
            var_ref.binding = var_binding
            assert(var_ref.nptr.leaf_node in var_bindings)
def AssignStaticVarPtrs(context_node, search_instance_commands=False):
    """
    Recursively walk the static tree rooted at context_node and, for
    every template stored in a WriteFileCommand, wire up the variable
    references via AssignTemplateVarPtrs().  When
    search_instance_commands is True, the node's instance_commands
    (from write() statements) are scanned instead of its ordinary
    commands (from write_once() statements).
    """
    if search_instance_commands:
        assert(isinstance(context_node, StaticObj))
        cmds = context_node.instance_commands
    else:
        if not hasattr(context_node, 'commands'):
            # Leaf nodes store no commands, so there is nothing to do.
            return
        cmds = context_node.commands
    for cmd in cmds:
        if isinstance(cmd, WriteFileCommand):
            AssignTemplateVarPtrs(cmd.tmpl_list, context_node)
    # Recurse into every (non-leaf) child node.
    for child in context_node.children.values():
        AssignStaticVarPtrs(child, search_instance_commands)
#def AssignVarOrderByFile(context_node, search_instance_commands=False):
# """
# For each category in context_node, and each variable in that category,
# set the order of each variable equal to the position of that variable
# in the user's input file.
#
# """
#
# if search_instance_commands:
# assert(isinstance(context_node, StaticObj))
# commands = context_node.instance_commands_push + \
# context_node.instance_commands + \
# context_node.instance_commands_pop
# else:
# commands = context_node.commands
# for command in commands:
# if isinstance(command, WriteFileCommand):
# tmpl_list = command.tmpl_list
# for var_ref in tmpl_list:
# if isinstance(var_ref, VarRef):
# if (((var_ref.prefix == '@') and
# isinstance(context_node, StaticObj)) or
# ((var_ref.prefix == '$') and
# isinstance(context_node, InstanceObjBasic))):
# #if ((var_ref.prefix == '@') or
# # (not search_instance_commands)):
# if ((var_ref.binding.order == -1) or
# (var_ref.binding.order > var_ref.srcloc.order)):
# var_ref.binding.order = var_ref.srcloc.order
#
# for child in context_node.children.values():
# AssignVarOrderByFile(child, search_instance_commands)
def AssignVarOrderByCommand(command_list, prefix_filter):
    """
    Set each variable's binding.order according to the position of the
    write()/write_once() command which first referenced it.  Only
    variables whose prefix ('$' or '@') appears in prefix_filter are
    affected.
    """
    count = 0
    for command in command_list:
        if not isinstance(command, WriteFileCommand):
            continue
        for entry in command.tmpl_list:
            if isinstance(entry, VarRef) and (entry.prefix in prefix_filter):
                count += 1
                # Keep the earliest position at which the variable appears.
                order = entry.binding.order
                if (order is None) or (order > count):
                    entry.binding.order = count
def AutoAssignVals(cat_node,
                   sort_variables,
                   reserved_values = None,
                   ignore_prior_values = False):
    """
    This function automatically assigns all the variables
    belonging to all the categories in cat_node.categories.
    Each category has its own internal counter. For every variable in that
    category, query the counter (which usually returns an integer),
    and assign the variable to it. Exceptions can be made if the integer
    is reserved by some other variable, or if it has been already assigned.
    Afterwards, we recursively search the child nodes recursively
    (in a depth-first-search order).
    sort_variables: Sorting the variables according to their "binding.order"
                    counters is optional.

    Args:
        cat_node:            node whose .categories (and, recursively,
                             children) are processed.
        reserved_values:     optional set of (category, value) pairs which
                             the automatic counter must skip over.
        ignore_prior_values: if True, previously-assigned values are
                             overwritten.
    """
    if (not hasattr(cat_node, 'categories')):
        # (sometimes leaf nodes lack a 'categories' member, to save memory)
        return
    # Search the tree in a depth-first-search manner.
    # For each node, examine the "categories" associated with that node
    # (ie the list of variables whose counters lie within that node's scope).
    for cat_name, cat in cat_node.categories.items():
        # Loop through all the variables in this category.
        if sort_variables:
            # Sort the list of variables according to var_binding.order
            # First, print a progress indicator (this could be slow)
            prefix = '$'
            # Is this parent_node an StaticObj? (..or inherit from StaticObj?)
            if isinstance(cat_node, StaticObj):
                prefix = '@'
            sys.stderr.write(' sorting variables in category: '+prefix+
                             CanonicalCatName(cat_name, cat_node)+':\n')
            var_bind_iter = iter(sorted(cat.bindings.items(),
                                        key=operator.itemgetter(1)))
        else:
            # Just iterate through them in the order that they were added
            # to the category list. (This happens to be the same order as
            # we found it earlier when searching the tree.)
            var_bind_iter = iter(cat.bindings.items())
        for leaf_node,var_binding in var_bind_iter:
            if ((var_binding.value is None) or ignore_prior_values):
                if var_binding.nptr.leaf_node.name[:9] == '__query__':
                    # -- THE "COUNT" HACK --
                    # '__query__...' variables are not really variables.
                    # They are a mechanism to allow the user to query the
                    # category counter without incrementing it.
                    var_binding.value = str(cat.counter.query())
                elif HasWildCard(var_binding.full_name):
                    # -- The wildcard hack ---
                    # Variables containing * or ? characters in their names
                    # are not allowed. These are not variables, but patterns
                    # to match with other variables. Represent them by the
                    # (full-path-expanded) string containing the * or ?.
                    var_binding.value = var_binding.full_name
                else:
                    if (not var_binding.nptr.leaf_node.IsDeleted()):
                        # For each (regular) variable, query this category's counter
                        # (convert it to a string), and see if it is already in use
                        # (in this category). If not, then set this variable's value
                        # to the counter's value. Either way, increment the counter.
                        while True:
                            cat.counter.incr()
                            value = str(cat.counter.query())
                            if ((reserved_values is None) or
                                ((cat, value) not in reserved_values)):
                                break
                        var_binding.value = value
    # Recursively invoke AssignVarValues() on all child nodes
    for child in cat_node.children.values():
        AutoAssignVals(child,
                       sort_variables,
                       reserved_values,
                       ignore_prior_values)
# Did the user ask us to reformat the output string?
# This information is encoded in the variable's suffix.
def ExtractFormattingCommands(suffix):
    """
    Parse a variable suffix such as ".ljust(20,'0')}" and return the
    (function_name, argument_list) pair it encodes.  Returns
    (None, None) when the suffix does not contain a formatting
    function call.
    """
    if len(suffix) <= 1:
        return None, None
    if suffix.endswith('}'):
        # Discard the trailing '}' delimiter.
        suffix = suffix[:-1]
    if not suffix.endswith(')'):
        # Formatting functions are always followed by parentheses.
        return None, None
    # The function name sits between the '.' and the '(';
    # its comma-separated arguments sit between '(' and ')'.
    idot = suffix.find('.')
    iopen = suffix.find('(')
    iclose = suffix.find(')')
    format_fname = suffix[idot + 1:iopen]
    args = [RemoveOuterQuotes(a.strip(), '\"\'')
            for a in suffix[iopen + 1:iclose].split(',')]
    return format_fname, args
def Render(tmpl_list, substitute_vars=True):
    """
    This function converts a TextBlock,VarRef list into a string.
    It is invoked by WriteTemplatesValue() in order to print
    out the templates stored at each node of the tree.

    Args:
        tmpl_list:       list of TextBlock and VarRef entries.
        substitute_vars: if True, each VarRef is replaced by its bound
                         value (honoring any ".ljust()/.rjust()" suffix
                         formatting); if False, the variable's full name
                         is emitted instead (used for *.template files).
    Raises:
        InputError: if a referenced variable (or an object it belonged
                    to) was deleted earlier.
    """
    out_str_list = []
    i = 0
    while i < len(tmpl_list):
        entry = tmpl_list[i]
        if isinstance(entry, VarRef):
            var_ref = entry
            var_bindings = var_ref.nptr.cat_node.categories[var_ref.nptr.cat_name].bindings
            #if var_ref.nptr.leaf_node not in var_bindings:
            #assert(var_ref.nptr.leaf_node in var_bindings)
            if var_ref.nptr.leaf_node.IsDeleted():
                raise InputError('Error near '+
                                 ErrorLeader(var_ref.srcloc.infile,
                                             var_ref.srcloc.lineno)+'\n'
                                 ' The variable you referred to does not exist:\n\n'
                                 ' '+var_ref.prefix+var_ref.descr_str+var_ref.suffix+'\n\n'
                                 ' (You probably deleted it or something it belonged to earlier.)\n')
            else:
                if substitute_vars:
                    value = var_bindings[var_ref.nptr.leaf_node].value
                    # Apply optional formatting commands encoded in the
                    # suffix, eg "$var.ljust(10)" or "$var.rjust(8,'0')".
                    format_fname, args = ExtractFormattingCommands(var_ref.suffix)
                    if format_fname == 'ljust':
                        if len(args) == 1:
                            value = value.ljust(int(args[0]))
                        else:
                            value = value.ljust(int(args[0]), args[1])
                    elif format_fname == 'rjust':
                        if len(args) == 1:
                            value = value.rjust(int(args[0]))
                        else:
                            value = value.rjust(int(args[0]), args[1])
                    out_str_list.append(value)
                else:
                    # Emit the (encoded) full variable name, dropping the
                    # leading '@'/'$' stored in full_name's first char
                    # (the original prefix string is re-attached instead).
                    out_str_list.append(var_ref.prefix +
                                        SafelyEncodeString(var_bindings[var_ref.nptr.leaf_node].full_name[1:]) +
                                        var_ref.suffix)
        else:
            assert(isinstance(entry, TextBlock))
            out_str_list.append(entry.text)
        i += 1
    return ''.join(out_str_list)
def MergeWriteCommands(command_list):
    """
    Group the templates of all WriteFileCommands by destination file.
    Returns a dict-like mapping from each filename to the concatenation
    of every template written to it (in command order).  Merging the
    commands lets each output file be opened and closed only once.
    """
    file_templates = defaultdict(list)
    for command in command_list:
        if isinstance(command, WriteFileCommand) and (command.filename is not None):
            file_templates[command.filename].extend(command.tmpl_list)
    return file_templates
def WriteTemplatesValue(file_templates):
    """
    Carry out the write() and write_once() commands: render each file's
    merged template (substituting variable values) and append the result
    to that file.  An empty filename ('') means write to stdout.

    Args:
        file_templates: mapping from filename to a merged template list
                        (as produced by MergeWriteCommands()).
    """
    for filename, tmpl_list in file_templates.items():
        text = Render(tmpl_list, substitute_vars=True)
        if filename == '':
            sys.stdout.write(text)
        else:
            # Use a context manager so the file is closed even if the
            # write fails (the previous version leaked the handle on
            # exceptions).
            with open(filename, 'a') as out_file:
                out_file.write(text)
def WriteTemplatesVarName(file_templates):
    """
    Like WriteTemplatesValue(), but leaves the full variable names in
    place of their values, and appends the output for each file to
    "<filename>.template".  The stdout pseudo-file ('') is skipped.

    Args:
        file_templates: mapping from filename to a merged template list
                        (as produced by MergeWriteCommands()).
    """
    for filename, tmpl_list in file_templates.items():
        if filename != '':
            # Context manager guarantees the handle is closed even if
            # rendering or writing raises (the previous version did not).
            with open(filename + '.template', 'a') as out_file:
                out_file.write(Render(tmpl_list, substitute_vars=False))
def EraseTemplateFiles(command_list):
    """
    Truncate every output file (and its ".template" twin) that the
    commands in command_list will later append to, so that each run
    starts from empty files.
    """
    seen = set()
    for command in command_list:
        if not isinstance(command, WriteFileCommand):
            continue
        fname = command.filename
        if (fname is not None) and (fname != '') and (fname not in seen):
            seen.add(fname)
            # Opening in 'w' mode and immediately closing erases
            # the file's contents.
            out_file = open(fname, 'w')
            out_file.close()
            out_file = open(fname + '.template', 'w')
            out_file.close()
#def ClearTemplates(file_templates):
# for filename in file_templates:
# if filename != '':
# out_file = open(filename, 'w')
# out_file.close()
# out_file = open(filename + '.template', 'w')
# out_file.close()
def WriteVarBindingsFile(node):
    """
    Append every variable defined in "node" (and, recursively, in its
    children) to 'ttree_assignments.txt': one "name value" pair per
    line, followed by a comment showing where the variable was first
    referenced.

    Args:
        node: root of the (static or instance) subtree to report.
    """
    if (not hasattr(node, 'categories')):
        # (sometimes leaf nodes lack a 'categories' member, to save memory)
        return
    # "with" closes the file even if a write raises (the previous
    # version leaked the handle on exceptions).
    with open('ttree_assignments.txt', 'a') as out:
        for cat_name in node.categories:
            var_bindings = node.categories[cat_name].bindings
            for nd, var_binding in var_bindings.items():
                if nd.IsDeleted():
                    continue  # In that case, skip this variable
                # Only report variables of the same kind as "node":
                # instance vars for instance nodes, static for static.
                if ((isinstance(node, InstanceObjBasic) and isinstance(nd, InstanceObjBasic))
                    or
                    (isinstance(node, StaticObj) and isinstance(nd, StaticObj))):
                    # Omit names containing "*" or "?": these are
                    # wildcard patterns, not actual variables.
                    if HasWildCard(var_binding.full_name):
                        continue
                    if len(var_binding.refs) > 0:
                        usage_example = ' #'+\
                            ErrorLeader(var_binding.refs[0].srcloc.infile,
                                        var_binding.refs[0].srcloc.lineno)
                    else:
                        usage_example = ''
                    out.write(SafelyEncodeString(var_binding.full_name) +' '+
                              SafelyEncodeString(var_binding.value)
                              +usage_example+'\n')
    for child in node.children.values():
        WriteVarBindingsFile(child)
def CustomizeBindings(bindings,
                      g_objectdefs,
                      g_objects):
    """
    Apply user-supplied variable assignments.  "bindings" maps prefixed
    variable names ('@...' or '$...') to ValLocPair(value, srcloc)
    entries.  Static ('@') names are resolved in g_objectdefs, instance
    ('$') names in g_objects.  Returns the set of (category, value)
    pairs assigned here, so those values can be treated as reserved
    during later automatic assignment.
    """
    var_assignments = set()
    for name, vlpair in bindings.items():
        prefix, var_descr_str = name[0], name[1:]
        if prefix == '@':
            var_binding = LookupVar(var_descr_str, g_objectdefs, vlpair.loc)
        elif prefix == '$':
            var_binding = LookupVar(var_descr_str, g_objects, vlpair.loc)
        else:
            # A missing prefix should have been reported as an error
            # long before we get here.
            assert(False)
        # Override whatever value the variable had (or would get).
        var_binding.value = vlpair.val
        var_assignments.add((var_binding.category, vlpair.val))
        #sys.stderr.write('  CustomizeBindings: descr=' + var_descr_str +
        #                 ', value=' + vlpair.val + '\n')
    return var_assignments
##############################################################
##################### BasicUI functions #####################
# These functions are examples of how to use the StaticObj
# and InstanceObj data structures above, and to read a ttree file.
# These are examples only. New programs based on ttree_lib.py
# will probably require their own settings and functions.
##############################################################
def BasicUIReadBindingsFile(bindings_so_far, filename):
    """
    Read variable bindings (one "name value" pair per line) from the
    file named "filename" and merge them into the "bindings_so_far"
    dictionary (name -> ValLocPair).  Exits the program with status 1
    if the file cannot be opened.
    """
    try:
        f = open(filename, 'r')
    except IOError:
        # NOTE(review): "g_filename" is a module-level global defined
        # elsewhere in this file — confirm it is set before this runs.
        sys.stderr.write('Error('+g_filename+'):\n'' : unable to open file\n'
                         '\n'
                         ' \"'+filename+'\"\n'
                         ' for reading.\n'
                         '\n'
                         ' (If you were not trying to open a file with this name, then this could\n'
                         ' occur if you forgot to enclose your command-line-argument in quotes,\n'
                         ' For example, use: \'$atom:wat[2]/H1 20\' or "\$atom:wat[2]/H1 to 20"\n'
                         ' to set the variable $atom:wat[2]/H1 to 20.)\n')
        sys.exit(1)
    BasicUIReadBindingsStream(bindings_so_far, f, filename)
    f.close()
def BasicUIReadBindingsText(bindings_so_far, text, source_name=''):
    """
    Parse variable bindings from a string (same 2-column format as
    BasicUIReadBindingsFile) and merge them into "bindings_so_far".

    Args:
        bindings_so_far: dictionary receiving name -> ValLocPair entries.
        text:            the string to parse.
        source_name:     label used in error messages for this text.
    """
    # Compare sys.version_info (a tuple) instead of the sys.version
    # string: lexicographic string comparison of versions is fragile
    # (eg it would misorder a hypothetical '10.x' against '3').
    if sys.version_info[0] >= 3:
        in_stream = io.StringIO(text)
    else:
        in_stream = cStringIO.StringIO(text)
    return BasicUIReadBindingsStream(bindings_so_far, in_stream, source_name)
class ValLocPair(object):
    """A small (value, source-location) record used to remember both a
    variable's assigned value and where that assignment came from."""
    __slots__ = ["val",  # the value bound to the variable
                 "loc"]  # the source location of the assignment

    def __init__(self, val=None, loc=None):
        self.val = val
        self.loc = loc
def BasicUIReadBindingsStream(bindings_so_far, in_stream, source_name=''):
    """
    Read "name value" variable bindings from in_stream (via the
    TemplateLexer, so quoted, escaped names and values are handled) and
    store them in "bindings_so_far" as name -> ValLocPair entries.
    """
    # EXAMPLE (simple version)
    # The simple version of this function commented out below
    # does not handle variable whose names or values
    # contain strange or escaped characters, quotes or whitespace.
    # But I kept it in for illustrative purposes:
    #
    #for line in f:
    # line = line.strip()
    # tokens = line.split()
    # if len(tokens) == 2:
    # var_name = tokens[0]
    # var_value = tokens[1]
    # var_assignments[var_name] = var_value
    #f.close()
    lex = TemplateLexer(in_stream, source_name)
    tmpllist = lex.ReadTemplate()
    i = 0
    # NOTE(review): assumes ReadTemplate() never returns an empty list
    # (tmpllist[0] would raise IndexError otherwise) — confirm for
    # completely empty input streams.
    if isinstance(tmpllist[0], TextBlock):
        i += 1
    while i+1 < len(tmpllist):
        # process one line at a time (2 entries per line)
        var_ref = tmpllist[i]
        text_block = tmpllist[i+1]
        assert(isinstance(var_ref, VarRef))
        if (not isinstance(text_block, TextBlock)):
            raise InputError('Error('+g_filename+'):\n'
                             ' This is not a valid name-value pair:\n'
                             ' \"'+var_ref.prefix+var_ref.descr_str+' '+text_block.text.rstrip()+'\"\n'
                             ' Each variable asignment should contain a variable name (beginning with\n'
                             ' @ or $) followed by a space, and then a string you want to assign to it.\n'
                             ' (Surrounding quotes are optional and will be removed.)\n')
        # Variables in the ttree_assignments.txt file use "full-path" style.
        # In other words, the full name of the variable, (including all
        # path information) is stored var_ref.descr_str,
        # and the first character of the prefix stores either a @ or $
        var_name = var_ref.prefix[:1] + var_ref.descr_str
        text = SplitQuotedString(text_block.text.strip())
        var_value = EscCharStrToChar(RemoveOuterQuotes(text, '\'\"'))
        bindings_so_far[var_name] = ValLocPair(var_value, lex.GetSrcLoc())
        i += 2
class BasicUISettings(object):
    """
    Run-time user customisations for ttree (these affect the order in
    which values are assigned to variables, and which values they get).
    This object, along with the other "UI" functions nearby, is an
    example only; new programs based on ttree_lib.py will probably have
    their own settings and functions.

    Members:

    user_bindings / user_bindings_x
        Ordered mappings from (prefixed) variable names to the string
        values they are bound to (typically numeric).  Values in
        "user_bindings_x" are "exclusive": they are reserved, so that
        the automatic assignment of other variables in the same
        category will avoid duplicating them.  Values in
        "user_bindings" carry no such guarantee of uniqueness.

    order_method
        Strategy for the order in which values are automatically
        assigned to variables ('by_command', 'by_file', or 'by_tree').
        (For molecular simulation input files this helps keep the atom
        order consistent with files produced by other programs.)

    lex
        The TemplateLexer used to parse the input file (set later).
    """

    def __init__(self,
                 user_bindings_x=None,
                 user_bindings=None,
                 order_method='by_command',
                 lex=None):
        # Fall back to fresh (empty) ordered mappings when no bindings
        # were supplied.
        self.user_bindings_x = user_bindings_x if user_bindings_x else OrderedDict()
        self.user_bindings = user_bindings if user_bindings else OrderedDict()
        self.order_method = order_method
        self.lex = lex
def BasicUIParseArgs(argv, settings):
    """
    BasicUIParseArgs()
    The following function contains part of the user interface for a
    typical ttree-based program. This function processes an argument list
    and extracts the common ttree user settings.
    This function, along with the other "UI" functions below are examples only.
    (New programs based on ttree_lib.py will probably have their own UI.)

    Side effects: recognized arguments (and their parameters) are
    deleted from "argv" in place; "settings" is updated in place.
    """
    #argv = [arg for arg in orig_argv] # (make a deep copy of "orig_argv")
    # This error message is used in multiple places:
    bind_err_msg = 'should either be followed by a 2-column\n'+\
                   ' file (containing variable-value pairs on each line).\n'+\
                   ' --OR-- a quoted string (such as \"@atom:x 2\")\n'+\
                   ' with the full variable name and its desired value.'
    bind_err_msg_var = 'Missing value, or space needed separating variable\n'+\
                       ' and value. (Remember to use quotes to surround the argument\n'+\
                       ' containing the variable name, and it\'s assigned value.)'
    i = 1
    while i < len(argv):
        #sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
        if argv[i] == '-a':
            # -a: an "exclusive" binding (value becomes reserved).
            # NOTE(review): if '-a' is the *last* argument, building this
            # error message indexes argv[i+1] out of range (IndexError
            # instead of InputError) — same for '-b' and '-importpath'.
            if ((i+1 >= len(argv)) or (argv[i+1][:1] == '-')):
                raise InputError('Error('+g_filename+'):\n'
                                 ' Error in -a \"'+argv[i+1]+' argument.\"\n'
                                 ' The -a flag '+bind_err_msg)
            if (argv[i+1][0] in '@$'):
                # Argument starts with a variable prefix: treat it as an
                # inline "name value" pair rather than a filename.
                #tokens = argv[i+1].strip().split(' ')
                tokens = SplitQuotedString(argv[i+1].strip())
                if len(tokens) < 2:
                    raise InputError('Error('+g_filename+'):\n'
                                     ' Error in -a \"'+argv[i+1]+'\" argument.\n'
                                     ' '+bind_err_msg_var)
                BasicUIReadBindingsText(settings.user_bindings_x,
                                        argv[i+1],
                                        '__command_line_argument__')
            else:
                BasicUIReadBindingsFile(settings.user_bindings_x,
                                        argv[i+1])
            #i += 2
            del(argv[i:i+2])
        elif argv[i] == '-b':
            # -b: a non-exclusive binding (value need not be unique).
            if ((i+1 >= len(argv)) or (argv[i+1][:1] == '-')):
                raise InputError('Error('+g_filename+'):\n'
                                 ' Error in -b \"'+argv[i+1]+' argument.\"\n'
                                 ' The -b flag '+bind_err_msg)
            if (argv[i+1][0] in '@$'):
                #tokens = argv[i+1].strip().split(' ')
                tokens = SplitQuotedString(argv[i+1].strip())
                if len(tokens) < 2:
                    raise InputError('Error('+g_filename+'):\n'
                                     ' Error in -b \"'+argv[i+1]+'\" argument.\n'
                                     ' '+bind_err_msg_var)
                BasicUIReadBindingsText(settings.user_bindings,
                                        argv[i+1],
                                        '__command_line_argument__')
            else:
                BasicUIReadBindingsFile(settings.user_bindings,
                                        argv[i+1])
            #i += 2
            del(argv[i:i+2])
        elif argv[i] == '-order-command':
            settings.order_method = 'by_command'
            #i += 1
            del(argv[i:i+1])
        elif argv[i] == '-order-file':
            settings.order_method = 'by_file'
            #i += 1
            del(argv[i:i+1])
        elif ((argv[i] == '-order-tree') or (argv[i] == '-order-dfs')):
            settings.order_method = 'by_tree'
            del(argv[i:i+1])
        elif ((argv[i] == '-importpath') or
              (argv[i] == '-import-path') or
              (argv[i] == '-import_path')):
            if ((i+1 >= len(argv)) or (argv[i+1][:1] == '-')):
                raise InputError('Error('+g_filename+'):\n'
                                 ' Error in \"'+argv[i]+'\" argument.\"\n'
                                 ' The \"'+argv[i]+'\" argument should be followed by the name of\n'
                                 ' an environment variable storing a path for including/importing files.\n')
            TtreeShlex.custom_path = RemoveOuterQuotes(argv[i+1])
            del(argv[i:i+2])
        elif ((argv[i][0] == '-') and (__name__ == "__main__")):
            # Unknown flags are fatal only when running as a script;
            # importing programs may define their own flags.
            #elif (__name__ == "__main__"):
            raise InputError('Error('+g_filename+'):\n'
                             'Unrecogized command line argument \"'+argv[i]+'\"\n')
        else:
            i += 1
    if __name__ == "__main__":
        # Instantiate the lexer we will be using.
        # (The lexer's __init__() function requires an openned file.
        # Assuming __name__ == "__main__", then the name of that file should
        # be the last remaining (unprocessed) argument in the argument list.
        # Otherwise, then name of that file will be determined later by the
        # python script which imports this module, so we let them handle it.)
        if len(argv) == 1:
            raise InputError('Error('+g_filename+'):\n'
                             ' This program requires at least one argument\n'
                             ' the name of a file containing ttree template commands\n')
        elif len(argv) == 2:
            try:
                settings.lex = TemplateLexer(open(argv[1], 'r'), argv[1]) # Parse text from file
            except IOError:
                sys.stderr.write('Error('+g_filename+'):\n'
                                 ' unable to open file\n'
                                 ' \"'+argv[1]+'\"\n'
                                 ' for reading.\n')
                sys.exit(1)
            del(argv[1:2])
        else:
            # if there are more than 2 remaining arguments,
            problem_args = ['\"'+arg+'\"' for arg in argv[1:]]
            raise InputError('Syntax Error ('+g_filename+'):\n'
                             ' Problem with argument list.\n'
                             ' The remaining arguments are:\n\n'
                             ' '+(' '.join(problem_args))+'\n\n'
                             ' (The actual problem may be earlier in the argument list.\n'
                             ' If these arguments are source files, then keep in mind\n'
                             ' that this program can not parse multiple source files.)\n'
                             ' Check the syntax of the entire argument list.\n')
def BasicUI(settings,
            static_tree_root,
            instance_tree_root,
            static_commands,
            instance_commands):
    """
    BasicUI()
    This function loads a ttree file and optional custom bindings for it,
    creates a "static" tree (of defined ttree classes),
    creates an "instance" tree (of instantiated ttree objects),
    automatically assigns values to unbound variables,
    substitutes them into text templates (renders the template).
    The actual writing of the templates to a file is not handled here.

    Args:
        settings:           a BasicUISettings (supplies the lexer, the
                            user bindings and the variable-order method).
        static_tree_root:   empty StaticObj; becomes the root of the
                            class-definition tree (populated here).
        instance_tree_root: empty InstanceObj; becomes the root of the
                            instance tree (populated here).
        static_commands:    list which receives the static (@) commands.
        instance_commands:  list which receives the instance ($) commands.
    """
    # Parsing, and compiling is a multi-pass process.
    # Step 1: Read in the StaticObj (class) defintions, without checking
    # whether or not the instance_children refer to valid StaticObj types.
    sys.stderr.write('parsing the class definitions...')
    static_tree_root.Parse(settings.lex)
    #gc.collect()
    #sys.stderr.write('static = ' + str(static_tree_root) + '\n')
    # Step 2: Now that the static tree has been constructed, lookup
    # any references to classes (StaticObjs), contained within
    # the instance_children or class_parents of each node in
    # static_tree_root. Replace them with (pointers to)
    # the StaticObjs they refer to (and check validity).
    # (Note: Variables stored within the templates defined by write()
    # and write_once() statements may also refer to StaticObjs in
    # the tree, but we leave these references alone. We handle
    # these assignments later using "AssignVarPtrs()" below.)
    sys.stderr.write(' done\nlooking up classes...')
    static_tree_root.LookupStaticRefs()
    #gc.collect()
    # Step 3: Now scan through all the (static) variables within the templates
    # and replace the (static) variable references to pointers
    # to nodes in the StaticObj tree:
    sys.stderr.write(' done\nlooking up @variables...')
    # Here we assign pointers for variables in "write_once(){text}" templates:
    AssignStaticVarPtrs(static_tree_root, search_instance_commands=False)
    # Here we assign pointers for variables in "write(){text}" templates:
    AssignStaticVarPtrs(static_tree_root, search_instance_commands=True)
    sys.stderr.write(' done\nconstructing the tree of class definitions...')
    sys.stderr.write(' done\n\nclass_def_tree = ' + str(static_tree_root) + '\n\n')
    #gc.collect()
    # Step 4: Construct the instance tree (the tree of instantiated
    # classes) from the static tree of type definitions.
    sys.stderr.write('constructing the instance tree...\n')
    class_parents_in_use = set([])
    instance_tree_root.BuildInstanceTree(static_tree_root, class_parents_in_use)
    #sys.stderr.write('done\n garbage collection...')
    #gc.collect()
    sys.stderr.write(' done\n')
    #sys.stderr.write('instance_tree = ' + str(instance_tree_root) + '\n')
    # Step 5: The commands must be carried out in a specific order.
    # (for example, the "write()" and "new" commands).
    # Search through the tree, and append commands to a command list.
    # Then re-order the list in the order the commands should have
    # been executed in. (We don't carry out the commands yet,
    # we just store them and sort them.)
    class_parents_in_use = set([])
    static_tree_root.BuildCommandList(static_commands)
    instance_tree_root.BuildCommandList(instance_commands)
    #sys.stderr.write('static_commands = '+str(static_commands)+'\n')
    #sys.stderr.write('instance_commands = '+str(instance_commands)+'\n')
    # Step 6: We are about to assign numbers to the variables.
    # We need to decide the order in which to assign them.
    # By default static variables (@) are assigned in the order
    # they appear in the file.
    # And, by default instance variables ($)
    # are assigned in the order they are created during instantiation.
    #sys.stderr.write(' done\ndetermining variable count order...')
    AssignVarOrderByCommand(static_commands, '@')
    #AssignVarOrderByFile(static_tree_root, search_instance_commands=False)
    #AssignVarOrderByFile(static_tree_root, search_instance_commands=True)
    AssignVarOrderByCommand(instance_commands, '$')
    # Step 7: Assign the variables.
    # (If the user requested any customized variable bindings,
    # load those now.)
    if len(settings.user_bindings_x) > 0:
        # Exclusive bindings: the values assigned here are "reserved" so
        # the automatic counters below must skip over them.
        reserved_values = CustomizeBindings(settings.user_bindings_x,
                                            static_tree_root,
                                            instance_tree_root)
    else:
        reserved_values = None
    sys.stderr.write('sorting variables...\n')
    AutoAssignVals(static_tree_root,
                   (settings.order_method != 'by_tree'),
                   reserved_values)
    AutoAssignVals(instance_tree_root,
                   (settings.order_method != 'by_tree'),
                   reserved_values)
    if len(settings.user_bindings) > 0:
        # Non-exclusive bindings simply overwrite whatever the automatic
        # assignment chose.
        CustomizeBindings(settings.user_bindings,
                          static_tree_root,
                          instance_tree_root)
    sys.stderr.write(' done\n')
if __name__ == "__main__":
    """
    This is is a "main module" wrapper for invoking ttree.py
    as a stand alone program. This program:
    1)reads a ttree file,
    2)constructs a tree of class definitions (g_objectdefs)
    3)constructs a tree of instantiated class objects (g_objects),
    4)automatically assigns values to the variables,
    5)and carries out the "write" commands to write the templates a file(s).
    """
    ####### Main Code Below: #######
    # (g_filename, g_version_str, g_date_str are module globals defined
    # earlier in this file.)
    g_program_name = g_filename
    sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+' ')
    sys.stderr.write('\n(python version '+str(sys.version)+')\n')
    try:
        settings = BasicUISettings()
        BasicUIParseArgs(sys.argv, settings)
        # Data structures to store the class definitionss and instances
        g_objectdefs = StaticObj('', None)  # The root of the static tree
                                            # has name '' (equivalent to '/')
        g_objects = InstanceObj('', None)   # The root of the instance tree
                                            # has name '' (equivalent to '/')
        # A list of commands to carry out
        g_static_commands = []
        g_instance_commands = []
        BasicUI(settings,
                g_objectdefs,
                g_objects,
                g_static_commands,
                g_instance_commands)
        # Now write the files
        # (Finally carry out the "write()" and "write_once()" commands.)
        # Optional: Multiple commands to write to the same file can be merged to
        # reduce the number of times the file is openned and closed.
        sys.stderr.write('writing templates...\n')
        # Erase the files that will be written to:
        EraseTemplateFiles(g_static_commands)
        EraseTemplateFiles(g_instance_commands)
        g_static_commands = MergeWriteCommands(g_static_commands)
        g_instance_commands = MergeWriteCommands(g_instance_commands)
        # Write the files with the original variable names present
        WriteTemplatesVarName(g_static_commands)
        WriteTemplatesVarName(g_instance_commands)
        # Write the files with the variable names substituted by values
        WriteTemplatesValue(g_static_commands)
        WriteTemplatesValue(g_instance_commands)
        sys.stderr.write(' done\n')
        # Step 11: Now write the variable bindings/assignments table.
        sys.stderr.write('writing \"ttree_assignments.txt\" file...')
        open('ttree_assignments.txt', 'w').close() # <-- erase previous version.
        WriteVarBindingsFile(g_objectdefs)
        WriteVarBindingsFile(g_objects)
        sys.stderr.write(' done\n')
    except (ValueError, InputError) as err:
        # Report user-facing errors and exit with a nonzero status.
        sys.stderr.write('\n\n'+str(err)+'\n')
        sys.exit(-1)
|
ganzenmg/lammps_current
|
tools/moltemplate/src/ttree.py
|
Python
|
gpl-2.0
| 221,566
|
[
"LAMMPS",
"NAMD",
"VisIt"
] |
8758822ae1d4d595164d04f4e5a35acf32f815ef5f7d17a8a17dd9506a53c1be
|
"""Install next gen sequencing analysis tools not currently packaged.
"""
import os
import re
from fabric.api import *
from fabric.contrib.files import *
import yaml
from shared import (_if_not_installed, _make_tmp_dir,
_get_install, _get_install_local, _make_copy, _configure_make,
_java_install, _python_cmd,
_symlinked_java_version_dir, _fetch_and_unpack, _python_make,
_get_lib_dir, _get_include_dir, _apply_patch)
from cloudbio.custom import shared, versioncheck
from cloudbio import libraries
from cloudbio.flavor.config import get_config_file
@_if_not_installed("twoBitToFa")
def install_ucsc_tools(env):
    """Fetch a selection of useful precompiled executables from UCSC.

    todo: install from source to handle 32bit and get more programs
    http://hgdownload.cse.ucsc.edu/admin/jksrc.zip
    """
    base_url = "http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/"
    wanted = ["liftOver", "faToTwoBit", "bedToBigBed",
              "bigBedInfo", "bigBedSummary", "bigBedToBed",
              "bedGraphToBigWig", "bigWigInfo", "bigWigSummary",
              "bigWigToBedGraph", "bigWigToWig",
              "fetchChromSizes", "wigToBigWig", "faSize", "twoBitInfo",
              "twoBitToFa", "faCount", "gtfToGenePred"]
    _download_executables(env, base_url, wanted)
@_if_not_installed("blat")
def install_kent_tools(env):
    """Install Jim Kent's blat tools (blat, gfClient, gfServer).

    Please note that the Blat source and executables are freely available for
    academic, nonprofit and personal use. Commercial licensing information is
    available on the Kent Informatics website (http://www.kentinformatics.com/).
    """
    base_url = "http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/blat/"
    wanted = ["blat", "gfClient", "gfServer"]
    _download_executables(env, base_url, wanted)
def _download_executables(env, base_url, tools):
    """Fetch each prebuilt binary in *tools* from *base_url* into the bin dir.

    Tools already present in the install directory or on PATH are skipped.
    """
    install_dir = shared._get_bin_dir(env)
    with _make_tmp_dir() as work_dir:
        with cd(work_dir):
            for tool in tools:
                final_tool = os.path.join(install_dir, tool)
                # Only download when not already installed and not on PATH.
                if not env.safe_exists(final_tool) and shared._executable_not_on_path(tool):
                    shared._remote_fetch(env, "%s%s" % (base_url, tool))
                    env.safe_sudo("cp -f %s %s" % (tool, install_dir))
                    final_path = os.path.join(install_dir, tool)
                    env.safe_sudo("chmod uga+rx %s" % final_path)
# --- Alignment tools
def install_featurecounts(env):
    """featureCounts from the subread package for counting reads mapping to
    genomic features
    """
    default_version = "1.4.4"
    version = env.get("tool_version", default_version)
    # Skip when the installed binary already reports this version.
    if versioncheck.up_to_date(env, "featureCounts", version, stdout_flag="Version"):
        return
    # Prebuilt binaries differ between OS X and Linux.
    platform = "MacOS" if env.distribution == "macosx" else "Linux"
    url = ("http://downloads.sourceforge.net/project/subread/"
           "subread-%s/subread-%s-%s-x86_64.tar.gz"
           % (version, version, platform))
    # Binary distribution: copy the executable directly, no make step.
    _get_install(url, env, _make_copy("find . -type f -perm -100 -name 'featureCounts'",
                                      do_make=False))
@_if_not_installed("bowtie")
def install_bowtie(env):
    """Install the bowtie short read aligner.

    http://bowtie-bio.sourceforge.net/index.shtml
    """
    version = env.get("tool_version", "1.0.0")
    dl_url = ("http://downloads.sourceforge.net/project/bowtie-bio/bowtie/%s/"
              "bowtie-%s-src.zip" % (version, version))
    _get_install(dl_url, env, _make_copy("find . -perm -100 -name 'bowtie*'"))
@_if_not_installed("bowtie2")
def install_bowtie2(env):
    """Install the bowtie2 short read aligner, with gap support.

    http://bowtie-bio.sourceforge.net/bowtie2/index.shtml
    """
    version = env.get("tool_version", "2.1.0")
    dl_url = ("http://downloads.sourceforge.net/project/bowtie-bio/bowtie2/%s/"
              "bowtie2-%s-source.zip" % (version, version))
    _get_install(dl_url, env, _make_copy("find . -perm -100 -name 'bowtie2*'"))
@_if_not_installed("bfast")
def install_bfast(env):
    """BFAST: Blat-like Fast Accurate Search Tool.

    http://sourceforge.net/apps/mediawiki/bfast/index.php?title=Main_Page

    Download URLs are organized by major version (x.y.z), so the major
    version is extracted from the requested version, which may carry a
    suffix (e.g. "0.7.0a").
    """
    default_version = "0.7.0a"
    version = env.get("tool_version", default_version)
    # Raw string: "\d" is an invalid non-raw string escape.
    major_version_regex = r"\d+\.\d+\.\d+"
    match = re.search(major_version_regex, version)
    # Fail with a clear message instead of AttributeError on None.
    if match is None:
        raise ValueError("Could not extract major version from %r" % version)
    major_version = match.group(0)
    url = "http://downloads.sourceforge.net/project/bfast/bfast/%s/bfast-%s.tar.gz"\
        % (major_version, version)
    _get_install(url, env, _configure_make)
@_if_not_installed("perm")
def install_perm(env):
    """PerM: efficient mapping of short sequences with periodic full sensitive spaced seeds.

    https://code.google.com/p/perm/
    """
    default_version = "4"
    version = env.get("tool_version", default_version)
    url = "http://perm.googlecode.com/files/PerM%sSource.tar.gz" % version
    def gcc44_makefile_patch():
        """If g++44 is available on this machine, patch the makefile to use it."""
        gcc_cmd = "g++44"
        with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                      warn_only=True):
            result = env.safe_run("%s -v" % gcc_cmd)
        # (removed leftover debug print of result.return_code)
        if result.return_code == 0:
            # Raw string: the escaped '+' characters must reach sed verbatim.
            env.safe_sed("makefile", r"g\+\+", gcc_cmd)
    _get_install(url, env, _make_copy("ls -1 perm", gcc44_makefile_patch))
@_if_not_installed("snap")
def install_snap(env):
    """Install SNAP (Scalable Nucleotide Alignment Program).

    http://snap.cs.berkeley.edu/
    """
    release = "0.15"
    dl_url = ("http://github.com/downloads/amplab/snap/"
              "snap-%s-linux.tar.gz" % release)
    _get_install(dl_url, env, _make_copy("find . -perm -100 -type f", do_make=False))
def install_stampy(env):
"""Stampy: mapping of short reads from illumina sequencing machines onto a reference genome.
http://www.well.ox.ac.uk/project-stampy
"""
version = "1.0.21"
#version = base_version
#revision = "1654"
#version = "{0}r{1}".format(base_version, revision)
#url = "http://www.well.ox.ac.uk/bioinformatics/Software/" \
# "stampy-%s.tgz" % (version)
# Ugh -- Stampy now uses a 'Stampy-latest' download target
url = "http://www.well.ox.ac.uk/bioinformatics/Software/" \
"Stampy-latest.tgz"
def _clean_makefile(env):
env.safe_sed("makefile", " -Wl", "")
_get_install_local(url, env, _make_copy(),
dir_name="stampy-{0}".format(version),
post_unpack_fn=_clean_makefile)
@_if_not_installed("gmap")
def install_gmap(env):
    """Install GMAP/GSNAP: genomic mapping and alignment for mRNA EST and short reads.

    http://research-pub.gene.com/gmap/
    """
    release = "2012-11-09"
    dl_url = "http://research-pub.gene.com/gmap/src/gmap-gsnap-%s.tar.gz" % release
    _get_install(dl_url, env, _configure_make)
def _wget_with_cookies(ref_url, dl_url):
    """Download *dl_url* with wget after establishing session cookies from *ref_url*.

    Needed for download sites that require visiting a referring page first.
    NOTE(review): relies on the module-level fabric `env` rather than taking
    it as a parameter, unlike the other helpers in this file.
    """
    env.safe_run("wget --cookies=on --keep-session-cookies --save-cookies=cookie.txt %s"
                 % (ref_url))
    env.safe_run("wget --referer=%s --cookies=on --load-cookies=cookie.txt "
                 "--keep-session-cookies --save-cookies=cookie.txt %s" %
                 (ref_url, dl_url))
@_if_not_installed("novoalign")
def install_novoalign(env):
"""Novoalign short read aligner using Needleman-Wunsch algorithm with affine gap penalties.
http://www.novocraft.com/main/index.php
"""
base_version = "V3.00.02"
cs_version = "V1.03.02"
_url = "http://www.novocraft.com/downloads/%s/" % base_version
ref_url = "http://www.novocraft.com/main/downloadpage.php"
base_url = "%s/novocraft%s.gcc.tar.gz" % (_url, base_version)
cs_url = "%s/novoalignCS%s.gcc.tar.gz" % (_url, cs_version)
install_dir = shared._get_bin_dir(env)
with _make_tmp_dir() as work_dir:
with cd(work_dir):
_wget_with_cookies(ref_url, base_url)
env.safe_run("tar -xzvpf novocraft%s.gcc.tar.gz" % base_version)
with cd("novocraft"):
for fname in ["isnovoindex", "novo2maq", "novo2paf",
"novo2sam.pl", "novoalign", "novobarcode",
"novoindex", "novope2bed.pl", "novorun.pl",
"novoutil"]:
env.safe_sudo("mv %s %s" % (fname, install_dir))
with _make_tmp_dir() as work_dir:
with cd(work_dir):
_wget_with_cookies(ref_url, cs_url)
env.safe_run("tar -xzvpf novoalignCS%s.gcc.tar.gz" % cs_version)
with cd("novoalignCS"):
for fname in ["novoalignCS"]:
env.safe_sudo("mv %s %s" % (fname, install_dir))
@_if_not_installed("novosort")
def install_novosort(env):
"""Multithreaded sort and merge for BAM files.
http://www.novocraft.com/wiki/tiki-index.php?page=Novosort
"""
base_version = "V3.00.02"
version = "V1.00.02"
url = "http://www.novocraft.com/downloads/%s/novosort%s.gcc.tar.gz" % (base_version, version)
ref_url = "http://www.novocraft.com/main/downloadpage.php"
install_dir = shared._get_bin_dir(env)
with _make_tmp_dir() as work_dir:
with cd(work_dir):
_wget_with_cookies(ref_url, url)
env.safe_run("tar -xzvpf novosort%s.gcc.tar.gz" % version)
with cd("novosort"):
for fname in ["novosort"]:
env.safe_sudo("mv %s %s" % (fname, install_dir))
@_if_not_installed("lastz")
def install_lastz(env):
    """Install the LASTZ sequence alignment program.

    http://www.bx.psu.edu/miller_lab/dist/README.lastz-1.02.00/README.lastz-1.02.00a.html
    """
    version = env.get("tool_version", "1.02.00")
    dl_url = ("http://www.bx.psu.edu/miller_lab/dist/"
              "lastz-%s.tar.gz" % version)
    def _strip_werror(env):
        # Drop -Werror so warnings on newer compilers do not fail the build.
        env.safe_sed("src/Makefile", " -Werror", "")
    _get_install(dl_url, env, _make_copy("find . -perm -100 -name 'lastz'"),
                 post_unpack_fn=_strip_werror)
@_if_not_installed("MosaikAligner")
def install_mosaik(env):
    """Install MOSAIK, a reference-guided aligner for next-generation sequencing data.

    http://code.google.com/p/mosaik-aligner/
    """
    release = "2.1.73"
    dl_url = ("http://mosaik-aligner.googlecode.com/files/"
              "MOSAIK-%s-binary.tar" % release)
    _get_install(dl_url, env, _make_copy("find . -perm -100 -type f", do_make=False))
# --- Utilities
def install_samtools(env):
"""SAM Tools provide various utilities for manipulating alignments in the SAM format.
http://samtools.sourceforge.net/
"""
default_version = "0.1.19"
version = env.get("tool_version", default_version)
if versioncheck.up_to_date(env, "samtools", version, stdout_flag="Version:"):
env.logger.info("samtools version {0} is up to date; not installing"
.format(version))
return
url = "http://downloads.sourceforge.net/project/samtools/samtools/" \
"%s/samtools-%s.tar.bz2" % (version, version)
def _safe_ncurses_make(env):
"""Combine samtools, removing ncurses refs if not present on system.
"""
with settings(warn_only=True):
result = env.safe_run("make")
# no ncurses, fix Makefile and rebuild
if result.failed:
env.safe_sed("Makefile", "-D_CURSES_LIB=1", "-D_CURSES_LIB=0")
env.safe_sed("Makefile", "-lcurses", "# -lcurses")
env.safe_run("make clean")
env.safe_run("make")
install_dir = shared._get_bin_dir(env)
for fname in env.safe_run_output("ls -1 samtools bcftools/bcftools bcftools/vcfutils.pl misc/wgsim").split("\n"):
env.safe_sudo("cp -f %s %s" % (fname.rstrip("\r"), install_dir))
_get_install(url, env, _safe_ncurses_make)
def install_gemini(env):
    """A lightweight db framework for disease and population genetics.

    https://github.com/arq5x/gemini

    Uses gemini's own bootstrap installer; an existing installation is
    upgraded in place via `gemini update`.
    """
    version = "0.7.0"
    if versioncheck.up_to_date(env, "gemini -v", version, stdout_flag="gemini"):
        return
    # NOTE(review): the full command "gemini -v" (not just the executable
    # name) is passed to _executable_not_on_path here -- confirm intended.
    elif not shared._executable_not_on_path("gemini -v"):
        env.safe_run("gemini update")
    else:
        iurl = "https://raw.github.com/arq5x/gemini/master/gemini/scripts/gemini_install.py"
        # Data dir: <prefix>/local/share/gemini unless the prefix already
        # contains "/local", in which case <prefix>/share/gemini.
        data_dir = os.path.join(env.system_install,
                                "local" if env.system_install.find("/local") == -1 else "",
                                "share", "gemini")
        with _make_tmp_dir(ext="-gemini") as work_dir:
            with cd(work_dir):
                # Remove any stale copy of the installer before fetching.
                if env.safe_exists(os.path.basename(iurl)):
                    env.safe_run("rm -f %s" % os.path.basename(iurl))
                installer = shared._remote_fetch(env, iurl)
                env.safe_run("%s %s %s %s %s" %
                             (_python_cmd(env), installer, "" if env.use_sudo else "--nosudo",
                              env.system_install, data_dir))
                env.safe_run("rm -f gemini_install.py")
@_if_not_installed("vtools")
def install_varianttools(env):
    """Install variant tools: annotation, selection, and analysis of variants for NGS data.

    http://varianttools.sourceforge.net/
    """
    release = "1.0.6"
    dl_url = ("http://downloads.sourceforge.net/project/varianttools/"
              "{ver}/variant_tools-{ver}-src.tar.gz".format(ver=release))
    _get_install(dl_url, env, _python_make)
@_if_not_installed("dwgsim")
def install_dwgsim(env):
"""DWGSIM: simulating NGS data and evaluating mappings and variant calling.
http://sourceforge.net/apps/mediawiki/dnaa/index.php?title=Main_Page
"""
version = "0.1.10"
samtools_version = "0.1.18"
url = "http://downloads.sourceforge.net/project/dnaa/dwgsim/" \
"dwgsim-{0}.tar.gz".format(version)
samtools_url = "http://downloads.sourceforge.net/project/samtools/samtools/" \
"{ver}/samtools-{ver}.tar.bz2".format(ver=samtools_version)
def _get_samtools(env):
shared._remote_fetch(env, samtools_url)
env.safe_run("tar jxf samtools-{0}.tar.bz2".format(samtools_version))
env.safe_run("ln -s samtools-{0} samtools".format(samtools_version))
_get_install(url, env, _make_copy("ls -1 dwgsim dwgsim_eval scripts/dwgsim_pileup_eval.pl"),
post_unpack_fn=_get_samtools)
@_if_not_installed("fastq_screen")
def install_fastq_screen(env):
    """A screening application for high throughput sequence data.

    http://www.bioinformatics.babraham.ac.uk/projects/fastq_screen/
    """
    version = "0.4"
    url = "http://www.bioinformatics.babraham.ac.uk/projects/fastq_screen/" \
          "fastq_screen_v%s.tar.gz" % version
    # NOTE(review): the shared dir is named "fastqc_screen" (extra 'c') while
    # the executable is "fastq_screen" -- looks like a typo, but renaming
    # would break existing installs; confirm before changing.
    install_dir = shared._symlinked_shared_dir("fastqc_screen", version, env)
    executable = "fastq_screen"
    # install_dir is falsy when this version is presumably already linked.
    if install_dir:
        with _make_tmp_dir() as work_dir:
            with cd(work_dir):
                out_file = shared._remote_fetch(env, url)
                env.safe_run("tar -xzvpf %s" % out_file)
                with cd("fastq_screen_v%s" % version):
                    env.safe_sudo("mv * %s" % install_dir)
        # Symlink the installed script into the system bin directory.
        env.safe_sudo("ln -s %s/%s %s/bin/%s" % (install_dir, executable,
                                                 env.system_install, executable))
def install_bedtools(env):
    """Install bedtools, a flexible suite of utilities for comparing genomic features.

    https://code.google.com/p/bedtools/
    """
    release = "2.17.0"
    if versioncheck.up_to_date(env, "bedtools --version", release, stdout_flag="bedtools"):
        return
    dl_url = ("https://bedtools.googlecode.com/files/"
              "BEDTools.v%s.tar.gz" % release)
    _get_install(dl_url, env, _make_copy("ls -1 bin/*"))
_shrec_run = """
#!/usr/bin/perl
use warnings;
use strict;
use FindBin qw($RealBin);
use Getopt::Long;
my @java_args;
my @args;
foreach (@ARGV) {
if (/^\-X/) {push @java_args,$_;}
else {push @args,$_;}}
system("java -cp $RealBin @java_args Shrec @args");
"""
@_if_not_installed("shrec")
def install_shrec(env):
"""Shrec is a bioinformatics tool for error correction of HTS read data.
http://sourceforge.net/projects/shrec-ec/
"""
version = "2.2"
url = "http://downloads.sourceforge.net/project/shrec-ec/SHREC%%20%s/bin.zip" % version
install_dir = _symlinked_java_version_dir("shrec", version, env)
if install_dir:
shrec_script = "%s/shrec" % install_dir
with _make_tmp_dir() as work_dir:
with cd(work_dir):
out_file = shared._remote_fetch(env, url)
env.safe_run("unzip %s" % out_file)
env.safe_sudo("mv *.class %s" % install_dir)
for line in _shrec_run.split("\n"):
if line.strip():
env.safe_append(shrec_script, line, use_sudo=env.use_sudo)
env.safe_sudo("chmod a+rwx %s" % shrec_script)
env.safe_sudo("ln -s %s %s/bin/shrec" % (shrec_script, env.system_install))
def install_echo(env):
    """Install ECHO, a reference-free short-read error correction algorithm.

    http://uc-echo.sourceforge.net/
    """
    release = "1_12"
    dl_url = ("http://downloads.sourceforge.net/project/uc-echo/source%20release/"
              "echo_v{0}.tgz".format(release))
    _get_install_local(dl_url, env, _make_copy())
# -- Analysis
def install_picard(env):
    """Install Picard: command-line utilities that manipulate BAM files with a Java API.

    http://picard.sourceforge.net/
    """
    release = "1.96"
    dl_url = ("http://downloads.sourceforge.net/project/picard/"
              "picard-tools/%s/picard-tools-%s.zip" % (release, release))
    _java_install("picard", release, dl_url, env)
def install_alientrimmer(env):
    """Install AlienTrimmer, an adapter removal tool.

    http://www.ncbi.nlm.nih.gov/pubmed/23912058
    """
    release = "0.3.2"
    dl_url = ("ftp://ftp.pasteur.fr/pub/gensoft/projects/AlienTrimmer/"
              "AlienTrimmer_%s.tar.gz" % release)
    _java_install("AlienTrimmer", release, dl_url, env)
def install_rnaseqc(env):
    """Install RNA-SeQC: quality control metrics for RNA-seq data.

    https://www.broadinstitute.org/cancer/cga/rna-seqc
    """
    release = "1.1.7"
    jar_url = ("https://github.com/chapmanb/RNA-SeQC/releases/download/"
               "v%s/RNA-SeQC_v%s.jar" % (release, release))
    target_dir = _symlinked_java_version_dir("RNA-SeQC", release, env)
    # A falsy target_dir means nothing to do for this version.
    if not target_dir:
        return
    with _make_tmp_dir() as work_dir:
        with cd(work_dir):
            jar_file = shared._remote_fetch(env, jar_url)
            env.safe_sudo("mv %s %s" % (jar_file, target_dir))
def install_varscan(env):
    """Install VarScan: variant detection in massively parallel sequencing data.

    http://varscan.sourceforge.net/
    """
    release = "2.3.7"
    jar_url = "http://downloads.sourceforge.net/project/varscan/VarScan.v%s.jar" % release
    target_dir = _symlinked_java_version_dir("varscan", release, env)
    # A falsy target_dir means nothing to do for this version.
    if not target_dir:
        return
    with _make_tmp_dir() as work_dir:
        with cd(work_dir):
            jar_file = shared._remote_fetch(env, jar_url)
            env.safe_sudo("mv %s %s" % (jar_file, target_dir))
def install_mutect(env):
    """Install MuTect, the Broad Institute somatic variant caller."""
    release = "1.1.5"
    zip_url = ("https://github.com/broadinstitute/mutect/releases/download/"
               "%s/muTect-%s-bin.zip" % (release, release))
    target_dir = _symlinked_java_version_dir("mutect", release, env)
    # A falsy target_dir means nothing to do for this version.
    if not target_dir:
        return
    with _make_tmp_dir() as work_dir:
        with cd(work_dir):
            out_file = shared._remote_fetch(env, zip_url)
            env.safe_run("unzip %s" % out_file)
            env.safe_sudo("mv *.jar version.txt LICENSE* %s" % target_dir)
@_if_not_installed("bam")
def install_bamutil(env):
    """Install bamUtil from the U of M Center for Statistical Genetics.

    http://genome.sph.umich.edu/wiki/BamUtil
    """
    release = "1.0.7"
    tarball = "http://genome.sph.umich.edu/w/images/5/5d/BamUtilLibStatGen.%s.tgz" % release
    _get_install(tarball, env, _make_copy("ls -1 bamUtil/bin/bam"),
                 dir_name="bamUtil_%s" % release)
@_if_not_installed("tabix")
def install_tabix(env):
    """Install tabix, the generic indexer for TAB-delimited genome position files.

    http://samtools.sourceforge.net/tabix.shtml
    """
    release = "0.2.6"
    dl_url = "http://downloads.sourceforge.net/project/samtools/tabix/tabix-%s.tar.bz2" % release
    _get_install(dl_url, env, _make_copy("ls -1 tabix bgzip"))
@_if_not_installed("disambiguate.py")
def install_disambiguate(env):
    """Install disambiguate: disambiguates reads aligning to multiple genomes.

    https://github.com:mjafin/disambiguate
    """
    repo = "git clone https://github.com/mjafin/disambiguate.git"
    _get_install(repo, env, _python_make)
def install_grabix(env):
"""a wee tool for random access into BGZF files
https://github.com/arq5x/grabix
"""
version = "0.1.4"
revision = "5179949d"
try:
uptodate = versioncheck.up_to_date(env, "grabix", version, stdout_flag="version:")
# Old versions will not have any version information
except IOError:
uptodate = False
if uptodate:
return
repository = "git clone https://github.com/arq5x/grabix.git"
_get_install(repository, env, _make_copy("ls -1 grabix"),
revision=revision)
@_if_not_installed("pbgzip")
def install_pbgzip(env):
"""Parallel blocked bgzip -- compatible with bgzip but with thread support.
https://github.com/nh13/samtools/tree/master/pbgzip
"""
repository = "git clone https://github.com/chapmanb/samtools.git"
revision = "2cce3ffa97"
def _build(env):
with cd("pbgzip"):
env.safe_run("make")
install_dir = shared._get_bin_dir(env)
env.safe_sudo("cp -f pbgzip %s" % (install_dir))
_get_install(repository, env, _build, revision=revision)
@_if_not_installed("bamtools")
def install_bamtools(env):
"""command-line toolkit for working with BAM data
https://github.com/pezmaster31/bamtools
"""
version = "3fe66b9"
repository = "git clone --recursive https://github.com/pezmaster31/bamtools.git"
def _cmake_bamtools(env):
env.safe_run("mkdir build")
with cd("build"):
env.safe_run("cmake ..")
env.safe_run("make")
env.safe_sudo("cp bin/* %s" % shared._get_bin_dir(env))
env.safe_sudo("cp lib/* %s" % shared._get_lib_dir(env))
_get_install(repository, env, _cmake_bamtools,
revision=version)
@_if_not_installed("ogap")
def install_ogap(env):
    """Install ogap, a gap opening realigner for BAM data streams.

    https://github.com/ekg/ogap
    """
    revision = "652c525"
    repo = "git clone --recursive https://github.com/ekg/ogap.git"
    _get_install(repo, env, _make_copy("ls ogap"),
                 revision=revision)
def install_tophat(env):
"""TopHat is a fast splice junction mapper for RNA-Seq reads
http://ccb.jhu.edu/software/tophat/index.shtml
"""
default_version = "2.0.9"
version = env.get("tool_version", default_version)
if versioncheck.is_version(env, "tophat", version, args="--version", stdout_flag="TopHat"):
env.logger.info("tophat version {0} is up to date; not installing"
.format(version))
return
platform = "OSX" if env.distribution == "macosx" else "Linux"
url = "http://ccb.jhu.edu/software/tophat/downloads/" \
"tophat-%s.%s_x86_64.tar.gz" % (version, platform)
_get_install(url, env,
_make_copy("find . -perm -100 -type f", do_make=False))
install_tophat2 = install_tophat
@_if_not_installed("cufflinks")
def install_cufflinks(env):
    """Install Cufflinks: transcript assembly and differential expression for RNA-Seq.

    http://cufflinks.cbcb.umd.edu/
    """
    version = env.get("tool_version", "2.1.1")
    dl_url = ("http://cufflinks.cbcb.umd.edu/downloads/"
              "cufflinks-%s.Linux_x86_64.tar.gz" % version)
    _get_install(dl_url, env, _make_copy("find . -perm -100 -type f",
                                         do_make=False))
# --- Assembly
@_if_not_installed("ABYSS")
def install_abyss(env):
"""Assembly By Short Sequences - a de novo, parallel, paired-end sequence assembler.
http://www.bcgsc.ca/platform/bioinfo/software/abyss
"""
# XXX check for no sparehash on non-ubuntu systems
default_version = "1.3.4"
version = env.get("tool_version", default_version)
url = "http://www.bcgsc.ca/downloads/abyss/abyss-%s.tar.gz" % version
def _remove_werror_get_boost(env):
env.safe_sed("configure", " -Werror", "")
# http://osdir.com/ml/abyss-users-science/2011-10/msg00108.html
url = "http://downloads.sourceforge.net/project/boost/boost/1.47.0/boost_1_47_0.tar.bz2"
dl_file = shared._remote_fetch(env, url)
env.safe_run("tar jxf %s" % dl_file)
env.safe_run("ln -s boost_1_47_0/boost boost")
_get_install(url, env, _configure_make, post_unpack_fn=_remove_werror_get_boost)
def install_transabyss(env):
    """Install Trans-ABySS for analyzing ABySS multi-k-assembled transcriptome data.

    http://www.bcgsc.ca/platform/bioinfo/software/trans-abyss
    """
    release = "1.4.4"
    dl_url = ("http://www.bcgsc.ca/platform/bioinfo/software/trans-abyss/"
              "releases/%s/trans-ABySS-v%s.tar.gz" % (release, release))
    _get_install_local(dl_url, env, _make_copy(do_make=False))
@_if_not_installed("velvetg")
def install_velvet(env):
"""Sequence assembler for very short reads.
http://www.ebi.ac.uk/~zerbino/velvet/
"""
default_version = "1.2.08"
version = env.get("tool_version", default_version)
url = "http://www.ebi.ac.uk/~zerbino/velvet/velvet_%s.tgz" % version
def _fix_library_order(env):
"""Fix library order problem in recent gcc versions
http://biostar.stackexchange.com/questions/13713/
error-installing-velvet-assembler-1-1-06-on-ubuntu-server
"""
env.safe_sed("Makefile", "Z_LIB_FILES=-lz", "Z_LIB_FILES=-lz -lm")
_get_install(url, env, _make_copy("find . -perm -100 -name 'velvet*'"),
post_unpack_fn=_fix_library_order)
@_if_not_installed("Ray")
def install_ray(env):
    """Install Ray: parallel genome assemblies for parallel DNA sequencing.

    http://denovoassembler.sourceforge.net/
    """
    version = env.get("tool_version", "2.2.0")
    dl_url = "http://downloads.sourceforge.net/project/denovoassembler/Ray-v%s.tar.bz2" % version
    def _no_op(env):
        # Nothing needs patching after unpacking.
        return
    _get_install(dl_url, env, _make_copy("find . -name Ray"),
                 post_unpack_fn=_no_op)
def install_trinity(env):
    """Install Trinity: de novo reconstruction of transcriptomes from RNA-seq data.

    http://trinityrnaseq.sourceforge.net/
    """
    release = "r2012-10-05"
    dl_url = ("http://downloads.sourceforge.net/project/trinityrnaseq/"
              "trinityrnaseq_%s.tgz" % release)
    def _strip_werror(env):
        # Drop -Werror so warnings in the bundled jellyfish do not fail the build.
        env.safe_sed("trinity-plugins/jellyfish/Makefile.in", " -Werror", "")
    _get_install_local(dl_url, env, _make_copy(),
                       post_unpack_fn=_strip_werror)
def install_cortex_var(env):
"""De novo genome assembly and variation analysis from sequence data.
http://cortexassembler.sourceforge.net/index_cortex_var.html
"""
version = "1.0.5.21"
url = "http://downloads.sourceforge.net/project/cortexassembler/cortex_var/" \
"latest/CORTEX_release_v{0}.tgz".format(version)
def _cortex_build(env):
env.safe_sed("Makefile", "\-L/full/path/\S*",
"-L{0}/lib -L/usr/lib -L/usr/local/lib".format(env.system_install))
env.safe_sed("Makefile", "^IDIR_GSL =.*$",
"IDIR_GSL={0}/include -I/usr/include -I/usr/local/include".format(env.system_install))
env.safe_sed("Makefile", "^IDIR_GSL_ALSO =.*$",
"IDIR_GSL_ALSO={0}/include/gsl -I/usr/include/gsl -I/usr/local/include/gsl".format(
env.system_install))
with cd("libs/gsl-1.15"):
env.safe_run("make clean")
with cd("libs/htslib"):
env.safe_run("make clean")
env.safe_run("make")
for cols in ["1", "2", "3", "4", "5"]:
for kmer in ["31", "63", "95"]:
env.safe_run("make MAXK={0} NUM_COLS={1} cortex_var".format(kmer, cols))
with cd("scripts/analyse_variants/needleman_wunsch"):
env.safe_sed("Makefile", "string_buffer.c", "string_buffer.c -lz")
# Fix incompatibilities with gzfile struct in zlib 1.2.6+
for fix_gz in ["libs/string_buffer/string_buffer.c", "libs/bioinf/bioinf.c",
"libs/string_buffer/string_buffer.h", "libs/bioinf/bioinf.h"]:
env.safe_sed(fix_gz, "gzFile \*", "gzFile ")
env.safe_sed(fix_gz, "gzFile\*", "gzFile")
env.safe_run("make")
_get_install_local(url, env, _cortex_build)
def install_bcbio_variation(env):
"""Toolkit to analyze genomic variation data with comparison and ensemble approaches.
https://github.com/chapmanb/bcbio.variation
"""
version = "0.1.9"
url = "https://github.com/chapmanb/bcbio.variation/releases/download/" \
"v%s/bcbio.variation-%s-standalone.jar" % (version, version)
install_dir = _symlinked_java_version_dir("bcbio_variation", version, env)
if install_dir:
with _make_tmp_dir() as work_dir:
with cd(work_dir):
jar_file = shared._remote_fetch(env, url)
env.safe_sudo("mv %s %s" % (jar_file, install_dir))
# --- ChIP-seq
@_if_not_installed("macs14")
def install_macs(env):
    """Install MACS: Model-based Analysis for ChIP-Seq.

    http://liulab.dfci.harvard.edu/MACS/
    """
    version = env.get("tool_version", "1.4.2")
    dl_url = ("https://github.com/downloads/taoliu/MACS/"
              "MACS-%s.tar.gz" % version)
    _get_install(dl_url, env, _python_make)
# --- Structural variation
@_if_not_installed("hydra")
def install_hydra(env):
    """Install Hydra: detects structural variation breakpoints in unique and duplicated regions.

    https://code.google.com/p/hydra-sv/
    """
    release = "0.5.3"
    dl_url = "http://hydra-sv.googlecode.com/files/Hydra.v{0}.tar.gz".format(release)
    def _clean_first(env):
        # Start from a clean tree before building.
        env.safe_run("make clean")
    _get_install(dl_url, env, _make_copy("ls -1 bin/* scripts/*"),
                 post_unpack_fn=_clean_first)
def install_freec(env):
    """Control-FREEC: a tool for detection of copy number changes and allelic imbalances.

    http://bioinfo-out.curie.fr/projects/freec/

    Only Linux binaries are distributed, so installation is limited to
    Ubuntu/Debian; other platforms get an explicit error instead of the
    previous NameError from an unbound url.
    """
    version = "6.4"
    if env.distribution in ["ubuntu", "debian"]:
        if env.is_64bit:
            url = "http://bioinfo-out.curie.fr/projects/freec/src/FREEC_Linux64.tar.gz"
        else:
            url = "http://bioinfo-out.curie.fr/projects/freec/src/FREEC_LINUX32.tar.gz"
        if not versioncheck.up_to_date(env, "freec", version, stdout_index=1):
            _get_install(url, env, _make_copy("find . -name 'freec'"), dir_name=".")
    else:
        raise ValueError("FREEC is only available for Ubuntu/Debian, not %r"
                         % env.distribution)
@_if_not_installed("CRISP.py")
def install_crisp(env):
"""Detect SNPs and short indels from pooled sequencing data.
https://sites.google.com/site/vibansal/software/crisp/
"""
version = "5"
url = "https://sites.google.com/site/vibansal/software/crisp/" \
"CRISP-linux-v{0}.tar.gz".format(version)
def _make_executable():
env.safe_run("chmod a+x *.py")
_get_install(url, env, _make_copy("ls -1 CRISP.py crisp_to_vcf.py",
premake_cmd=_make_executable,
do_make=False))
@_if_not_installed("run_pipeline.pl")
def install_tassel(env):
"""TASSEL: evaluate traits associations, evolutionary patterns, and linkage disequilibrium.
http://www.maizegenetics.net/index.php?option=com_content&task=view&id=89&/Itemid=119
"""
version = "4.0"
url = "http://www.maizegenetics.net/tassel/tassel{0}_standalone.zip".format(version)
executables = ["start_tassel.pl", "run_pipeline.pl"]
install_dir = _symlinked_java_version_dir("tassel", version, env)
if install_dir:
with _make_tmp_dir() as work_dir:
with cd(work_dir):
dl_file = shared._remote_fetch(env, url)
env.safe_run("unzip %s" % dl_file)
with cd("tassel{0}_standalone".format(version)):
for x in executables:
env.safe_sed(x, "^my \$top.*;",
"use FindBin qw($RealBin); my $top = $RealBin;")
env.safe_sudo("chmod a+rwx %s" % x)
env.safe_sudo("mv * %s" % install_dir)
for x in executables:
env.safe_sudo("ln -s %s/%s %s/bin/%s" % (install_dir, x,
env.system_install, x))
@_if_not_installed("ustacks")
def install_stacks(env):
    """Install Stacks, which builds loci out of a set of short-read sequenced samples.

    http://creskolab.uoregon.edu/stacks/
    """
    release = "0.9999"
    dl_url = ("http://creskolab.uoregon.edu/stacks/source/"
              "stacks-{0}.tar.gz".format(release))
    _get_install(dl_url, env, _configure_make)
@_if_not_installed("seqlogo")
def install_weblogo(env):
    """Weblogo

    http://weblogo.berkeley.edu/
    """
    version = "2.8.2"
    url = "http://weblogo.berkeley.edu/release/weblogo.%s.tar.gz" % version
    # First pass: copy the executables.
    _get_install(url, env, _make_copy("find . -perm -100 -type f", do_make=False))
    def _cp_pm(env):
        """Copy the weblogo perl modules/templates next to the perl libraries."""
        for perl_module in ["template.pm", "logo.pm", "template.eps"]:
            env.safe_sudo("cp %s %s/lib/perl5" % (perl_module, env.system_install))
    # Second pass: install the perl support files. Pass the function itself;
    # previously `_cp_pm(env)` invoked it immediately (outside the unpacked
    # directory) and handed _get_install None as its build step.
    _get_install(url, env, _cp_pm)
|
lpantano/cloudbiolinux
|
cloudbio/custom/bio_nextgen.py
|
Python
|
mit
| 33,863
|
[
"Bowtie"
] |
5ddeb7ceb0d852eaf25eb5725f83221a04d7bd304588ae4d84d5cca1c9c3763e
|
#!/usr/bin/env python
# vim: fileencoding=utf-8 et ts=4 sts=4 sw=4 tw=0 fdm=marker fmr=#{,#}
""" A simple asynchronous RPC client that shows how to:
* use specific serializer
* make multiple RPC calls at the same time using the Python threading API
* handle remote exceptions
* do load balancing
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012-2014. Brian Granger, Min Ragan-Kelley, Alexander Glyzov
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE distributed as part of this software.
#-----------------------------------------------------------------------------
from netcall.concurrency import get_tools
from netcall.threading import ThreadingRPCClient
from netcall import RemoteRPCError, RPCTimeoutError, JSONSerializer
def printer(msg, func, *args):
    """Run func(*args), printing *msg* before the call and again with the result."""
    print msg, '<request>'
    res = func(*args)
    print msg, '<response>', res
if __name__ == '__main__':
    #from netcall import setup_logger
    #setup_logger()
    # Thread pool used to issue several RPC calls concurrently.
    tools = get_tools(env=None)
    executor = tools.Executor(64)
    spawn = executor.submit
    # Custom serializer/deserializer functions can be passed in. The server
    # side ones must match.
    echo = ThreadingRPCClient(executor=executor, serializer=JSONSerializer())
    echo.connect('tcp://127.0.0.1:5555')
    tasks = [spawn(printer, "[echo] Echoing \"Hi there\"", echo.echo, "Hi there")]
    # A server-side exception should surface locally as RemoteRPCError.
    try:
        print "Testing a remote exception...",
        echo.error()
        print "FAIL, no remote exception!"
    except RemoteRPCError, e:
        print "OK, got an expected remote exception:"
        #print e.ename
        print e.evalue
        print e.traceback
    # A call exceeding its timeout should raise RPCTimeoutError.
    try:
        print "Testing a timeout...",
        echo.call('sleep', args=[2.3], timeout=1.1)
        print "FAIL, timeout didn't work!"
    except RPCTimeoutError, e:
        print "OK, got an expected timeout:"
        print repr(e)
    print
    # result='ignore' discards the reply (fire-and-forget).
    print 'Ignoring result... ',
    echo.call('error', result='ignore')
    print 'OK\n'
    tasks.append(spawn(printer, "[echo] Sleeping for 2 seconds...", echo.sleep, 2.0))
    math = ThreadingRPCClient(executor=executor)
    # By connecting to two instances, requests are load balanced.
    math.connect('tcp://127.0.0.1:5556')
    math.connect('tcp://127.0.0.1:5557')
    for i in range(5):
        for j in range(5):
            tasks.append(
                spawn(printer, "[math] Adding: %s + %s" % (i, j), math.add, i, j)
            )
    # Block until every outstanding call has completed.
    for task in tasks:
        task.result()
|
srault95/netcall
|
examples/threading/client_threading.py
|
Python
|
bsd-3-clause
| 2,637
|
[
"Brian"
] |
9c5415d5e0e5a19ba8e38b3a6d97b343234a94c6b9108c435aec3d05e5151b31
|
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import gtk
import os
from zeroinstall.support import tasks
from zeroinstall.gtkui import gtkutils
n_windows = 0
last_error = None
builderfile = os.path.join(os.path.dirname(__file__), 'zero-install.ui')
class Template(gtkutils.Template):
	"""gtkutils.Template pre-bound to this package's zero-install.ui builder file."""
	def __init__(self, root):
		gtkutils.Template.__init__(self, builderfile, root)
class Dialog(gtk.Dialog):
	"""Base dialog for this GUI: no separator, centred on screen."""
	def __init__(self):
		gtk.Dialog.__init__(self)
		self.set_has_separator(False)
		self.set_position(gtk.WIN_POS_CENTER)
	def add_mixed_button(self, message, stock, response):
		"""Add an icon+label action button (see MixedButton) and return it."""
		button = MixedButton(message, stock)
		button.set_flags(gtk.CAN_DEFAULT)
		self.add_action_widget(button, response)
		button.show_all()
		return button
class DialogResponse(tasks.Blocker):
	# Blocker that triggers exactly once, when `dialog` emits 'response'.
	# The response id chosen by the user is stored in self.response.
	response = None
	def __init__(self, dialog):
		tasks.Blocker.__init__(self, dialog.get_title())
		a = None
		def response(d, resp):
			self.response = resp
			d.disconnect(a)  # one-shot: detach the handler after first response
			self.trigger()
		a = dialog.connect('response', response)
class ButtonClickedBlocker(tasks.Blocker):
	# Blocker that triggers once, when `button` is first clicked.
	#
	# Bug fix: the original connected a throw-away lambda instead of the
	# local `clicked` handler, leaving `clicked` as dead code -- so the
	# handler was never disconnected and the blocker was re-triggered on
	# every subsequent click.  Connect the real handler, which detaches
	# itself after the first click (same pattern as DialogResponse).
	def __init__(self, button):
		tasks.Blocker.__init__(self, "Button click")
		a = None
		def clicked(b):
			b.disconnect(a)  # one-shot: detach before waking waiters
			self.trigger()
		a = button.connect('clicked', clicked)
def alert(parent, message, type = gtk.MESSAGE_ERROR):
	# Show a message box over `parent`.  Error messages are additionally
	# recorded in the module-level `last_error` (presumably so callers or
	# tests can inspect the last reported error -- confirm with users).
	if type == gtk.MESSAGE_ERROR:
		global last_error
		last_error = message
	gtkutils.show_message_box(parent, message, type)
def MixedButton(message, stock, x_align = 0.5, button = None):
	# Build a button showing both a stock icon and a mnemonic text label.
	# If `button` is supplied, the content is packed into it instead of a
	# newly created gtk.Button; the (possibly new) button is returned.
	if button is None:
		button = gtk.Button()
	label = gtk.Label('')
	label.set_text_with_mnemonic(message)
	label.set_mnemonic_widget(button)
	image = gtk.image_new_from_stock(stock, gtk.ICON_SIZE_BUTTON)
	box = gtk.HBox(False, 2)
	# x_align positions the icon+label group horizontally inside the button.
	align = gtk.Alignment(x_align, 0.5, 0.0, 0.0)
	box.pack_start(image, False, False, 0)
	box.pack_end(label, False, False, 0)
	button.add(align)
	align.add(box)
	return button
|
timdiels/zeroinstall
|
zeroinstall/0launch-gui/dialog.py
|
Python
|
lgpl-2.1
| 1,958
|
[
"VisIt"
] |
cfdf4fbfa7340747b8808121baf4f87baa4d42fad3edf45e33bf5a80d13cf560
|
# Copyright 2006 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library to allow for long running, slow operations.
Intended mainly for database operations, this library provides a
skeleton to safely execute a given set of operations such that the
runtime is a fraction of realtime (such as N% utilization). It is
designed to keep track of its position and be restartable, though that
is dependent upon the class that subclasses TrickledOperation.
"""
__author__ = 'bbiskebo@google.com (Brian Biskeborn)'
# Original author: chip@google.com (Chip Turner)
import logging
import time
class TrickledOperation(object):
  """The class representing both how a trickle operation is performed and
  the state associated with a given operation.
  Users of this class will subclass it and implement the protected
  methods _SetupTrickle, _Finished, _PerformTrickle, and
  _FinalizeTrickle. These methods will be called during a trickle
  operation such that the size of the batch passed to _PerformTrickle
  tries to be exactly utilization_percent of each cycle_time.
  Specifying utilization_percent=100 turns off all sleeping, and
  does the work at maximum speed.
  """
  def __init__(self, utilization_percent, cycle_time):
    """Constructor. Subclass and store operation-specific state here.
    Args:
      utilization_percent: percent of time to spend in _PerformTrickle
      cycle_time: interval over which utilization_percent is
        calculated per run
    """
    self._utilization_fraction = utilization_percent / 100.0
    self._cycle_time = cycle_time
    self._batch_size = 1  # Start maximally cautious (single statement)
                          # and increase batch size as we can
    self._batch_size_limit = 1000000
    self.verbose = True  # when True, Trickle() logs per-batch progress
  def _SetupTrickle(self):
    """A setup method, invoked before a trickle loop is run.
    It is valid to execute multiple loops, and this method will be
    invoked for each. Useful for finding the next starting position.
    """
    raise NotImplementedError('call to pure virtual function')
  def _FinalizeTrickle(self):
    """A completion method copied after a series of trickle loops.
    Intended to finish up state, such as the final 'slosh' rows of a
    continual copy loop.
    """
    raise NotImplementedError('call to pure virtual function')
  def _Finished(self):
    """Called to determine if a trickle is complete."""
    raise NotImplementedError('call to pure virtual function')
  def _PerformTrickle(self, batch_size):
    """The method invoked to perform the actual trickle operation.
    This method will be invoked multiple times and is passed the size
    of a batch to execute, which will vary depending upon the runtime
    of the previous batch.
    Args:
      batch_size: size of the current batch (subclass dependent)
    Returns:
      number of items processed (usually batch_size)
    """
    raise NotImplementedError('call to pure virtual function')
  def _GetProgress(self):
    """Called to fetch progress information
    This method is intended to be overridden by inheriting classes
    that can provide data about their progress through the trickle.
    Args:
      none
    Returns:
      String representation of current progress state or None if unknown
    """
    return None
  def SetBatchSizeLimit(self, n):
    """Set the maximum batch_size that will ever be submitted by Trickle().
    Intended for operations that need to be artificially slowed down in
    excess of what the throttler would have done. For instance, when
    replicas are drastically slower than the primary for some reason, which
    the throttler will be unable to detect.
    Args:
      n: How many queries may be run in a batch. Values above 1000000 are
        probably ill-advised.
    """
    self._batch_size_limit = n
  def Trickle(self):
    """Perform the actual trickle operation.
    This function will loop until self._Finished() returns true. It
    calls the above methods in this order:
    self._SetupTrickle()
    while not self._Finished():
    ...
    self._PerformTrickle(self, batch_size)
    ...
    self._FinalizeTrickle()
    Args:
    None
    Returns:
    None
    """
    self._SetupTrickle()
    # track the last ten cycles worth of copy rates
    copy_rates = [0] * 10
    copy_rate_idx = 0
    # also track average copies over this invocation's lifetime
    rows_copied = 0
    start_time = time.time()
    while not self._Finished():
      then = time.time()
      batch_size = self._batch_size
      rowcount = self._PerformTrickle(batch_size)
      # Increase or decrease batch size to get the target utilization
      # percentage at the given cycle time. At all times, we're cautious about
      # floating point rounding effects and the like.
      time_delta = time.time() - then
      ideal_delta = self._cycle_time * self._utilization_fraction
      if time_delta > 2.0 * ideal_delta:
        # If our utilization fraction is way too high, we want to decrease our
        # batch size fairly drastically, but we don't want to chop things to
        # nothing in case what we saw was a transient blip. Note that if we get
        # here, either (a) we've seen enough short statements to increase our
        # batch size from its initial value of 1 before hitting this, or (b) a
        # batch size of 1 is still way too big, in which case we modulate cycle
        # time below instead.
        self._batch_size = max(int(batch_size/2), 1)
      elif time_delta * 1.5 < ideal_delta:
        # We don't want to increase batch size quite as drastically
        # (batch_size + 1 guarantees progress even when batch_size is 1).
        self._batch_size = max(int(batch_size * 1.5), batch_size + 1)
      else:
        # batch_size is between 2x and 0.67x target. Modulate it directly to
        # the target value.
        self._batch_size = max(int(batch_size * ideal_delta / time_delta), 1)
      # Rev limiter in case an operation is a no-op by accident.
      self._batch_size = min(self._batch_size, self._batch_size_limit)
      # How long are we going to sleep this time? At least the rest of the
      # cycle time, longer if utilization would otherwise be too high, and
      # at least one second regardless but no more than 2x the cycle time.
      if self._utilization_fraction < 1.0:
        sleep_time = min(
            max(self._cycle_time - time_delta,
                (time_delta / self._utilization_fraction) - time_delta,
                1),
            2 * self._cycle_time)
      else:
        # But if running with 100% utilization, don't sleep at all.
        sleep_time = 0.0
      # update average copy rate (ring buffer over the last ten cycles)
      this_batch_rate = batch_size / (sleep_time + time_delta)
      copy_rates[copy_rate_idx] = this_batch_rate
      copy_rate_idx = (copy_rate_idx + 1) % len(copy_rates)
      current_rate_avg = sum(copy_rates) / float(len(copy_rates))
      rows_copied += rowcount
      if self.verbose:
        self._LogStatus(batch_size, start_time, time_delta, sleep_time,
                        current_rate_avg, rows_copied, copy_rate_idx)
      time.sleep(sleep_time)
    if self.verbose:
      self._LogFinish(rows_copied, start_time)
    self._FinalizeTrickle()
  def _LogStatus(self, batch_size, start_time, time_delta, sleep_time,
                 current_rate_avg, rows_copied, copy_rate_idx):
    # Log one line per batch: sizes, timings, and throughput.
    # NOTE(review): copy_rate_idx is accepted but unused here.
    progress = self._GetProgress()
    if progress:
      progress = ', ' + progress
    else:
      progress = ''
    logging.info('batch of %d in %.2f s%s, sleeping %.2f s'
                 % (batch_size, time_delta, progress, sleep_time))
    logging.info('util %.2f, new batch size %d '
                 '(%.2f current, %.2f avg rows/sec)'
                 % (time_delta / (time_delta + sleep_time),
                    self._batch_size,
                    current_rate_avg,
                    rows_copied / (time.time() - start_time)))
  def _LogFinish(self, rows_copied, start_time):
    # Log the lifetime average throughput once the trickle completes.
    logging.info('Done: %.2f avg rows/sec',
                 (rows_copied / (time.time() - start_time)))
class GeneratorOperation(TrickledOperation):
  """Adapts blocking functions so they can run within trickle_lib.
  The adapter only requires that users insert a 'yield' statement after each
  entry in the batch has been processed.
  """
  def __init__(self, generator, utilization_percent, cycle_time):
    """Constructor.
    Args:
      generator: A function, that does work every time every time it is iterated
        over.
      utilization_percent: An int, percent of time to spend in _PerformTrickle.
      cycle_time: interval over which utilization_percent is
        calculated per run
    """
    TrickledOperation.__init__(self, utilization_percent, cycle_time)
    self._generator = generator
    self._finished = False
  def _SetupTrickle(self):
    # Nothing to prepare: the generator carries all the state.
    pass
  def _FinalizeTrickle(self):
    # Nothing to clean up.
    pass
  def _Finished(self):
    # True once the wrapped generator has been exhausted.
    return self._finished
  def _PerformTrickle(self, batch_size):
    # Advance the generator up to batch_size steps; stop early (and mark
    # the operation finished) when it raises StopIteration.
    count = 0
    advance = self._generator.next
    try:
      while count < batch_size:
        advance()
        count += 1
    except StopIteration:
      self._finished = True
    return count
|
dbarobin/google-mysql-tools
|
pylib/trickle_lib.py
|
Python
|
apache-2.0
| 9,685
|
[
"Brian"
] |
552901fd8afd1d3d7ddb89e466c99ddfe31346c152e1973b4af0dc1a9939b1fc
|
#!/usr/bin/env python2
#
# Copyright (C) 2013-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
#
import sys
import time
import espressopp
import mpi4py.MPI as MPI
import unittest
class TestFreeEnergyCompensation(unittest.TestCase):
    """Regression tests for espressopp.integrator.FreeEnergyCompensation,
    run once with a slab-shaped and once with a spherical AdResS region."""
    def setUp(self):
        # set up system
        system = espressopp.System()
        box = (10, 10, 10)
        system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
        system.skin = 0.3
        system.comm = MPI.COMM_WORLD
        nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size,box,rc=1.5,skin=system.skin)
        cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc=1.5, skin=system.skin)
        system.storage = espressopp.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)
        self.system = system
    def test_slab(self):
        # add some particles: coarse-grained particles (adrat=0) along x,
        # each paired with one atomistic particle (adrat=1) at the same spot
        particle_list = [
            (1, 1, 0, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 0),
            (2, 1, 0, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 0),
            (3, 1, 0, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 0),
            (4, 1, 0, espressopp.Real3D(8.5, 5.0, 5.0), 1.0, 0),
            (5, 1, 0, espressopp.Real3D(9.5, 5.0, 5.0), 1.0, 0),
            (6, 0, 0, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 1),
            (7, 0, 0, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 1),
            (8, 0, 0, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 1),
            (9, 0, 0, espressopp.Real3D(8.5, 5.0, 5.0), 1.0, 1),
            (10, 0, 0, espressopp.Real3D(9.5, 5.0, 5.0), 1.0, 1),
        ]
        tuples = [(1,6),(2,7),(3,8),(4,9),(5,10)]
        self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat')
        ftpl = espressopp.FixedTupleListAdress(self.system.storage)
        ftpl.addTuples(tuples)
        self.system.storage.setFixedTuplesAdress(ftpl)
        self.system.storage.decompose()
        # generate a verlet list (slab geometry: sphereAdr=False)
        vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,
                                dEx=2.0, dHy=1.0, adrCenter=[5.0, 5.0, 5.0], sphereAdr=False)
        # initialize lambda values
        integrator = espressopp.integrator.VelocityVerlet(self.system)
        integrator.dt = 0.01
        adress = espressopp.integrator.Adress(self.system,vl,ftpl)
        integrator.addExtension(adress)
        espressopp.tools.AdressDecomp(self.system, integrator)
        # set up FEC (force read from table_fec.tab, expected in the CWD)
        fec = espressopp.integrator.FreeEnergyCompensation(self.system, center=[5.0, 5.0, 5.0])
        fec.addForce(itype=3,filename="table_fec.tab",type=1)
        integrator.addExtension(fec)
        # x coordinates of particles before integration
        before = [self.system.storage.getParticle(i).pos[0] for i in range(1,6)]
        # run ten steps and compute energy
        integrator.run(10)
        energy = fec.computeCompEnergy()
        # x coordinates of particles after integration
        after = [self.system.storage.getParticle(i).pos[0] for i in range(1,6)]
        # run checks (only one particle is in hybrid region and should feel the FEC. Also check that its FEC energy is correct)
        self.assertEqual(before[0], after[0])
        self.assertEqual(before[1], after[1])
        self.assertAlmostEqual(after[2], 7.598165, places=5)
        self.assertEqual(before[3], after[3])
        self.assertEqual(before[4], after[4])
        self.assertAlmostEqual(energy, 6.790157, places=5)
    def test_sphere(self):
        # add some particles: same layout as test_slab but along the y-axis
        particle_list = [
            (1, 1, 0, espressopp.Real3D(5.0, 5.5, 5.0), 1.0, 0),
            (2, 1, 0, espressopp.Real3D(5.0, 6.5, 5.0), 1.0, 0),
            (3, 1, 0, espressopp.Real3D(5.0, 7.5, 5.0), 1.0, 0),
            (4, 1, 0, espressopp.Real3D(5.0, 8.5, 5.0), 1.0, 0),
            (5, 1, 0, espressopp.Real3D(5.0, 9.5, 5.0), 1.0, 0),
            (6, 0, 0, espressopp.Real3D(5.0, 5.5, 5.0), 1.0, 1),
            (7, 0, 0, espressopp.Real3D(5.0, 6.5, 5.0), 1.0, 1),
            (8, 0, 0, espressopp.Real3D(5.0, 7.5, 5.0), 1.0, 1),
            (9, 0, 0, espressopp.Real3D(5.0, 8.5, 5.0), 1.0, 1),
            (10, 0, 0, espressopp.Real3D(5.0, 9.5, 5.0), 1.0, 1),
        ]
        tuples = [(1,6),(2,7),(3,8),(4,9),(5,10)]
        self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat')
        ftpl = espressopp.FixedTupleListAdress(self.system.storage)
        ftpl.addTuples(tuples)
        self.system.storage.setFixedTuplesAdress(ftpl)
        self.system.storage.decompose()
        # generate a verlet list (spherical geometry: sphereAdr=True)
        vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,
                                dEx=2.0, dHy=1.0, adrCenter=[5.0, 5.0, 5.0], sphereAdr=True)
        # initialize lambda values
        integrator = espressopp.integrator.VelocityVerlet(self.system)
        integrator.dt = 0.01
        adress = espressopp.integrator.Adress(self.system,vl,ftpl)
        integrator.addExtension(adress)
        espressopp.tools.AdressDecomp(self.system, integrator)
        # set up FEC (spherical variant of the extension)
        fec = espressopp.integrator.FreeEnergyCompensation(self.system, center=[5.0, 5.0, 5.0], sphereAdr=True)
        fec.addForce(itype=3,filename="table_fec.tab",type=1)
        integrator.addExtension(fec)
        # y coordinates of particles before integration
        before = [self.system.storage.getParticle(i).pos[1] for i in range(1,6)]
        # run ten steps
        integrator.run(10)
        energy = fec.computeCompEnergy()
        # y coordinates of particles after integration
        after = [self.system.storage.getParticle(i).pos[1] for i in range(1,6)]
        # run checks (as for test with slab-geometry, but check y-coordinates this time. Given the now spherical setup, particles should move as before but along the y-axis).
        self.assertEqual(before[0], after[0])
        self.assertEqual(before[1], after[1])
        self.assertAlmostEqual(after[2], 7.598165, places=5)
        self.assertEqual(before[3], after[3])
        self.assertEqual(before[4], after[4])
        self.assertAlmostEqual(energy, 6.790157, places=5)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
govarguz/espressopp
|
testsuite/AdResS/FreeEnergyCompensation/test_FreeEnergyCompensation.py
|
Python
|
gpl-3.0
| 6,885
|
[
"ESPResSo"
] |
93faf1a09d7512823f14c9d3051f8816870851763c32b4bfd6cbf3953cff3bb4
|
# dagutil.py - dag utilities for mercurial
#
# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
# and Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import nullrev
from i18n import _
class basedag(object):
    '''generic interface for DAGs
    terms:
    "ix" (short for index) identifies a nodes internally,
    "id" identifies one externally.
    All params are ixs unless explicitly suffixed otherwise.
    Pluralized params are lists or sets.
    '''
    def __init__(self):
        # cached inverse DAG, built lazily by subclasses' inverse()
        self._inverse = None
    def nodeset(self):
        '''set of all node idxs'''
        raise NotImplementedError
    def heads(self):
        '''list of head ixs'''
        raise NotImplementedError
    def parents(self, ix):
        '''list of parents ixs of ix'''
        raise NotImplementedError
    def inverse(self):
        '''inverse DAG, where parents becomes children, etc.'''
        raise NotImplementedError
    def ancestorset(self, starts, stops=None):
        '''
        set of all ancestors of starts (incl), but stop walk at stops (excl)
        '''
        raise NotImplementedError
    def descendantset(self, starts, stops=None):
        '''
        set of all descendants of starts (incl), but stop walk at stops (excl)
        '''
        return self.inverse().ancestorset(starts, stops)
    def headsetofconnecteds(self, ixs):
        '''
        subset of connected list of ixs so that no node has a descendant in it
        By "connected list" we mean that if an ancestor and a descendant are in
        the list, then so is at least one path connecting them.
        '''
        raise NotImplementedError
    def externalize(self, ix):
        '''return a list of (or set if given a set) of node ids'''
        return self._externalize(ix)
    def externalizeall(self, ixs):
        '''return a list of (or set if given a set) of node ids'''
        ids = self._externalizeall(ixs)
        # preserve the container kind of the input (set in, set out)
        if isinstance(ixs, set):
            return set(ids)
        return list(ids)
    def internalize(self, id):
        '''return a list of (or set if given a set) of node ixs'''
        return self._internalize(id)
    def internalizeall(self, ids, filterunknown=False):
        '''return a list of (or set if given a set) of node ids'''
        ixs = self._internalizeall(ids, filterunknown)
        # preserve the container kind of the input (set in, set out)
        if isinstance(ids, set):
            return set(ixs)
        return list(ixs)
class genericdag(basedag):
    '''generic implementations for DAGs'''
    def ancestorset(self, starts, stops=None):
        '''Walk parent links from starts; never enter nodes in stops.'''
        blocked = set(stops) if stops else set()
        visited = set()
        stack = list(starts)
        while stack:
            node = stack.pop()
            if node in visited or node in blocked:
                continue
            visited.add(node)
            stack.extend(self.parents(node))
        return visited
    def headsetofconnecteds(self, ixs):
        '''Drop every node that is a parent of another node in ixs.'''
        heads = set(ixs)
        if not heads:
            return heads
        for node in ixs:
            for parent in self.parents(node):
                heads.discard(parent)
        assert heads
        return heads
class revlogbaseddag(basedag):
    '''generic dag interface to a revlog'''
    def __init__(self, revlog, nodeset):
        basedag.__init__(self)
        self._revlog = revlog
        self._heads = None  # lazily computed by heads() via _getheads()
        self._nodeset = nodeset
    def nodeset(self):
        return self._nodeset
    def heads(self):
        # cache head revisions after the first computation
        if self._heads is None:
            self._heads = self._getheads()
        return self._heads
    def _externalize(self, ix):
        # index entry field 7 holds the external node id for revision ix
        return self._revlog.index[ix][7]
    def _externalizeall(self, ixs):
        idx = self._revlog.index
        return [idx[i][7] for i in ixs]
    def _internalize(self, id):
        ix = self._revlog.rev(id)
        # the null revision is not a real node; treat it as unknown
        if ix == nullrev:
            raise LookupError(id, self._revlog.indexfile, _('nullid'))
        return ix
    def _internalizeall(self, ids, filterunknown):
        rl = self._revlog
        if filterunknown:
            # silently drop ids that are unknown (or map to nullrev)
            return [r for r in map(rl.nodemap.get, ids)
                    if r is not None and r != nullrev]
        return map(self._internalize, ids)
class revlogdag(revlogbaseddag):
    '''dag interface to a revlog'''
    def __init__(self, revlog):
        revlogbaseddag.__init__(self, revlog, set(xrange(len(revlog))))
    def _getheads(self):
        return [r for r in self._revlog.headrevs() if r != nullrev]
    def parents(self, ix):
        # index entry fields 5 and 6 hold the two parent revisions
        rlog = self._revlog
        idx = rlog.index
        revdata = idx[ix]
        prev = revdata[5]
        if prev != nullrev:
            prev2 = revdata[6]
            if prev2 == nullrev:
                return [prev]
            return [prev, prev2]
        # first parent is null; a second parent may still exist
        prev2 = revdata[6]
        if prev2 != nullrev:
            return [prev2]
        return []
    def inverse(self):
        # cache the inverted DAG on first request
        if self._inverse is None:
            self._inverse = inverserevlogdag(self)
        return self._inverse
    def ancestorset(self, starts, stops=None):
        # same walk as genericdag.ancestorset, but reading parents
        # directly from the revlog index for speed
        rlog = self._revlog
        idx = rlog.index
        stops = stops and set(stops) or set()
        seen = set()
        pending = list(starts)
        while pending:
            rev = pending.pop()
            if rev not in seen and rev not in stops:
                seen.add(rev)
                revdata = idx[rev]
                for i in [5, 6]:
                    prev = revdata[i]
                    if prev != nullrev:
                        pending.append(prev)
        return seen
    def headsetofconnecteds(self, ixs):
        if not ixs:
            return set()
        rlog = self._revlog
        idx = rlog.index
        headrevs = set(ixs)
        for rev in ixs:
            revdata = idx[rev]
            for i in [5, 6]:
                prev = revdata[i]
                if prev != nullrev:
                    headrevs.discard(prev)
        assert headrevs
        return headrevs
    def linearize(self, ixs):
        '''linearize and topologically sort a list of revisions
        The linearization process tries to create long runs of revs where
        a child rev comes immediately after its first parent. This is done by
        visiting the heads of the given revs in inverse topological order,
        and for each visited rev, visiting its second parent, then its first
        parent, then adding the rev itself to the output list.
        '''
        sorted = []
        visit = list(self.headsetofconnecteds(ixs))
        visit.sort(reverse=True)
        finished = set()
        while visit:
            cur = visit.pop()
            # negative entries are post-visit markers: -rev - 1 means
            # "rev's parents have been pushed; emit rev itself now"
            if cur < 0:
                cur = -cur - 1
                if cur not in finished:
                    sorted.append(cur)
                    finished.add(cur)
            else:
                visit.append(-cur - 1)
                visit += [p for p in self.parents(cur)
                          if p in ixs and p not in finished]
        assert len(sorted) == len(ixs)
        return sorted
class inverserevlogdag(revlogbaseddag, genericdag):
    '''inverse of an existing revlog dag; see revlogdag.inverse()'''
    def __init__(self, orig):
        revlogbaseddag.__init__(self, orig._revlog, orig._nodeset)
        self._orig = orig
        # children map and root list, filled in lazily by _walkto()
        self._children = {}
        self._roots = []
        # highest revision not yet scanned; we walk backwards from the tip
        self._walkfrom = len(self._revlog) - 1
    def _walkto(self, walkto):
        # extend the backward scan down to (and including) rev `walkto`,
        # recording each rev as a child of its parents
        rev = self._walkfrom
        cs = self._children
        roots = self._roots
        idx = self._revlog.index
        while rev >= walkto:
            data = idx[rev]
            isroot = True
            for prev in [data[5], data[6]]: # parent revs
                if prev != nullrev:
                    cs.setdefault(prev, []).append(rev)
                    isroot = False
            if isroot:
                roots.append(rev)
            rev -= 1
        self._walkfrom = rev
    def _getheads(self):
        # heads of the inverse DAG are the roots of the original
        self._walkto(nullrev)
        return self._roots
    def parents(self, ix):
        # parents in the inverse DAG are children in the original
        if ix is None:
            return []
        if ix <= self._walkfrom:
            self._walkto(ix)
        return self._children.get(ix, [])
    def inverse(self):
        return self._orig
|
iaddict/mercurial.rb
|
vendor/mercurial/mercurial/dagutil.py
|
Python
|
mit
| 8,237
|
[
"VisIt"
] |
ad997157fe91254bec6c2d41e855a34f3abc1fe67f8c8972357d058fbb6e2867
|
################################################################################
# #
# Copyright (C) 2010-2018 The ESPResSo project #
# #
# This file is part of ESPResSo. #
# #
# ESPResSo is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# ESPResSo is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
# #
# Active Matter: Swimmer Flow Field Tutorial #
# #
##########################################################################
import numpy as np
import os
import sys
import espressomd
from espressomd import assert_features, lb
assert_features(["ENGINE", "CUDA", "MASS", "ROTATION", "ROTATIONAL_INERTIA"])
# Read in the hydrodynamic type (pusher/puller) and position
# (pos is the swimmer's offset used when placing the particle below)
if len(sys.argv) != 3:
    print("Usage:", sys.argv[0], "<type> <pos>")
    exit()
mode = sys.argv[1]
pos = float(sys.argv[2])
##########################################################################
outdir = "./RESULTS_FLOW_FIELD/T_{}_P_{}/".format(mode, pos)
# Create the output directory.  The original used a bare ``except:``,
# which would also swallow KeyboardInterrupt/SystemExit and unrelated
# failures (bad path, permissions); catch OSError only.
try:
    os.makedirs(outdir)
except OSError:
    print("INFO: Directory \"{}\" exists".format(outdir))
# System parameters
length = 25.0       # cubic box edge length
prod_steps = 1000   # number of production cycles
prod_length = 50    # integration steps per cycle
dt = 0.01
system = espressomd.System(box_l=[length, length, length])
system.cell_system.skin = 0.3
system.time_step = dt
system.min_global_cut = 1.0
##########################################################################
# Set the position of the particle (box centre, shifted by `pos` along z)
x0 = 0.5 * length
y0 = 0.5 * length
z0 = 0.5 * length + pos
# Sphere size, mass, and moment of inertia, dipole force
sph_size = 0.5
sph_mass = 4.8
Ixyz = 4.8
force = 0.1
# Setup the particle
system.part.add(
    pos=[x0, y0, z0], type=0, mass=sph_mass, rinertia=[Ixyz, Ixyz, Ixyz],
    swimming={'f_swim': force, 'mode': mode, 'dipole_length': sph_size + 0.5})
##########################################################################
# Setup the fluid (quiescent)
agrid = 1
vskin = 0.1  # NOTE(review): appears unused in this script
frict = 20.0
visco = 1.0
densi = 1.0
lbf = lb.LBFluidGPU(agrid=agrid, dens=densi,
                    visc=visco, tau=dt)
system.actors.add(lbf)
system.thermostat.set_lb(LB_fluid=lbf, gamma=frict, seed=42)
##########################################################################
# Output the coordinates
with open("{}/trajectory.dat".format(outdir), 'w') as outfile:
    print("####################################################", file=outfile)
    print("# time position velocity #", file=outfile)
    print("####################################################", file=outfile)
    # Production run
    for k in range(prod_steps):
        # lightweight progress indicator on stdout every 10 cycles
        if (k + 1) % 10 == 0:
            print('\rprogress: %.0f%%' % ((k + 1) * 100. / prod_steps), end='')
            sys.stdout.flush()
        # Output quantities
        print("{time} {pos[0]} {pos[1]} {pos[2]} {vel[0]} {vel[1]} {vel[2]}"
              .format(time=system.time, pos=system.part[0].pos, vel=system.part[0].v),
              file=outfile)
        # Output 50 simulations
        if k % (prod_steps / 50) == 0:
            num = k // (prod_steps // 50)
            lbf.print_vtk_velocity("{}/lb_velocity_{}.vtk".format(outdir, num))
            system.part.writevtk(
                "{}/position_{}.vtk".format(outdir, num), types=[0])
        system.integrator.run(prod_length)
    print()
|
mkuron/espresso
|
doc/tutorials/06-active_matter/SOLUTIONS/flow_field.py
|
Python
|
gpl-3.0
| 4,745
|
[
"ESPResSo",
"VTK"
] |
9f652d9677a45d8fde595ca83b94ed2773a6b5b0c16f946444bc7311d9684519
|
# -*- coding: utf-8 -*-
## does summation of echoes using external python script
import sys
import os
import os.path
import subprocess
import JTutils
# NOTE(review): CURDATA/GETPAR/INPUT_DIALOG/PUTPAR/RE are TopSpin Jython
# built-ins injected at runtime -- this script only runs inside TopSpin.
dataset = CURDATA()
LB = GETPAR("1 LB")
GB = GETPAR("LB")
s = GETPAR("USERP1")
# Ask the user for the apodization parameters (defaults from the dataset).
res = INPUT_DIALOG("processing parameters",
"""
please provide :
the gaussian broadening (GB) applyied
the slope for the gaussian center shift (1 for SQSQ or 2 for DQSQ for example).
Should be followed by xf2;xf1 (not xfb as some bug may arise related to F1 apodization)
""",
["GB=","slope"],[GB,s])
(GB,s) = (res[0],res[1])
# Persist the chosen values back into the dataset parameters.
PUTPAR("LB",GB)
PUTPAR("USERP1",s)
fulldataPATH = JTutils.fullpath(dataset)
opt_args = " -g %s -l %s -s %s" % (GB,"0",s)
# Delegate the actual echo-summation/apodization to the external script.
JTutils.run_CpyBin_script('apod2DEAE_.py', opt_args.split()+[fulldataPATH])
RE(dataset)
|
jtrebosc/JTutils
|
TSpy/apod2DEAE.py
|
Python
|
bsd-3-clause
| 807
|
[
"Gaussian"
] |
4765439465a6151e46b921efeda68533e0f6df62a5f7450e1b23fcf44ec95242
|
class Item:
    """Minimal record pairing an item's numeric id with its display name."""
    def __init__(self, item_id, item_name):
        """Store the id and name as public attributes."""
        self.name = item_name
        self.id = item_id
class Craftables:
    """Empty namespace placeholder for craftable-item groupings.

    Bug fix: the original read ``class Craftables`` with no colon,
    which is a SyntaxError; the missing colon is restored here.
    """
    def __init__(self):
        pass
# Individual trade-good items (numeric id, display name).
cord_of_winsome_sorrows = Item(119336, "Cord of Winsome Sorrows")
everburning_candle = Item(118880, "Everburning Candle")
savage_blood = Item(118472, "Savage Blood")
sorcerous_air = Item(113264, "Sorcerous Air")
sorcerous_earth = Item(113263, "Sorcerous Earth")
sorcerous_water = Item(113262, "Sorcerous Water")
sorcerous_fire = Item(113261, "Sorcerous Fire")
temporal_crystal = Item(113588, "Temporal Crystal")
luminous_shard = Item(111245, "Luminous Shard")
draenic_dust = Item(109693, "Draenic Dust")
blackrock_ore = Item(109118, "Blackrock Ore")
true_iron_ore = Item(109119, "True Iron Ore")
raw_talbuk_meat = Item(109132, "Raw Talbuk Meat")
rylak_egg = Item(109133, "Rylak Egg")
raw_elekk_meat = Item(109134, "Raw Elekk Meat")
raw_riverbeast_meat = Item(109135, "Raw Riverbeast Meat")
raw_boar_meat = Item(109136, "Raw Boar Meat")
# buy @ like 30g?
imperfect_draenethyst_fragment = Item(10593, "Imperfect Draenethyst Fragment")
flawless_draenethyst_sphere = Item(8244, "Flawless Draenethyst Sphere")
talador_orchid = Item(109129, "Talador Orchid")
nagrand_arrowbloom = Item(109128, "Nagrand Arrowbloom")
starflower = Item(109127, "Starflower")
gorgrond_flytrap = Item(109126, "Gorgrond Flytrap")
fireweed = Item(109125, "Fireweed")
frostweed = Item(109124, "Frostweed")
# Raw list of item ids (not Item objects).
draenor_boe_epic_ids = [113593,113602,113610,113632,113865,113882,113932,113959,118808,118810,118812,118814,118816,118840,118842,118844,118846,118848,118851,118852,118862,118864,118866,118868,118870,118872,118874,118876,118878,118880,118882,118884,118885,118886,118887,118888,118889,118890,118891,118892,118893,118894,118895,118896,119331,119332,119333,119334,119335,119336,119337,119338,119339,119340,119341,119342,119343,119344,119345,119346,119347,120077]
# NOTE(review): draenor_herbs holds Item objects, whereas the lists below
# hold bare ids -- confirm with callers before unifying.
draenor_herbs = [talador_orchid, nagrand_arrowbloom, starflower, gorgrond_flytrap, fireweed, frostweed]
draenor_ores = [blackrock_ore.id, true_iron_ore.id]
draenor_enchanting = [temporal_crystal.id, luminous_shard.id, draenic_dust.id]
sorcerous = [c.id for c in [sorcerous_air, sorcerous_earth, sorcerous_fire, sorcerous_water]]
|
trigunshin/tsm
|
items.py
|
Python
|
mit
| 2,216
|
[
"CRYSTAL"
] |
efd326320dc4b3ee9aa470da62c1080552346e59bd8ff32e19805b12f4e6a4cb
|
# (C) British Crown Copyright 2017 - 2018, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Routines for lazy data handling.
To avoid replicating implementation-dependent test and conversion code.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
from functools import wraps
import dask
import dask.array as da
import dask.context
from dask.local import get_sync as dget_sync
import numpy as np
import numpy.ma as ma
def non_lazy(func):
    """
    Turn a lazy function into a function that returns a result immediately.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        """Immediately return the results of a lazy function."""
        result = func(*args, **kwargs)
        # dask.compute returns a tuple of results; unwrap the single value.
        return dask.compute(result)[0]
    return inner
def is_lazy_data(data):
    """
    Return True if the argument is an Iris 'lazy' data array.

    At present that simply means a Dask array, which we detect by the
    presence of a "compute" attribute.
    """
    return hasattr(data, 'compute')
# A magic value, chosen to minimise chunk creation time and chunk processing
# time within dask.  Interpreted as a maximum number of array points per
# chunk (value inherited from biggus -- see as_lazy_data's docstring).
_MAX_CHUNK_SIZE = 8 * 1024 * 1024 * 2
def _limited_shape(shape):
    # Reduce a shape to less than a default overall number-of-points, reducing
    # earlier dimensions preferentially.
    # Note: this is only a heuristic, assuming that earlier dimensions are
    # 'outer' storage dimensions -- not *always* true, even for NetCDF data.
    dims = list(shape)
    index = 0
    while np.prod(dims) > _MAX_CHUNK_SIZE:
        # Shrink the current dimension by the remaining excess factor,
        # never below a size of 1, then move on to the next dimension.
        shrink = np.ceil(np.prod(dims) / _MAX_CHUNK_SIZE)
        dims[index] = max(int(dims[index] / shrink), 1)
        index += 1
    return tuple(dims)
def as_lazy_data(data, chunks=None, asarray=False):
    """
    Convert the input array `data` to a dask array.

    Args:

    * data:
        An array. This will be converted to a dask array.

    Kwargs:

    * chunks:
        Describes how the created dask array should be split up. Defaults to a
        value first defined in biggus (being `8 * 1024 * 1024 * 2`).
        For more information see
        http://dask.pydata.org/en/latest/array-creation.html#chunks.

    * asarray:
        If True, then chunks will be converted to instances of `ndarray`.
        Set to False (default) to pass passed chunks through unchanged.

    Returns:
        The input array converted to a dask array.

    """
    if chunks is None:
        # No chunking given: use the wrapped array's own shape, capped at a
        # default maximum overall size.
        chunks = _limited_shape(data.shape)
    if isinstance(data, ma.core.MaskedConstant):
        # Promote a masked constant to a proper (0-d) masked array.
        data = ma.masked_array(data.data, mask=data.mask)
    if is_lazy_data(data):
        return data
    return da.from_array(data, chunks=chunks, asarray=asarray)
def _co_realise_lazy_arrays(arrays):
    """
    Compute multiple lazy arrays together and return a list of real values.

    Computing them in one go lets dask share results for common graph
    elements.  Every result is passed through `np.asanyarray`, and any
    MaskedConstant is converted to a masked array, so all return values are
    writeable NumPy array objects.

    Non-lazy inputs pass straight through `da.compute`, but still undergo
    the same result standardisation.
    """
    real_values = []
    for source, computed in zip(arrays, da.compute(*arrays)):
        # Guarantee an array result : dask (and numpy) may hand back a bare
        # numpy scalar instead of an ndarray.
        # Recorded in https://github.com/dask/dask/issues/2111.
        computed = np.asanyarray(computed)
        if isinstance(computed, ma.core.MaskedConstant):
            # Replace a masked constant with a real masked array, restoring
            # the source dtype (masked constants are *always* float64).
            computed = ma.masked_array(computed.data, mask=computed.mask,
                                       dtype=source.dtype)
        real_values.append(computed)
    return real_values
def as_concrete_data(data):
    """
    Return the actual content of a lazy array, as a numpy array.

    A NumPy `ndarray` or masked array input is returned unchanged; a lazy
    input is realised first.

    Args:

    * data:
        A dask array, NumPy `ndarray` or masked array.

    Returns:
        A NumPy `ndarray` or masked array.

    """
    if not is_lazy_data(data):
        return data
    (data,) = _co_realise_lazy_arrays([data])
    return data
def multidim_lazy_stack(stack):
    """
    Recursively build a multidimensional stacked dask array.

    Needed because dask.array.stack only accepts a 1-dimensional list.

    Args:

    * stack:
        An ndarray of dask arrays.

    Returns:
        The input array converted to a lazy dask array.

    """
    if stack.ndim == 0:
        # Base case #1 : a 0-d array cannot be stacked, so unwrap it.
        return stack.item()
    if stack.ndim == 1:
        # Base case #2 : 1-d goes straight into dask.
        return da.stack(list(stack))
    # General case : recurse, as dask.stack is not multi-dimensional.
    parts = [multidim_lazy_stack(subarray) for subarray in stack]
    return da.stack(parts)
def co_realise_cubes(*cubes):
    """
    Fetch 'real' data for multiple cubes in one shared calculation.

    Equivalent to touching each `cube.data`, except that lazy calculations
    and data fetches can be shared between the computations, improving
    performance.

    Args:

    * cubes (list of :class:`~iris.cube.Cube`):
        Arguments, each of which is a cube to be realised.

    For example::

        # Form stats.
        a_std = cube_a.collapsed(['x', 'y'], iris.analysis.STD_DEV)
        b_std = cube_b.collapsed(['x', 'y'], iris.analysis.STD_DEV)
        ab_mean_diff = (cube_b - cube_a).collapsed(['x', 'y'],
                                                   iris.analysis.MEAN)
        std_err = (a_std * a_std + b_std * b_std) ** 0.5

        # Compute stats together (to avoid multiple data passes).
        co_realise_cubes(a_std, b_std, ab_mean_diff, std_err)

    .. Note::

        Cubes with non-lazy data may also be passed, with no ill effect.

    """
    realised = _co_realise_lazy_arrays([cube.core_data() for cube in cubes])
    for cube, real_data in zip(cubes, realised):
        cube.data = real_data
def lazy_elementwise(lazy_array, elementwise_op):
    """
    Apply a (numpy-style) elementwise array operation to a lazy array.

    Elementwise means an independent calculation at every point of the
    input, producing a result array of the same shape.

    Args:

    * lazy_array:
        The lazy array object to operate on.
    * elementwise_op:
        The elementwise operation, a function operating on numpy arrays.

    .. note:

        A single-point "dummy" call is made to the operation function, to
        determine dtype of the result.
        This return dtype must be stable in actual operation (!)

    """
    # Thin Iris-specific wrapper over dask's map_blocks.
    # Probe the return dtype with a one-element dummy call : this matters
    # for unit conversions, where a Unit.convert call may or may not cast
    # to float depending on unit equality -- let udunits decide for us.
    probe = elementwise_op(np.zeros(1, lazy_array.dtype))
    return da.map_blocks(elementwise_op, lazy_array, dtype=probe.dtype)
|
dkillick/iris
|
lib/iris/_lazy_data.py
|
Python
|
lgpl-3.0
| 8,548
|
[
"NetCDF"
] |
08206cb51c74f1bdc1a275158c8d08f24401ae4058e3e432fd1526b6d5c3d56f
|
# coding: utf-8
from __future__ import unicode_literals
import re
import time
import xml.etree.ElementTree as etree
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
unescapeHTML,
urlencode_postdata,
unified_timestamp,
ExtractorError,
)
MSO_INFO = {
'DTV': {
'name': 'DIRECTV',
'username_field': 'username',
'password_field': 'password',
},
'Rogers': {
'name': 'Rogers',
'username_field': 'UserName',
'password_field': 'UserPassword',
},
'Comcast_SSO': {
'name': 'Comcast XFINITY',
'username_field': 'user',
'password_field': 'passwd',
},
'TWC': {
'name': 'Time Warner Cable | Spectrum',
'username_field': 'Ecom_User_ID',
'password_field': 'Ecom_Password',
},
'Charter_Direct': {
'name': 'Charter Spectrum',
'username_field': 'IDToken1',
'password_field': 'IDToken2',
},
'Verizon': {
'name': 'Verizon FiOS',
'username_field': 'IDToken1',
'password_field': 'IDToken2',
},
'thr030': {
'name': '3 Rivers Communications'
},
'com140': {
'name': 'Access Montana'
},
'acecommunications': {
'name': 'AcenTek'
},
'acm010': {
'name': 'Acme Communications'
},
'ada020': {
'name': 'Adams Cable Service'
},
'alb020': {
'name': 'Albany Mutual Telephone'
},
'algona': {
'name': 'Algona Municipal Utilities'
},
'allwest': {
'name': 'All West Communications'
},
'all025': {
'name': 'Allen\'s Communications'
},
'spl010': {
'name': 'Alliance Communications'
},
'all070': {
'name': 'ALLO Communications'
},
'alpine': {
'name': 'Alpine Communications'
},
'hun015': {
'name': 'American Broadband'
},
'nwc010': {
'name': 'American Broadband Missouri'
},
'com130-02': {
'name': 'American Community Networks'
},
'com130-01': {
'name': 'American Warrior Networks'
},
'tom020': {
'name': 'Amherst Telephone/Tomorrow Valley'
},
'tvc020': {
'name': 'Andycable'
},
'arkwest': {
'name': 'Arkwest Communications'
},
'art030': {
'name': 'Arthur Mutual Telephone Company'
},
'arvig': {
'name': 'Arvig'
},
'nttcash010': {
'name': 'Ashland Home Net'
},
'astound': {
'name': 'Astound (now Wave)'
},
'dix030': {
'name': 'ATC Broadband'
},
'ara010': {
'name': 'ATC Communications'
},
'she030-02': {
'name': 'Ayersville Communications'
},
'baldwin': {
'name': 'Baldwin Lightstream'
},
'bal040': {
'name': 'Ballard TV'
},
'cit025': {
'name': 'Bardstown Cable TV'
},
'bay030': {
'name': 'Bay Country Communications'
},
'tel095': {
'name': 'Beaver Creek Cooperative Telephone'
},
'bea020': {
'name': 'Beaver Valley Cable'
},
'bee010': {
'name': 'Bee Line Cable'
},
'wir030': {
'name': 'Beehive Broadband'
},
'bra020': {
'name': 'BELD'
},
'bel020': {
'name': 'Bellevue Municipal Cable'
},
'vol040-01': {
'name': 'Ben Lomand Connect / BLTV'
},
'bev010': {
'name': 'BEVCOMM'
},
'big020': {
'name': 'Big Sandy Broadband'
},
'ble020': {
'name': 'Bledsoe Telephone Cooperative'
},
'bvt010': {
'name': 'Blue Valley Tele-Communications'
},
'bra050': {
'name': 'Brandenburg Telephone Co.'
},
'bte010': {
'name': 'Bristol Tennessee Essential Services'
},
'annearundel': {
'name': 'Broadstripe'
},
'btc010': {
'name': 'BTC Communications'
},
'btc040': {
'name': 'BTC Vision - Nahunta'
},
'bul010': {
'name': 'Bulloch Telephone Cooperative'
},
'but010': {
'name': 'Butler-Bremer Communications'
},
'tel160-csp': {
'name': 'C Spire SNAP'
},
'csicable': {
'name': 'Cable Services Inc.'
},
'cableamerica': {
'name': 'CableAmerica'
},
'cab038': {
'name': 'CableSouth Media 3'
},
'weh010-camtel': {
'name': 'Cam-Tel Company'
},
'car030': {
'name': 'Cameron Communications'
},
'canbytel': {
'name': 'Canby Telcom'
},
'crt020': {
'name': 'CapRock Tv'
},
'car050': {
'name': 'Carnegie Cable'
},
'cas': {
'name': 'CAS Cable'
},
'casscomm': {
'name': 'CASSCOMM'
},
'mid180-02': {
'name': 'Catalina Broadband Solutions'
},
'cccomm': {
'name': 'CC Communications'
},
'nttccde010': {
'name': 'CDE Lightband'
},
'cfunet': {
'name': 'Cedar Falls Utilities'
},
'dem010-01': {
'name': 'Celect-Bloomer Telephone Area'
},
'dem010-02': {
'name': 'Celect-Bruce Telephone Area'
},
'dem010-03': {
'name': 'Celect-Citizens Connected Area'
},
'dem010-04': {
'name': 'Celect-Elmwood/Spring Valley Area'
},
'dem010-06': {
'name': 'Celect-Mosaic Telecom'
},
'dem010-05': {
'name': 'Celect-West WI Telephone Area'
},
'net010-02': {
'name': 'Cellcom/Nsight Telservices'
},
'cen100': {
'name': 'CentraCom'
},
'nttccst010': {
'name': 'Central Scott / CSTV'
},
'cha035': {
'name': 'Chaparral CableVision'
},
'cha050': {
'name': 'Chariton Valley Communication Corporation, Inc.'
},
'cha060': {
'name': 'Chatmoss Cablevision'
},
'nttcche010': {
'name': 'Cherokee Communications'
},
'che050': {
'name': 'Chesapeake Bay Communications'
},
'cimtel': {
'name': 'Cim-Tel Cable, LLC.'
},
'cit180': {
'name': 'Citizens Cablevision - Floyd, VA'
},
'cit210': {
'name': 'Citizens Cablevision, Inc.'
},
'cit040': {
'name': 'Citizens Fiber'
},
'cit250': {
'name': 'Citizens Mutual'
},
'war040': {
'name': 'Citizens Telephone Corporation'
},
'wat025': {
'name': 'City Of Monroe'
},
'wadsworth': {
'name': 'CityLink'
},
'nor100': {
'name': 'CL Tel'
},
'cla010': {
'name': 'Clarence Telephone and Cedar Communications'
},
'ser060': {
'name': 'Clear Choice Communications'
},
'tac020': {
'name': 'Click! Cable TV'
},
'war020': {
'name': 'CLICK1.NET'
},
'cml010': {
'name': 'CML Telephone Cooperative Association'
},
'cns': {
'name': 'CNS'
},
'com160': {
'name': 'Co-Mo Connect'
},
'coa020': {
'name': 'Coast Communications'
},
'coa030': {
'name': 'Coaxial Cable TV'
},
'mid055': {
'name': 'Cobalt TV (Mid-State Community TV)'
},
'col070': {
'name': 'Columbia Power & Water Systems'
},
'col080': {
'name': 'Columbus Telephone'
},
'nor105': {
'name': 'Communications 1 Cablevision, Inc.'
},
'com150': {
'name': 'Community Cable & Broadband'
},
'com020': {
'name': 'Community Communications Company'
},
'coy010': {
'name': 'commZoom'
},
'com025': {
'name': 'Complete Communication Services'
},
'cat020': {
'name': 'Comporium'
},
'com071': {
'name': 'ComSouth Telesys'
},
'consolidatedcable': {
'name': 'Consolidated'
},
'conwaycorp': {
'name': 'Conway Corporation'
},
'coo050': {
'name': 'Coon Valley Telecommunications Inc'
},
'coo080': {
'name': 'Cooperative Telephone Company'
},
'cpt010': {
'name': 'CP-TEL'
},
'cra010': {
'name': 'Craw-Kan Telephone'
},
'crestview': {
'name': 'Crestview Cable Communications'
},
'cross': {
'name': 'Cross TV'
},
'cro030': {
'name': 'Crosslake Communications'
},
'ctc040': {
'name': 'CTC - Brainerd MN'
},
'phe030': {
'name': 'CTV-Beam - East Alabama'
},
'cun010': {
'name': 'Cunningham Telephone & Cable'
},
'dpc010': {
'name': 'D & P Communications'
},
'dak030': {
'name': 'Dakota Central Telecommunications'
},
'nttcdel010': {
'name': 'Delcambre Telephone LLC'
},
'tel160-del': {
'name': 'Delta Telephone Company'
},
'sal040': {
'name': 'DiamondNet'
},
'ind060-dc': {
'name': 'Direct Communications'
},
'doy010': {
'name': 'Doylestown Cable TV'
},
'dic010': {
'name': 'DRN'
},
'dtc020': {
'name': 'DTC'
},
'dtc010': {
'name': 'DTC Cable (Delhi)'
},
'dum010': {
'name': 'Dumont Telephone Company'
},
'dun010': {
'name': 'Dunkerton Telephone Cooperative'
},
'cci010': {
'name': 'Duo County Telecom'
},
'eagle': {
'name': 'Eagle Communications'
},
'weh010-east': {
'name': 'East Arkansas Cable TV'
},
'eatel': {
'name': 'EATEL Video, LLC'
},
'ell010': {
'name': 'ECTA'
},
'emerytelcom': {
'name': 'Emery Telcom Video LLC'
},
'nor200': {
'name': 'Empire Access'
},
'endeavor': {
'name': 'Endeavor Communications'
},
'sun045': {
'name': 'Enhanced Telecommunications Corporation'
},
'mid030': {
'name': 'enTouch'
},
'epb020': {
'name': 'EPB Smartnet'
},
'jea010': {
'name': 'EPlus Broadband'
},
'com065': {
'name': 'ETC'
},
'ete010': {
'name': 'Etex Communications'
},
'fbc-tele': {
'name': 'F&B Communications'
},
'fal010': {
'name': 'Falcon Broadband'
},
'fam010': {
'name': 'FamilyView CableVision'
},
'far020': {
'name': 'Farmers Mutual Telephone Company'
},
'fay010': {
'name': 'Fayetteville Public Utilities'
},
'sal060': {
'name': 'fibrant'
},
'fid010': {
'name': 'Fidelity Communications'
},
'for030': {
'name': 'FJ Communications'
},
'fli020': {
'name': 'Flint River Communications'
},
'far030': {
'name': 'FMT - Jesup'
},
'foo010': {
'name': 'Foothills Communications'
},
'for080': {
'name': 'Forsyth CableNet'
},
'fbcomm': {
'name': 'Frankfort Plant Board'
},
'tel160-fra': {
'name': 'Franklin Telephone Company'
},
'nttcftc010': {
'name': 'FTC'
},
'fullchannel': {
'name': 'Full Channel, Inc.'
},
'gar040': {
'name': 'Gardonville Cooperative Telephone Association'
},
'gbt010': {
'name': 'GBT Communications, Inc.'
},
'tec010': {
'name': 'Genuine Telecom'
},
'clr010': {
'name': 'Giant Communications'
},
'gla010': {
'name': 'Glasgow EPB'
},
'gle010': {
'name': 'Glenwood Telecommunications'
},
'gra060': {
'name': 'GLW Broadband Inc.'
},
'goldenwest': {
'name': 'Golden West Cablevision'
},
'vis030': {
'name': 'Grantsburg Telcom'
},
'gpcom': {
'name': 'Great Plains Communications'
},
'gri010': {
'name': 'Gridley Cable Inc'
},
'hbc010': {
'name': 'H&B Cable Services'
},
'hae010': {
'name': 'Haefele TV Inc.'
},
'htc010': {
'name': 'Halstad Telephone Company'
},
'har005': {
'name': 'Harlan Municipal Utilities'
},
'har020': {
'name': 'Hart Communications'
},
'ced010': {
'name': 'Hartelco TV'
},
'hea040': {
'name': 'Heart of Iowa Communications Cooperative'
},
'htc020': {
'name': 'Hickory Telephone Company'
},
'nttchig010': {
'name': 'Highland Communication Services'
},
'hig030': {
'name': 'Highland Media'
},
'spc010': {
'name': 'Hilliary Communications'
},
'hin020': {
'name': 'Hinton CATV Co.'
},
'hometel': {
'name': 'HomeTel Entertainment, Inc.'
},
'hoodcanal': {
'name': 'Hood Canal Communications'
},
'weh010-hope': {
'name': 'Hope - Prescott Cable TV'
},
'horizoncable': {
'name': 'Horizon Cable TV, Inc.'
},
'hor040': {
'name': 'Horizon Chillicothe Telephone'
},
'htc030': {
'name': 'HTC Communications Co. - IL'
},
'htccomm': {
'name': 'HTC Communications, Inc. - IA'
},
'wal005': {
'name': 'Huxley Communications'
},
'imon': {
'name': 'ImOn Communications'
},
'ind040': {
'name': 'Independence Telecommunications'
},
'rrc010': {
'name': 'Inland Networks'
},
'stc020': {
'name': 'Innovative Cable TV St Croix'
},
'car100': {
'name': 'Innovative Cable TV St Thomas-St John'
},
'icc010': {
'name': 'Inside Connect Cable'
},
'int100': {
'name': 'Integra Telecom'
},
'int050': {
'name': 'Interstate Telecommunications Coop'
},
'irv010': {
'name': 'Irvine Cable'
},
'k2c010': {
'name': 'K2 Communications'
},
'kal010': {
'name': 'Kalida Telephone Company, Inc.'
},
'kal030': {
'name': 'Kalona Cooperative Telephone Company'
},
'kmt010': {
'name': 'KMTelecom'
},
'kpu010': {
'name': 'KPU Telecommunications'
},
'kuh010': {
'name': 'Kuhn Communications, Inc.'
},
'lak130': {
'name': 'Lakeland Communications'
},
'lan010': {
'name': 'Langco'
},
'lau020': {
'name': 'Laurel Highland Total Communications, Inc.'
},
'leh010': {
'name': 'Lehigh Valley Cooperative Telephone'
},
'bra010': {
'name': 'Limestone Cable/Bracken Cable'
},
'loc020': {
'name': 'LISCO'
},
'lit020': {
'name': 'Litestream'
},
'tel140': {
'name': 'LivCom'
},
'loc010': {
'name': 'LocalTel Communications'
},
'weh010-longview': {
'name': 'Longview - Kilgore Cable TV'
},
'lon030': {
'name': 'Lonsdale Video Ventures, LLC'
},
'lns010': {
'name': 'Lost Nation-Elwood Telephone Co.'
},
'nttclpc010': {
'name': 'LPC Connect'
},
'lumos': {
'name': 'Lumos Networks'
},
'madison': {
'name': 'Madison Communications'
},
'mad030': {
'name': 'Madison County Cable Inc.'
},
'nttcmah010': {
'name': 'Mahaska Communication Group'
},
'mar010': {
'name': 'Marne & Elk Horn Telephone Company'
},
'mcc040': {
'name': 'McClure Telephone Co.'
},
'mctv': {
'name': 'MCTV'
},
'merrimac': {
'name': 'Merrimac Communications Ltd.'
},
'metronet': {
'name': 'Metronet'
},
'mhtc': {
'name': 'MHTC'
},
'midhudson': {
'name': 'Mid-Hudson Cable'
},
'midrivers': {
'name': 'Mid-Rivers Communications'
},
'mid045': {
'name': 'Midstate Communications'
},
'mil080': {
'name': 'Milford Communications'
},
'min030': {
'name': 'MINET'
},
'nttcmin010': {
'name': 'Minford TV'
},
'san040-02': {
'name': 'Mitchell Telecom'
},
'mlg010': {
'name': 'MLGC'
},
'mon060': {
'name': 'Mon-Cre TVE'
},
'mou110': {
'name': 'Mountain Telephone'
},
'mou050': {
'name': 'Mountain Village Cable'
},
'mtacomm': {
'name': 'MTA Communications, LLC'
},
'mtc010': {
'name': 'MTC Cable'
},
'med040': {
'name': 'MTC Technologies'
},
'man060': {
'name': 'MTCC'
},
'mtc030': {
'name': 'MTCO Communications'
},
'mul050': {
'name': 'Mulberry Telecommunications'
},
'mur010': {
'name': 'Murray Electric System'
},
'musfiber': {
'name': 'MUS FiberNET'
},
'mpw': {
'name': 'Muscatine Power & Water'
},
'nttcsli010': {
'name': 'myEVTV.com'
},
'nor115': {
'name': 'NCC'
},
'nor260': {
'name': 'NDTC'
},
'nctc': {
'name': 'Nebraska Central Telecom, Inc.'
},
'nel020': {
'name': 'Nelsonville TV Cable'
},
'nem010': {
'name': 'Nemont'
},
'new075': {
'name': 'New Hope Telephone Cooperative'
},
'nor240': {
'name': 'NICP'
},
'cic010': {
'name': 'NineStar Connect'
},
'nktelco': {
'name': 'NKTelco'
},
'nortex': {
'name': 'Nortex Communications'
},
'nor140': {
'name': 'North Central Telephone Cooperative'
},
'nor030': {
'name': 'Northland Communications'
},
'nor075': {
'name': 'Northwest Communications'
},
'nor125': {
'name': 'Norwood Light Broadband'
},
'net010': {
'name': 'Nsight Telservices'
},
'dur010': {
'name': 'Ntec'
},
'nts010': {
'name': 'NTS Communications'
},
'new045': {
'name': 'NU-Telecom'
},
'nulink': {
'name': 'NuLink'
},
'jam030': {
'name': 'NVC'
},
'far035': {
'name': 'OmniTel Communications'
},
'onesource': {
'name': 'OneSource Communications'
},
'cit230': {
'name': 'Opelika Power Services'
},
'daltonutilities': {
'name': 'OptiLink'
},
'mid140': {
'name': 'OPTURA'
},
'ote010': {
'name': 'OTEC Communication Company'
},
'cci020': {
'name': 'Packerland Broadband'
},
'pan010': {
'name': 'Panora Telco/Guthrie Center Communications'
},
'otter': {
'name': 'Park Region Telephone & Otter Tail Telcom'
},
'mid050': {
'name': 'Partner Communications Cooperative'
},
'fib010': {
'name': 'Pathway'
},
'paulbunyan': {
'name': 'Paul Bunyan Communications'
},
'pem020': {
'name': 'Pembroke Telephone Company'
},
'mck010': {
'name': 'Peoples Rural Telephone Cooperative'
},
'pul010': {
'name': 'PES Energize'
},
'phi010': {
'name': 'Philippi Communications System'
},
'phonoscope': {
'name': 'Phonoscope Cable'
},
'pin070': {
'name': 'Pine Belt Communications, Inc.'
},
'weh010-pine': {
'name': 'Pine Bluff Cable TV'
},
'pin060': {
'name': 'Pineland Telephone Cooperative'
},
'cam010': {
'name': 'Pinpoint Communications'
},
'pio060': {
'name': 'Pioneer Broadband'
},
'pioncomm': {
'name': 'Pioneer Communications'
},
'pioneer': {
'name': 'Pioneer DTV'
},
'pla020': {
'name': 'Plant TiftNet, Inc.'
},
'par010': {
'name': 'PLWC'
},
'pro035': {
'name': 'PMT'
},
'vik011': {
'name': 'Polar Cablevision'
},
'pottawatomie': {
'name': 'Pottawatomie Telephone Co.'
},
'premiercomm': {
'name': 'Premier Communications'
},
'psc010': {
'name': 'PSC'
},
'pan020': {
'name': 'PTCI'
},
'qco010': {
'name': 'QCOL'
},
'qua010': {
'name': 'Quality Cablevision'
},
'rad010': {
'name': 'Radcliffe Telephone Company'
},
'car040': {
'name': 'Rainbow Communications'
},
'rai030': {
'name': 'Rainier Connect'
},
'ral010': {
'name': 'Ralls Technologies'
},
'rct010': {
'name': 'RC Technologies'
},
'red040': {
'name': 'Red River Communications'
},
'ree010': {
'name': 'Reedsburg Utility Commission'
},
'mol010': {
'name': 'Reliance Connects- Oregon'
},
'res020': {
'name': 'Reserve Telecommunications'
},
'weh010-resort': {
'name': 'Resort TV Cable'
},
'rld010': {
'name': 'Richland Grant Telephone Cooperative, Inc.'
},
'riv030': {
'name': 'River Valley Telecommunications Coop'
},
'rockportcable': {
'name': 'Rock Port Cablevision'
},
'rsf010': {
'name': 'RS Fiber'
},
'rtc': {
'name': 'RTC Communication Corp'
},
'res040': {
'name': 'RTC-Reservation Telephone Coop.'
},
'rte010': {
'name': 'RTEC Communications'
},
'stc010': {
'name': 'S&T'
},
'san020': {
'name': 'San Bruno Cable TV'
},
'san040-01': {
'name': 'Santel'
},
'sav010': {
'name': 'SCI Broadband-Savage Communications Inc.'
},
'sco050': {
'name': 'Scottsboro Electric Power Board'
},
'scr010': {
'name': 'Scranton Telephone Company'
},
'selco': {
'name': 'SELCO'
},
'she010': {
'name': 'Shentel'
},
'she030': {
'name': 'Sherwood Mutual Telephone Association, Inc.'
},
'ind060-ssc': {
'name': 'Silver Star Communications'
},
'sjoberg': {
'name': 'Sjoberg\'s Inc.'
},
'sou025': {
'name': 'SKT'
},
'sky050': {
'name': 'SkyBest TV'
},
'nttcsmi010': {
'name': 'Smithville Communications'
},
'woo010': {
'name': 'Solarus'
},
'sou075': {
'name': 'South Central Rural Telephone Cooperative'
},
'sou065': {
'name': 'South Holt Cablevision, Inc.'
},
'sou035': {
'name': 'South Slope Cooperative Communications'
},
'spa020': {
'name': 'Spanish Fork Community Network'
},
'spe010': {
'name': 'Spencer Municipal Utilities'
},
'spi005': {
'name': 'Spillway Communications, Inc.'
},
'srt010': {
'name': 'SRT'
},
'cccsmc010': {
'name': 'St. Maarten Cable TV'
},
'sta025': {
'name': 'Star Communications'
},
'sco020': {
'name': 'STE'
},
'uin010': {
'name': 'STRATA Networks'
},
'sum010': {
'name': 'Sumner Cable TV'
},
'pie010': {
'name': 'Surry TV/PCSI TV'
},
'swa010': {
'name': 'Swayzee Communications'
},
'sweetwater': {
'name': 'Sweetwater Cable Television Co'
},
'weh010-talequah': {
'name': 'Tahlequah Cable TV'
},
'tct': {
'name': 'TCT'
},
'tel050': {
'name': 'Tele-Media Company'
},
'com050': {
'name': 'The Community Agency'
},
'thr020': {
'name': 'Three River'
},
'cab140': {
'name': 'Town & Country Technologies'
},
'tra010': {
'name': 'Trans-Video'
},
'tre010': {
'name': 'Trenton TV Cable Company'
},
'tcc': {
'name': 'Tri County Communications Cooperative'
},
'tri025': {
'name': 'TriCounty Telecom'
},
'tri110': {
'name': 'TrioTel Communications, Inc.'
},
'tro010': {
'name': 'Troy Cablevision, Inc.'
},
'tsc': {
'name': 'TSC'
},
'cit220': {
'name': 'Tullahoma Utilities Board'
},
'tvc030': {
'name': 'TV Cable of Rensselaer'
},
'tvc015': {
'name': 'TVC Cable'
},
'cab180': {
'name': 'TVision'
},
'twi040': {
'name': 'Twin Lakes'
},
'tvtinc': {
'name': 'Twin Valley'
},
'uis010': {
'name': 'Union Telephone Company'
},
'uni110': {
'name': 'United Communications - TN'
},
'uni120': {
'name': 'United Services'
},
'uss020': {
'name': 'US Sonet'
},
'cab060': {
'name': 'USA Communications'
},
'she005': {
'name': 'USA Communications/Shellsburg, IA'
},
'val040': {
'name': 'Valley TeleCom Group'
},
'val025': {
'name': 'Valley Telecommunications'
},
'val030': {
'name': 'Valparaiso Broadband'
},
'cla050': {
'name': 'Vast Broadband'
},
'sul015': {
'name': 'Venture Communications Cooperative, Inc.'
},
'ver025': {
'name': 'Vernon Communications Co-op'
},
'weh010-vicksburg': {
'name': 'Vicksburg Video'
},
'vis070': {
'name': 'Vision Communications'
},
'volcanotel': {
'name': 'Volcano Vision, Inc.'
},
'vol040-02': {
'name': 'VolFirst / BLTV'
},
'ver070': {
'name': 'VTel'
},
'nttcvtx010': {
'name': 'VTX1'
},
'bci010-02': {
'name': 'Vyve Broadband'
},
'wab020': {
'name': 'Wabash Mutual Telephone'
},
'waitsfield': {
'name': 'Waitsfield Cable'
},
'wal010': {
'name': 'Walnut Communications'
},
'wavebroadband': {
'name': 'Wave'
},
'wav030': {
'name': 'Waverly Communications Utility'
},
'wbi010': {
'name': 'WBI'
},
'web020': {
'name': 'Webster-Calhoun Cooperative Telephone Association'
},
'wes005': {
'name': 'West Alabama TV Cable'
},
'carolinata': {
'name': 'West Carolina Communications'
},
'wct010': {
'name': 'West Central Telephone Association'
},
'wes110': {
'name': 'West River Cooperative Telephone Company'
},
'ani030': {
'name': 'WesTel Systems'
},
'westianet': {
'name': 'Western Iowa Networks'
},
'nttcwhi010': {
'name': 'Whidbey Telecom'
},
'weh010-white': {
'name': 'White County Cable TV'
},
'wes130': {
'name': 'Wiatel'
},
'wik010': {
'name': 'Wiktel'
},
'wil070': {
'name': 'Wilkes Communications, Inc./RiverStreet Networks'
},
'wil015': {
'name': 'Wilson Communications'
},
'win010': {
'name': 'Windomnet/SMBS'
},
'win090': {
'name': 'Windstream Cable TV'
},
'wcta': {
'name': 'Winnebago Cooperative Telecom Association'
},
'wtc010': {
'name': 'WTC'
},
'wil040': {
'name': 'WTC Communications, Inc.'
},
'wya010': {
'name': 'Wyandotte Cable'
},
'hin020-02': {
'name': 'X-Stream Services'
},
'xit010': {
'name': 'XIT Communications'
},
'yel010': {
'name': 'Yelcot Communications'
},
'mid180-01': {
'name': 'yondoo'
},
'cou060': {
'name': 'Zito Media'
},
}
class AdobePassIE(InfoExtractor):
    # Base class for extractors of sites gated behind Adobe Pass ("TV
    # Everywhere") provider authentication.  Subclasses call
    # _extract_mvpd_auth() to obtain a short-lived media token for a resource.
    # NOTE(review): InfoExtractor comes from .common (outside this view);
    # helpers such as _search_regex/_hidden_inputs are assumed from it.
    _SERVICE_PROVIDER_TEMPLATE = 'https://sp.auth.adobe.com/adobe-services/%s'
    # Fixed UA; it is also echoed in the 'ap_z' MVPD header below.
    _USER_AGENT = 'Mozilla/5.0 (X11; Linux i686; rv:47.0) Gecko/20100101 Firefox/47.0'
    # Cache section name used with self._downloader.cache for tokens.
    _MVPD_CACHE = 'ap-mvpd'
    def _download_webpage_handle(self, *args, **kwargs):
        """Download a page, always adding the geo-verification headers."""
        headers = kwargs.get('headers', {})
        headers.update(self.geo_verification_headers())
        kwargs['headers'] = headers
        return super(AdobePassIE, self)._download_webpage_handle(*args, **kwargs)
    @staticmethod
    def _get_mvpd_resource(provider_id, title, guid, rating):
        """Build the MRSS <rss> resource string Adobe Pass expects,
        describing one media item (title/guid/rating) for *provider_id*."""
        channel = etree.Element('channel')
        channel_title = etree.SubElement(channel, 'title')
        channel_title.text = provider_id
        item = etree.SubElement(channel, 'item')
        resource_title = etree.SubElement(item, 'title')
        resource_title.text = title
        resource_guid = etree.SubElement(item, 'guid')
        resource_guid.text = guid
        resource_rating = etree.SubElement(item, 'media:rating')
        resource_rating.attrib = {'scheme': 'urn:v-chip'}
        resource_rating.text = rating
        return '<rss version="2.0" xmlns:media="http://search.yahoo.com/mrss/">' + etree.tostring(channel).decode() + '</rss>'
    def _extract_mvpd_auth(self, url, video_id, requestor_id, resource):
        """Authenticate against Adobe Pass and return the short-lived
        media token ("short authorize") for *resource*.

        Authentication (authn) and authorization (authz) tokens are cached
        per *requestor_id* under self._MVPD_CACHE.  A '<pendingLogout'
        response wipes that cache entry and the whole flow is retried once
        (the ``while count < 2`` loop).  Raises ExtractorError when no MSO
        id / credentials are supplied or the provider rejects them.
        """
        def xml_text(xml_str, tag):
            # Text content of the first <tag>...</tag> element in xml_str.
            return self._search_regex(
                '<%s>(.+?)</%s>' % (tag, tag), xml_str, tag)
        def is_expired(token, date_ele):
            # Truthy when the token's <date_ele> timestamp lies in the past.
            token_expires = unified_timestamp(re.sub(r'[_ ]GMT', '', xml_text(token, date_ele)))
            return token_expires and token_expires <= int(time.time())
        def post_form(form_page_res, note, data={}):
            # Submit the form found on the page, merging its hidden inputs
            # with *data* (the shared default dict is only read, never
            # mutated).  A relative action URL is resolved against the
            # page's own URL.
            form_page, urlh = form_page_res
            post_url = self._html_search_regex(r'<form[^>]+action=(["\'])(?P<url>.+?)\1', form_page, 'post url', group='url')
            if not re.match(r'https?://', post_url):
                post_url = compat_urlparse.urljoin(urlh.geturl(), post_url)
            form_data = self._hidden_inputs(form_page)
            form_data.update(data)
            return self._download_webpage_handle(
                post_url, video_id, note, data=urlencode_postdata(form_data), headers={
                    'Content-Type': 'application/x-www-form-urlencoded',
                })
        def raise_mvpd_required():
            # Expected failure: the user must supply MSO id + credentials.
            raise ExtractorError(
                'This video is only available for users of participating TV providers. '
                'Use --ap-mso to specify Adobe Pass Multiple-system operator Identifier '
                'and --ap-username and --ap-password or --netrc to provide account credentials.', expected=True)
        # Headers sent with every Adobe service-provider request.
        mvpd_headers = {
            'ap_42': 'anonymous',
            'ap_11': 'Linux i686',
            'ap_z': self._USER_AGENT,
            'User-Agent': self._USER_AGENT,
        }
        # *resource* is either a full MRSS XML blob (see _get_mvpd_resource)
        # or already a plain guid string.
        guid = xml_text(resource, 'guid') if '<' in resource else resource
        count = 0
        while count < 2:
            requestor_info = self._downloader.cache.load(self._MVPD_CACHE, requestor_id) or {}
            authn_token = requestor_info.get('authn_token')
            if authn_token and is_expired(authn_token, 'simpleTokenExpires'):
                authn_token = None
            if not authn_token:
                # No (valid) cached authentication token: log in at the MSO.
                # TODO add support for other TV Providers
                mso_id = self._downloader.params.get('ap_mso')
                if not mso_id:
                    raise_mvpd_required()
                username, password = self._get_login_info('ap_username', 'ap_password', mso_id)
                if not username or not password:
                    raise_mvpd_required()
                mso_info = MSO_INFO[mso_id]
                provider_redirect_page_res = self._download_webpage_handle(
                    self._SERVICE_PROVIDER_TEMPLATE % 'authenticate/saml', video_id,
                    'Downloading Provider Redirect Page', query={
                        'noflash': 'true',
                        'mso_id': mso_id,
                        'requestor_id': requestor_id,
                        'no_iframe': 'false',
                        'domain_name': 'adobe.com',
                        'redirect_url': url,
                    })
                if mso_id == 'Comcast_SSO':
                    # Comcast page flow varies by video site and whether you
                    # are on Comcast's network.
                    provider_redirect_page, urlh = provider_redirect_page_res
                    if 'automatically signing you in' in provider_redirect_page:
                        # Already authenticated by network: just follow the
                        # OAuth redirect to complete the login.
                        oauth_redirect_url = self._html_search_regex(
                            r'window\.location\s*=\s*[\'"]([^\'"]+)',
                            provider_redirect_page, 'oauth redirect')
                        self._download_webpage(
                            oauth_redirect_url, video_id, 'Confirming auto login')
                    else:
                        if '<form name="signin"' in provider_redirect_page:
                            # Login form is already on the redirect page.
                            provider_login_page_res = provider_redirect_page_res
                        elif 'http-equiv="refresh"' in provider_redirect_page:
                            # Follow the meta-refresh to reach the login page.
                            oauth_redirect_url = self._html_search_regex(
                                r'content="0;\s*url=([^\'"]+)',
                                provider_redirect_page, 'meta refresh redirect')
                            provider_login_page_res = self._download_webpage_handle(
                                oauth_redirect_url, video_id,
                                'Downloading Provider Login Page')
                        else:
                            provider_login_page_res = post_form(
                                provider_redirect_page_res,
                                'Downloading Provider Login Page')
                        mvpd_confirm_page_res = post_form(
                            provider_login_page_res, 'Logging in', {
                                mso_info['username_field']: username,
                                mso_info['password_field']: password,
                            })
                        mvpd_confirm_page, urlh = mvpd_confirm_page_res
                        if '<button class="submit" value="Resume">Resume</button>' in mvpd_confirm_page:
                            post_form(mvpd_confirm_page_res, 'Confirming Login')
                elif mso_id == 'Verizon':
                    # In general, if you're connecting from a Verizon-assigned IP,
                    # you will not actually pass your credentials.
                    provider_redirect_page, urlh = provider_redirect_page_res
                    if 'Please wait ...' in provider_redirect_page:
                        saml_redirect_url = self._html_search_regex(
                            r'self\.parent\.location=(["\'])(?P<url>.+?)\1',
                            provider_redirect_page,
                            'SAML Redirect URL', group='url')
                        saml_login_page = self._download_webpage(
                            saml_redirect_url, video_id,
                            'Downloading SAML Login Page')
                    else:
                        saml_login_page_res = post_form(
                            provider_redirect_page_res, 'Logging in', {
                                mso_info['username_field']: username,
                                mso_info['password_field']: password,
                            })
                        saml_login_page, urlh = saml_login_page_res
                        if 'Please try again.' in saml_login_page:
                            raise ExtractorError(
                                'We\'re sorry, but either the User ID or Password entered is not correct.')
                    # Complete the SAML handshake and confirm the login.
                    saml_login_url = self._search_regex(
                        r'xmlHttp\.open\("POST"\s*,\s*(["\'])(?P<url>.+?)\1',
                        saml_login_page, 'SAML Login URL', group='url')
                    saml_response_json = self._download_json(
                        saml_login_url, video_id, 'Downloading SAML Response',
                        headers={'Content-Type': 'text/xml'})
                    self._download_webpage(
                        saml_response_json['targetValue'], video_id,
                        'Confirming Login', data=urlencode_postdata({
                            'SAMLResponse': saml_response_json['SAMLResponse'],
                            'RelayState': saml_response_json['RelayState']
                        }), headers={
                            'Content-Type': 'application/x-www-form-urlencoded'
                        })
                else:
                    # Generic MSO flow: login form, then a confirmation form
                    # (Rogers needs no confirmation step).
                    provider_login_page_res = post_form(
                        provider_redirect_page_res, 'Downloading Provider Login Page')
                    mvpd_confirm_page_res = post_form(provider_login_page_res, 'Logging in', {
                        mso_info.get('username_field', 'username'): username,
                        mso_info.get('password_field', 'password'): password,
                    })
                    if mso_id != 'Rogers':
                        post_form(mvpd_confirm_page_res, 'Confirming Login')
                session = self._download_webpage(
                    self._SERVICE_PROVIDER_TEMPLATE % 'session', video_id,
                    'Retrieving Session', data=urlencode_postdata({
                        '_method': 'GET',
                        'requestor_id': requestor_id,
                    }), headers=mvpd_headers)
                if '<pendingLogout' in session:
                    # Stale server-side session: drop our cache, retry once.
                    self._downloader.cache.store(self._MVPD_CACHE, requestor_id, {})
                    count += 1
                    continue
                authn_token = unescapeHTML(xml_text(session, 'authnToken'))
                requestor_info['authn_token'] = authn_token
                self._downloader.cache.store(self._MVPD_CACHE, requestor_id, requestor_info)
            # Authorization token is cached per-resource, keyed by its guid.
            authz_token = requestor_info.get(guid)
            if authz_token and is_expired(authz_token, 'simpleTokenTTL'):
                authz_token = None
            if not authz_token:
                authorize = self._download_webpage(
                    self._SERVICE_PROVIDER_TEMPLATE % 'authorize', video_id,
                    'Retrieving Authorization Token', data=urlencode_postdata({
                        'resource_id': resource,
                        'requestor_id': requestor_id,
                        'authentication_token': authn_token,
                        'mso_id': xml_text(authn_token, 'simpleTokenMsoID'),
                        'userMeta': '1',
                    }), headers=mvpd_headers)
                if '<pendingLogout' in authorize:
                    self._downloader.cache.store(self._MVPD_CACHE, requestor_id, {})
                    count += 1
                    continue
                if '<error' in authorize:
                    raise ExtractorError(xml_text(authorize, 'details'), expected=True)
                authz_token = unescapeHTML(xml_text(authorize, 'authzToken'))
                requestor_info[guid] = authz_token
                self._downloader.cache.store(self._MVPD_CACHE, requestor_id, requestor_info)
            mvpd_headers.update({
                'ap_19': xml_text(authn_token, 'simpleSamlNameID'),
                'ap_23': xml_text(authn_token, 'simpleSamlSessionIndex'),
            })
            # Finally exchange the authz token for the short media token.
            short_authorize = self._download_webpage(
                self._SERVICE_PROVIDER_TEMPLATE % 'shortAuthorize',
                video_id, 'Retrieving Media Token', data=urlencode_postdata({
                    'authz_token': authz_token,
                    'requestor_id': requestor_id,
                    'session_guid': xml_text(authn_token, 'simpleTokenAuthenticationGuid'),
                    'hashed_guid': 'false',
                }), headers=mvpd_headers)
            if '<pendingLogout' in short_authorize:
                self._downloader.cache.store(self._MVPD_CACHE, requestor_id, {})
                count += 1
                continue
            return short_authorize
|
Tithen-Firion/youtube-dl
|
youtube_dl/extractor/adobepass.py
|
Python
|
unlicense
| 39,507
|
[
"COLUMBUS",
"Elk"
] |
2e8110971d89cb19d1094a8cb4e2c48fa8dec40f1231d944943927b35ec50c93
|
# -*- coding: utf-8 -*-
#
# Instant Press. Instant sites. CMS developed in Web2py Framework
# Site: http://www.instant2press.com
#
# Copyright (c) 2010 Mulone, Pablo Martín
#
# License Code: GPL, General Public License v. 2.0
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# http://groups.google.com/group/web2py-usuarios
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import math
import datetime
from gluon.html import *
from gluon.http import *
from gluon.validators import *
from gluon.sqlhtml import *
from gluon.sql import *
import gluon.contrib.simplejson as sj
#local
from utils import *
# Pagination settings for the admin user list: number of rows shown per
# page, and the maximum number of numbered page links rendered at once.
ADMIN_USERS_LIST_PER_PAGE = 5
ADMIN_MAX_LIST_PAGES = 10
class Users(object):
    """Read-only helpers around web2py's auth for Instant Press.

    Answers user-related questions (display name, admin status, login
    status) while transparently handling the Google App Engine runtime,
    where the platform's own admin flag replaces web2py group membership.
    """
    def __init__(self, i2p):
        self.i2p = i2p
    def get_user_title(self, id):
        """Return a display name ("Last, First") for user ``id``.

        Falls back to "Site administrator" on GAE and "Anonymous"
        elsewhere when the user is not found in auth_user.
        """
        db = self.i2p.db
        T = self.i2p.environment.T
        request = self.i2p.environment.request
        if request.env.web2py_runtime_gae:
            #in gae the user administrator is the only author.
            user_title = T("Site administrator")
        else:
            user_title = T("Anonymous")
        users = db(db.auth_user.id == id).select()
        if users:
            user = users[0]
            user_title = user.last_name + ", " + user.first_name
        return user_title
    def is_user_an_admin(self):
        """Return True when the current visitor is an administrator.

        On GAE this defers to the platform admin flag; otherwise it
        requires a logged-in user who belongs to the configured admin
        group.
        """
        request = self.i2p.environment.request
        auth = self.i2p.environment.auth
        if request.env.web2py_runtime_gae:
            from google.appengine.api import users
            if users.is_current_user_admin():
                return True
        else:
            if auth:
                if auth.is_logged_in():
                    is_admin = auth.has_membership(auth.id_group(self.i2p.config.group_admin))
                    if is_admin:
                        return True
        return False
    #added support to gae admin users
    def check_credentials_is_admin(self):
        """Abort the request unless the visitor is an admin.

        On GAE an HTTP 200 page with a Google sign-in link is raised;
        otherwise the visitor is redirected to auth's
        ``on_failed_authorization`` page.
        """
        request = self.i2p.environment.request
        auth = self.i2p.environment.auth
        # Bug fix: T was previously referenced below without being bound,
        # raising NameError whenever an anonymous GAE visitor hit this check.
        T = self.i2p.environment.T
        is_an_admin = self.is_user_an_admin()
        if not is_an_admin:
            if request.env.web2py_runtime_gae:
                from google.appengine.api import users
                login_html = '<a href="%s">%s</a>.' \
                    % (users.create_login_url(request.env.path_info), \
                       T('Sign in with your google account'))
                raise HTTP(200, '<html><body>%s</body></html>' % login_html)
            else:
                next = auth.settings.on_failed_authorization
                redirect(next)
        return is_an_admin
    #The user is logged in?
    def is_user_logged_in(self):
        """Return True when auth exists and reports a logged-in user."""
        logged_in=False
        auth = self.i2p.environment.auth
        if auth:
            if auth.is_logged_in():
                logged_in=True
        return logged_in
class admUsers(object):
    """Admin-panel CRUD operations on auth_user records.

    Every public method returns a JSON string (``json_response`` or
    ``sj.dumps``) consumed by the admin UI's AJAX handlers. Methods
    consistently refuse to let an admin act on himself and protect the
    configured default user (``self.i2p.config.email``); on GAE those
    checks are relaxed because the platform admin is not an auth_user.
    """
    def __init__(self, i2p):
        self.i2p = i2p
    #ADMIN
    def list(self, currentpage=1, search_text=""):
        """Render one page of the user list as JSON-wrapped HTML.

        ``currentpage`` is 1-based; ``search_text`` filters on a
        last-name prefix. Returns a JSON object with an 'html' key
        containing the toolbar, the user rows and the pager.
        """
        db = self.i2p.db
        T = self.i2p.environment.T
        request = self.i2p.environment.request
        auth = self.i2p.environment.auth
        if not isinstance(search_text, (unicode, str) ):
            search_text = ""
        max_users=ADMIN_USERS_LIST_PER_PAGE
        max_display_pages=ADMIN_MAX_LIST_PAGES
        # limitby window for the requested page (0-based, end exclusive)
        limit_inf = (max_users * currentpage) - max_users
        limit_sup = limit_inf + max_users
        query = (db.auth_user.id>0)
        if search_text!="":
            query = query & (db.auth_user.last_name.like(search_text+"%"))
        count_users = db(query).count()
        last_users = db(query).select(db.auth_user.ALL,\
                                      orderby=~db.auth_user.created_on,\
                                      limitby=(limit_inf, limit_sup))
        # --- toolbar: register / refresh / search ---
        link_register = A(T('Register'), \
                          _href=URL(request.application,\
                                    self.i2p.config.controller_default,\
                                    'user/register'), \
                          _style="padding-left: 5px;")
        icon_register_url = URL(request.application,'static','images/toolbar_add.png')
        toolbar_register_style = 'padding-left: 20px; background-image: url(%s); background-repeat: no-repeat;' \
            % icon_register_url
        refresh_list = A(T("Refresh"), _href='javascript: void(0);', \
                         _onclick="UserList(1);" ,\
                         _title="%s"%T('Reload the list'))
        icon_refresh_url = URL(request.application,'static','images/toolbar_refresh.png')
        toolbar_refresh_style = 'padding-left: 20px; background-image: url(%s); background-repeat: no-repeat;' \
            % icon_refresh_url
        input_search = '<input type="text" id="input-search" style="width: 200px; height: 20px; margin: 0px;" />'
        icon_search_url = URL(request.application,'static','images/search.png')
        icon_search = IMG(_src=icon_search_url, _alt="search")
        do_search = A(icon_search, _href='javascript: void(0);', \
                      _onclick="UserSearch();" ,\
                      _title="%s"%T('Search in last name'))
        toolbar_register = '<li style="%s">%s</li>' % (toolbar_register_style,link_register.xml())
        toolbar_refresh = '<li style="%s">%s</li>' % (toolbar_refresh_style,refresh_list.xml())
        toolbar_input_search = '<li>%s %s</li>' % (input_search, do_search.xml())
        toolbar = '<ul>%s %s %s</ul>' % (toolbar_register,toolbar_refresh,toolbar_input_search)
        # local accumulator renamed from ``list`` so the builtin is not shadowed
        html = '<div class="toolbar" style="height: 40px; width: 500px;">%s</div>' % toolbar
        if last_users:
            #create the header column
            checkbox_all = '<input type="checkbox" id="checkboxall" />'
            caption_column1 = checkbox_all
            caption_column2 = T('Avatar')
            caption_column3 = T('Last name')
            caption_column4 = T('First name')
            caption_column5 = T('Email')
            caption_column6 = T('Status')
            caption_column7 = T('Actions')
            caption_column8 = T('Created')
            row_column1 = '<div class="column1">%s</div>' % caption_column1
            row_column2 = '<div class="column2">%s</div>' % caption_column2
            row_column3 = '<div class="column3">%s</div>' % caption_column3
            row_column4 = '<div class="column4">%s</div>' % caption_column4
            row_column5 = '<div class="column5">%s</div>' % caption_column5
            row_column6 = '<div class="column6">%s</div>' % caption_column6
            row_column7 = '<div class="column7">%s</div>' % caption_column7
            row_column8 = '<div class="column8">%s</div>' % caption_column8
            row_clear = '<div style="clear: both;"></div>'
            row_user_xml = '<div class="row-user-headers"> %s %s %s %s %s %s %s %s %s </div>' \
                % (row_column1,row_column2,row_column3,row_column4,\
                   row_column5,row_column6,row_column7,row_column8,row_clear)
            html += row_user_xml
            #titles are hints
            title_edit_firstname = T('Click to change the first name of this user')
            title_edit_lastname = T('Click to change the last name of this user')
            title_edit_email = T('Click to change the email of this user')
            title_delete = T('Click to delete this user')
            title_activate = T('Click to activate this user, this will delete the disabled, blocked and pending status of the current user')
            title_disable = T('Click to disable this user')
            title_changepass = T('Click to change pass')
            title_setasadmin = T('Click to set this user as an admin. In AppEngine make an admin in your Appspot account')
            title_block = T('Click to block this user')
            #id group admin
            id_group_admin = auth.id_group(self.i2p.config.group_admin)
            for user in last_users:
                title_avatar = T('User ID: %s'%user.id)
                # registration_key encodes the account state
                if user.registration_key == 'pending':
                    caption_status = '<span style="color: orange;">' + str(T('Pending')) + '</span>'
                elif user.registration_key == 'disabled':
                    caption_status = '<span style="color: orange;">' + str(T('Disabled'))+ '</span>'
                elif user.registration_key == 'blocked':
                    caption_status = '<span style="color: orange;">' + str(T('Blocked')) + '</span>'
                else:
                    caption_status = '<span style="color: green;">' + str(T('Active')) + '</span>'
                if auth.has_membership(id_group_admin, user.id, self.i2p.config.group_admin):
                    caption_status += ', <span style="color: red;">' + str(T('Admin'))+ '</span>'
                checkbox_user = '<input type="checkbox" id="checkbox-%s" />'%user.id
                icon_avatar = IMG(_src=URL(request.application,'static','images/avatar.png'), \
                                  _alt="avatar", _width="24px", _height="24px", \
                                  _title="%s"%title_avatar)
                link_edit_firstname = A(user.first_name, _href='javascript: void(0);', \
                                        _onclick="UserFirstName(%s);"%(user.id), \
                                        _title="%s"%title_edit_firstname)
                link_edit_lastname = A(user.last_name, _href='javascript: void(0);', \
                                       _onclick="UserLastName(%s);"%(user.id), \
                                       _title="%s"%title_edit_lastname)
                link_edit_email = A(user.email , _href='javascript: void(0);', \
                                    _onclick="UserEmail(%s);"%(user.id), \
                                    _title="%s"%title_edit_email)
                icon_remove = IMG(_src=URL(request.application,'static','images/remove.png'), \
                                  _alt="remove")
                link_delete = A(icon_remove , _href='javascript: void(0);', \
                                _onclick="UserDelete(%s);"%(user.id), \
                                _title="%s"%title_delete)
                icon_activate = IMG(_src=URL(request.application,'static','images/activate.png'), \
                                    _alt="activate")
                link_activate = A(icon_activate , _href='javascript: void(0);', \
                                  _onclick="UserActivate(%s);"%(user.id), \
                                  _title="%s"%title_activate)
                icon_disable = IMG(_src=URL(request.application,'static','images/disable.png'), \
                                   _alt="disable")
                link_desactivate = A(icon_disable , _href='javascript: void(0);', \
                                     _onclick="UserDisable(%s);"%(user.id), \
                                     _title="%s"%title_disable)
                icon_change = IMG(_src=URL(request.application,'static','images/pass.gif'), \
                                  _alt="change pass")
                link_change = A(icon_change, _href='javascript: void(0);', \
                                _onclick="UserPassword(%s);"%(user.id), \
                                _title="%s"%title_changepass)
                icon_setadmin = IMG(_src=URL(request.application,'static','images/setadmin.png'), \
                                    _alt="set admin")
                link_setadmin = A(icon_setadmin , _href='javascript: void(0);', \
                                  _onclick="UserSetAdmin(%s);"%(user.id), \
                                  _title="%s"%title_setasadmin)
                link_block = A(icon_disable , _href='javascript: void(0);', \
                               _onclick="UserBlock(%s);"%(user.id), \
                               _title="%s"%title_block)
                link_actions = link_delete.xml() + ' ' + link_activate.xml() + ' ' + \
                    link_desactivate.xml() + ' ' + link_change.xml() + ' ' + \
                    link_setadmin.xml() + ' ' + link_block.xml()
                created_on = user.created_on.strftime("%Y-%m-%d:%I:%M:%p")
                row_column1 = '<div class="column1">%s</div>' % checkbox_user
                row_column2 = '<div class="column2">%s</div>' % icon_avatar.xml()
                row_column3 = '<div class="column3">%s</div>' % link_edit_lastname.xml()
                row_column4 = '<div class="column4">%s</div>' % link_edit_firstname.xml()
                row_column5 = '<div class="column5">%s</div>' % link_edit_email.xml()
                row_column6 = '<div class="column6">%s</div>' % caption_status
                row_column7 = '<div class="column7">%s</div>' % link_actions
                row_column8 = '<div class="column8">%s</div>' % created_on
                row_clear = '<div style="clear: both;"></div>'
                row_user_xml = '<div class="row-user" id="row-%s"> %s %s %s %s %s %s %s %s %s</div>' \
                    % (user.id,row_column1,row_column2,row_column3,row_column4,\
                       row_column5,row_column6,row_column7,row_column8,row_clear)
                html += row_user_xml
            if count_users>max_users:
                total_pages = count_users // max_users
                if (count_users % max_users)>0:
                    total_pages += 1
                # NOTE(review): with Python 2 integer division math.ceil is a
                # no-op here, so first_page is floor(currentpage/10)*10; the
                # pager still renders a window containing the current page.
                first_page = int(math.ceil(currentpage / max_display_pages)) * max_display_pages
                if first_page<1:
                    first_page=1
                    if total_pages < max_display_pages:
                        last_page = total_pages
                    else:
                        last_page=max_display_pages
                else:
                    last_page=first_page + max_display_pages
                backward = A(T("Prior"), _href='javascript: void(0);', \
                             _onclick="UsersList(%s,'%s');"%(currentpage-1,search_text))
                forward = A(T("Next"), _href='javascript: void(0);', \
                            _onclick="UsersList(%s,'%s');"%(currentpage+1,search_text))
                listpages=""
                if currentpage>1:
                    listpages += "<li>%s</li>" % backward.xml()
                for page in range(first_page, last_page+1):
                    page_a = A(unicode(page), _href='javascript: void(0);', \
                               _onclick="UsersList(%s,'%s');"%(page,search_text))
                    if page<=total_pages:
                        if page==currentpage:
                            class_current = ' class="current"'
                        else:
                            class_current = ''
                        listpages += "<li%s>%s</li>" % (class_current, page_a.xml())
                if total_pages>currentpage:
                    listpages += "<li>%s</li>" % forward.xml()
                if listpages!="":
                    html+='<div class="pages"><ul>%s</ul></div>' % listpages
            page_content=html
        else:
            page_content=html + "%s"%T("No users")
        html_content = '<h2>%s</h2>'%T("Users")
        html_content += "%s"%page_content
        info={}
        info['html']=sanitate_string(html_content)
        return sj.dumps(info)
    def delete(self, id):
        """Delete user ``id`` plus its personal group and memberships."""
        db = self.i2p.db
        T = self.i2p.environment.T
        request = self.i2p.environment.request
        session = self.i2p.environment.session
        auth = self.i2p.environment.auth
        users = db(db.auth_user.id == id).select()
        if users:
            user = users[0]
            if user.email!=self.i2p.config.email or request.env.web2py_runtime_gae:
                if not request.env.web2py_runtime_gae:
                    if user.email==auth.user.email:
                        return json_response(message=T("You cannot delete yourself!"),\
                                             alert=2,value="")
                id_user = user.id
                # remove the user's personal group and all memberships first
                db(db.auth_group.role=='user_%s'%id_user).delete()
                db(db.auth_membership.user_id==id_user).delete()
                db(db.auth_user.id == id_user).delete()
                return json_response(message=T("User deleted"),\
                                     alert=0,value="")
            else:
                return json_response(message=T("You cannot delete default user!"),\
                                     alert=2,value="")
        else:
            return json_response(message=T("The user doesn't exist!"),\
                                 alert=2,value="")
    def disable(self, id):
        """Mark user ``id`` as disabled (registration_key = 'disabled')."""
        db = self.i2p.db
        T = self.i2p.environment.T
        request = self.i2p.environment.request
        session = self.i2p.environment.session
        auth = self.i2p.environment.auth
        users = db(db.auth_user.id == id).select()
        if users:
            user = users[0]
            if not request.env.web2py_runtime_gae:
                if user.email==auth.user.email:
                    return json_response(message=T("You cannot disable yourself!"),\
                                         alert=2,value="")
            user.update_record(registration_key = 'disabled')
            return json_response(message=T("User disabled"),\
                                 alert=0,value="")
        else:
            return json_response(message=T("The user doesn't exist!"),\
                                 alert=2,value="")
    def activate(self, id):
        """Clear pending/disabled/blocked status for user ``id``."""
        db = self.i2p.db
        T = self.i2p.environment.T
        request = self.i2p.environment.request
        session = self.i2p.environment.session
        auth = self.i2p.environment.auth
        users = db(db.auth_user.id == id).select()
        if users:
            user = users[0]
            if user.email!=self.i2p.config.email or request.env.web2py_runtime_gae:
                if not request.env.web2py_runtime_gae:
                    if user.email==auth.user.email:
                        return json_response(message=T("You cannot activate yourself!"), \
                                             alert=2,value="")
                user.update_record(registration_key = '')
                return json_response(message= T("User activated"),\
                                     alert=0,value="")
            else:
                return json_response(message=T("You cannot activate default user!"),\
                                     alert=2,value="")
        else:
            return json_response(message=T("The user doesn't exist!"),\
                                 alert=2,value="")
    def setadmin(self, id):
        """Toggle admin-group membership for user ``id``."""
        db = self.i2p.db
        T = self.i2p.environment.T
        request = self.i2p.environment.request
        session = self.i2p.environment.session
        auth = self.i2p.environment.auth
        users = db(db.auth_user.id == id).select()
        if users:
            user = users[0]
            if user.email!=self.i2p.config.email or request.env.web2py_runtime_gae:
                if not request.env.web2py_runtime_gae:
                    if user.email==auth.user.email:
                        return json_response(message=T("You cannot set as admin yourself!"),\
                                             alert=2,value="")
                id_group_admin = auth.id_group(self.i2p.config.group_admin)
                if auth.has_membership(id_group_admin, user.id, self.i2p.config.group_admin):
                    auth.del_membership(id_group_admin, user.id)
                    return json_response(message=T("User has been removed from admin list") ,\
                                         alert=0,value="")
                else:
                    auth.add_membership(id_group_admin, user.id)
                    return json_response(message=T("User has been added to admin list"),\
                                         alert=0,value="")
            else:
                return json_response(message=T("You cannot edit default user!"),\
                                     alert=2,value="")
        else:
            return json_response(message=T("The user doesn't exist!"),\
                                 alert=2,value="")
    def save_firstname(self, id, value):
        """Update first_name of user ``id`` with the stripped ``value``."""
        db = self.i2p.db
        T = self.i2p.environment.T
        request = self.i2p.environment.request
        session = self.i2p.environment.session
        auth = self.i2p.environment.auth
        value = value.strip()
        users = db(db.auth_user.id == id).select()
        if users:
            user = users[0]
            if user.email!=self.i2p.config.email or request.env.web2py_runtime_gae:
                user.update_record(first_name = value)
                return json_response(message=T('Firstname updated'),\
                                     alert=0,value="")
            else:
                return json_response(message=T("You cannot edit default user!"),\
                                     alert=2,value="")
        else:
            return json_response(message=T("The user doesn't exist!"),\
                                 alert=2,value="")
    def save_lastname(self, id, value):
        """Update last_name of user ``id`` with the stripped ``value``."""
        db = self.i2p.db
        T = self.i2p.environment.T
        request = self.i2p.environment.request
        session = self.i2p.environment.session
        auth = self.i2p.environment.auth
        value = value.strip()
        users = db(db.auth_user.id == id).select()
        if users:
            user = users[0]
            if user.email!=self.i2p.config.email or request.env.web2py_runtime_gae:
                user.update_record(last_name = value)
                return json_response(message=T('Lastname updated'),\
                                     alert=0,value="")
            else:
                return json_response(message=T("You cannot edit default user!"),\
                                     alert=2,value="")
        else:
            return json_response(message=T("The user doesn't exist!"),\
                                 alert=2,value="")
    #need to check if IS IN DB
    def save_email(self, id, value):
        """Validate ``value`` as an email and store it on user ``id``."""
        db = self.i2p.db
        T = self.i2p.environment.T
        request = self.i2p.environment.request
        session = self.i2p.environment.session
        auth = self.i2p.environment.auth
        value = value.strip()
        # IS_EMAIL returns (value, error); error is None when valid
        notvalid = (IS_EMAIL()(value))[1]
        if notvalid:
            return json_response(message=T("The email is not valid"),\
                                 alert=2,value="")
        users = db(db.auth_user.id == id).select()
        if users:
            user = users[0]
            if user.email!=self.i2p.config.email or request.env.web2py_runtime_gae:
                user.update_record(email = value)
                return json_response(message=T('Email updated'),\
                                     alert=0,value="")
            else:
                return json_response(message=T("You cannot edit default user!"),\
                                     alert=2,value="")
        else:
            return json_response(message=T("The user doesn't exist!"),\
                                 alert=2,value="")
    def get_email(self, id):
        """Return the email of user ``id`` in the JSON ``value`` field."""
        db = self.i2p.db
        T = self.i2p.environment.T
        users = db(db.auth_user.id == id).select()
        if users:
            user = users[0]
            value = user.email
            return json_response(message="",alert=0,value=value)
        else:
            return json_response(message=T("The user doesn't exist!"),\
                                 alert=2,value="")
    def get_firstname(self, id):
        """Return the first name of user ``id`` in the JSON ``value`` field."""
        db = self.i2p.db
        T = self.i2p.environment.T
        users = db(db.auth_user.id == id).select()
        if users:
            user = users[0]
            value = user.first_name
            return json_response(message="",alert=0,value=value)
        else:
            return json_response(message=T("The user doesn't exist!"),\
                                 alert=2,value="")
    def get_lastname(self, id):
        """Return the last name of user ``id`` in the JSON ``value`` field."""
        db = self.i2p.db
        T = self.i2p.environment.T
        users = db(db.auth_user.id == id).select()
        if users:
            user = users[0]
            value = user.last_name
            return json_response(message="",alert=0,value=value)
        else:
            return json_response(message=T("The user doesn't exist!"),\
                                 alert=2,value="")
    def block(self, id):
        """Mark user ``id`` as blocked (registration_key = 'blocked')."""
        db = self.i2p.db
        T = self.i2p.environment.T
        request = self.i2p.environment.request
        session = self.i2p.environment.session
        auth = self.i2p.environment.auth
        users = db(db.auth_user.id == id).select()
        if users:
            user = users[0]
            if not request.env.web2py_runtime_gae:
                if user.email==auth.user.email:
                    return json_response(message= T("You cannot block yourself!"),\
                                         alert=2,value="")
            user.update_record(registration_key = 'blocked')
            return json_response(message=T("User blocked"),\
                                 alert=0,value="")
        else:
            return json_response(message=T("The user doesn't exist!"),\
                                 alert=2,value="")
    def change_password(self, id, value):
        """Hash ``value`` with auth's HMAC key and store it for user ``id``.

        Rejects passwords shorter than 6 characters; admins must change
        their own password through their profile instead.
        """
        db = self.i2p.db
        T = self.i2p.environment.T
        request = self.i2p.environment.request
        session = self.i2p.environment.session
        auth = self.i2p.environment.auth
        value = value.strip()
        # IS_LENGTH returns (value, error); error is None when valid
        notvalid = (IS_LENGTH(minsize=6)(value))[1]
        if notvalid:
            return json_response(message=T("The password is not valid, the minsize of a password is 6 character"),\
                                 alert=2,value="")
        users = db(db.auth_user.id == id).select()
        if users:
            user = users[0]
            if user.email!=self.i2p.config.email or request.env.web2py_runtime_gae:
                if not request.env.web2py_runtime_gae:
                    if user.email==auth.user.email:
                        return json_response(message=T("You cannot change your password in this panel, use your profile"),\
                                             alert=2,value="")
                # hash the password exactly as web2py's Auth would on login
                my_crypt = CRYPT(key=auth.settings.hmac_key)
                crypt_pass = my_crypt(value)[0]
                user.update_record(password = crypt_pass)
                return json_response(message= T("User password changed"),\
                                     alert=0,value="")
            else:
                # fixed typo in the user-facing message ("passwor")
                return json_response(message=T("You cannot change password of the default user!"),\
                                     alert=2,value="")
        else:
            return json_response(message=T("The user doesn't exist!"),\
                                 alert=2,value="")
    #this only return a random generated password,
    #the password are hashed in db.
    def get_password(self, id):
        """Return a freshly generated random password suggestion.

        The stored password is hashed and cannot be recovered, so this
        only proposes a new 12-character password mixing lower/upper
        case, digits and the specials ``!#$*``.
        """
        db = self.i2p.db
        T = self.i2p.environment.T
        def random_password():
            # 3 rounds of (lower, upper, digit, special), then shuffled
            import string
            import random
            password = ''
            specials=r'!#$*'
            for i in range(0,3):
                password += random.choice(string.lowercase)
                password += random.choice(string.uppercase)
                password += random.choice(string.digits)
                password += random.choice(specials)
            return ''.join(random.sample(password,len(password)))
        users = db(db.auth_user.id == id).select()
        if users:
            user = users[0]
            value = random_password()
            return json_response(message="",alert=0,value=value)
        else:
            return json_response(message=T("The user doesn't exist!"),\
                                 alert=2,value="")
|
LispyAriaro/instant-press
|
modules/users.py
|
Python
|
gpl-2.0
| 31,385
|
[
"VisIt"
] |
5ee8253d5e78460e19de767f7aadd2e13940a2f78e78d47b6941b5f78b611286
|
"""Tests for the DIRAC.Core.Utilities.Extensions module"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import DIRAC
from DIRAC.Core.Utilities.Extensions import (
findSystems,
findAgents,
findExecutors,
findServices,
findDatabases,
extensionsByPriority,
getExtensionMetadata,
)
def test_findSystems():
    """DIRAC itself must expose several packages, all named '*System'."""
    found = findSystems([DIRAC])
    assert len(found) > 5
    for name in found:
        assert name.endswith("System")
def test_findAgents():
    """A vanilla DIRAC install ships more than a handful of agents."""
    found = findAgents([DIRAC])
    assert len(found) > 5
def test_findExecutors():
    """At least a couple of executors should be discoverable."""
    found = findExecutors([DIRAC])
    assert len(found) > 1
def test_findServices():
    """A vanilla DIRAC install ships more than a handful of services."""
    found = findServices([DIRAC])
    assert len(found) > 5
def test_findDatabases():
    """Database discovery yields (system, path) pairs pointing at .sql files."""
    found = findDatabases([DIRAC])
    assert len(found) > 5
    for system, fn in found:
        assert str(fn).endswith(".sql")
def test_extensionsByPriority():
    """The base DIRAC extension is always part of the priority-ordered list."""
    ordered = extensionsByPriority()
    assert "DIRAC" in ordered
def test_getExtensionMetadata():
    """DIRAC itself carries the base priority of 0."""
    meta = getExtensionMetadata("DIRAC")
    assert meta["priority"] == 0
|
ic-hep/DIRAC
|
src/DIRAC/Core/Utilities/test/Test_Extensions.py
|
Python
|
gpl-3.0
| 1,162
|
[
"DIRAC"
] |
d2422777a5f7ee976c522902db5ca6ecb61f4eb33591eb763b73564ede0834bb
|
# procedure for existing halos, after the first snapshot was initiated :
import sys
# Snapshots must be processed strictly in sequence (1, 2, 3, ...): each
# iteration reads the emerge outputs written for the previous snapshot,
# so they cannot be run in parallel.
#ii = int(sys.argv[1])
#print('snapshot', ii)
import time
t0 = time.time()  # wall-clock start, for crude runtime measurement
from multiprocessing import Pool
#p=Pool(12)
import h5py
import os
import glob
import numpy as n
import EmergeStellarMass as sm
# Stellar-mass model instance; provides the baryon fraction f_b and the
# mass/redshift-dependent conversion efficiency epsilon() used below.
model = sm.StellarMass()
import pandas as pd
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
# MultiDark-like flat LCDM cosmology; Ob0 deliberately left commented out.
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115)#, Ob0=0.048206)
import astropy.constants as constants
# generic functions
# =================
def f_loss(t):
    """Fraction of newly formed stellar mass returned to the ISM after time t.

    :param t: elapsed time in years (callers pass age_yr or a snapshot dt).
    :return: dimensionless loss fraction, 0 at t=0, growing logarithmically.

    Converted from an assigned lambda to a ``def`` (PEP 8 E731); behavior
    is unchanged.
    """
    return 0.05 * n.log(1 + t / (1.4 * 10 ** 6))


def t_dyn(rvir, mvir):
    """Halo dynamical time, (rvir^3 / (G * mvir))^0.5.

    :param rvir: virial radius (kpc, per the calling code's datasets).
    :param mvir: virial mass (Msun).
    :return: dynamical time in years.

    The constant 9.797465327217671e-24 is presumably G expressed in
    (kpc, Msun, yr) units -- TODO confirm against the original Emerge paper.
    """
    return (rvir ** 3. / (9.797465327217671e-24 * mvir)) ** 0.5
def tau_quenching(m_star, tdyn, tau_0=4.282, tau_s=0.363):
    """Quenching delay per halo: tau_0 dynamical times below 1e10 Msun,
    scaled by (m_star/1e10)^tau_s above that stellar mass."""
    out = n.zeros_like(m_star)
    low_mass = m_star < 1e10
    high_mass = ~low_mass
    out[low_mass] = tau_0 * tdyn[low_mass]
    out[high_mass] = tau_0 * tdyn[high_mass] * (m_star[high_mass] * 10. ** (-10.)) ** tau_s
    return out
def compute_qtys_new_halos_pk(mvir, rvir, redshift, age_yr):
    """
    Initialise galaxy quantities for halos that first appear in this snapshot.

    The growth is integrated since the start of the Universe, i.e. the mean
    rates are simply value / age.

    :param mvir: list of mvir [Msun], length = n.
    :param rvir: list of rvir [kpc], length = n.
    :param redshift: redshift of the snapshot replicated n times.
    :param age_yr: age of the Universe for the snapshot replicated n times.

    Typically called with the '/halo_properties/mvir' and 'rvir' datasets
    masked by mask_f1_new_halos and the snapshot's 'age_yr' attribute.

    :return: (mvir_dot, rvir_dot, dMdt, dmdt_star, star_formation_rate,
              stellar_mass)
    """
    # mean growth rate since the Big Bang: equation (4);
    # no pseudo-evolution correction for brand-new halos
    growth_rate = mvir / age_yr
    rvir_rate = rvir / age_yr
    # equation (1): baryons accreted and converted into stars
    sfr_instant = model.f_b * growth_rate * model.epsilon(mvir, redshift)
    # accretion term is zero in this first step
    # equations (11)/(12): subtract the mass promptly returned to the ISM
    sfr = sfr_instant * (1. - f_loss(age_yr))
    # stellar mass accumulated at the mean rate over the full age
    return growth_rate, rvir_rate, growth_rate, sfr_instant, sfr, sfr * age_yr
def compute_qtys_evolving_halos_pk(mvir_f0, mvir_f1, age_f0, age_f1, rvir_f0, rvir_f1, redshift, t_dynamical, rs_f1, mpeak_f1, mpeak_scale_f1, f1_scale, m_icm_f0, stellar_mass_f0, star_formation_rate_f0 ):
    """
    Update the quantities for halos present in both f0 and f1 (1-to-1 mapping).

    inputs (all masked with mask_f0_evolving_11_halos / mask_f1_evolving_11_halos):
      mvir_f0, mvir_f1 [Msun]; age_f0, age_f1 [yr]; rvir_f0, rvir_f1 [kpc];
      redshift; t_dynamical [yr]; rs_f1 [kpc]; mpeak_f1 [Msun];
      mpeak_scale_f1; f1_scale (float); m_icm_f0 [Msun];
      stellar_mass_f0 [Msun]; star_formation_rate_f0 [Msun/yr]
    subcases :
      * quenching : (mvir < Mpeak) & (Mpeak_scale < f1_scale)
        * case 1. ( age >= t_mpeak ) & ( age_yr < t_mpeak + t_quench)
        * case 2. (age_yr >= t_mpeak + t_quench)
      * stripping, case 1 : (dMdt < 0) => all mass goes to ICM, m=0, mdot=0
      * stripping, case 2 : after Mpeak, if M < 0.122 * Mpeak => same
    returns
      mvir_dot, rvir_dot, dMdt, dmdt_star, star_formation_rate,
      stellar_mass, m_icm

    NOTE(review): the quench/strip branches use ``if`` on boolean arrays, so
    this function only works per-halo (scalar inputs) -- presumably it is
    mapped over halos via multiprocessing; confirm with the caller.
    """
    # computing dMdt for the halo over the inter-snapshot interval
    dt = age_f1 - age_f0
    mvir_dot = (mvir_f1-mvir_f0) / (dt)
    rvir_dot = (rvir_f1-rvir_f0) / (dt)
    # NFW density at rvir, used to remove pseudo-evolution (growth that is
    # only due to the virial radius definition moving outwards)
    c = rvir_f1 / rs_f1
    rho_nfw = mvir_f1 / (rs_f1**3. * 4. * n.pi * c * (1+c)**2. * (n.log(1.+c)-c/(1.+c)))
    pseudo_evolution_correction = 4. * n.pi * rvir_f1 * rvir_f1 * rvir_dot * rho_nfw
    dMdt = mvir_dot - pseudo_evolution_correction
    # initialize the ICM mass to the previous value
    m_icm = m_icm_f0
    # direct estimates of SFR and stellar mass, equations (1), (11)
    dmdt_star = model.f_b * dMdt * model.epsilon(mvir_f1, redshift)
    f_lost = f_loss(dt)
    star_formation_rate = dmdt_star * (1. - f_lost)
    stellar_mass = star_formation_rate * dt + stellar_mass_f0
    # --- variations due to stripping, merging and quenching ---
    quenching = (mvir_f1 < mpeak_f1) & (mpeak_scale_f1 < f1_scale)
    # inlined tau_quenching() for the scalar case
    if stellar_mass_f0 < 1e10 :
        t_quench = t_dynamical * 4.282
    else :
        t_quench = t_dynamical * 4.282 * (stellar_mass_f0 * 10.**(-10.))**(0.363)
    t_mpeak = cosmoMD.age( 1. / mpeak_scale_f1 - 1. ).to(u.yr).value
    # quenching case 1: SFR frozen at its value when Mpeak was reached
    quench_1 = (quenching) & (age_f1 >= t_mpeak ) & ( age_f1 < t_mpeak + t_quench)
    if quench_1 :
        star_formation_rate = n.ones_like(star_formation_rate)*star_formation_rate_f0
        stellar_mass = star_formation_rate * dt + stellar_mass_f0
    # quenching case 2: star formation fully stopped
    quench_2 = (quenching) &(age_f1 >= t_mpeak + t_quench )
    if quench_2:
        star_formation_rate = n.zeros_like(star_formation_rate)
        stellar_mass = stellar_mass_f0
    # stripping case 1: negative growth => halo being disrupted
    stripping_1 = (dMdt < 0)
    # stripping case 2: after Mpeak, mass dropped below 0.122 * Mpeak
    stripping_2 = (mvir_f1 < 0.122*mpeak_f1) & (mpeak_scale_f1 < f1_scale)
    # BUG FIX: was (stripping_1) | (stripping_1), which silently dropped
    # stripping case 2 even though stripping_2 was computed above.
    stripping = (stripping_1) | (stripping_2)
    if stripping :
        # all stars go to the intra-cluster medium; galaxy is erased
        m_icm += stellar_mass_f0
        stellar_mass = n.zeros_like(stellar_mass)
        star_formation_rate = n.zeros_like(star_formation_rate)
    return mvir_dot, rvir_dot, dMdt, dmdt_star, star_formation_rate, stellar_mass, m_icm
def merge_system(mvir_f0, mvir_f1, age_f0, age_f1, rvir_f0, rvir_f1, redshift, t_dynamical, rs_f1, mpeak_f1, mpeak_scale_f1, f1_scale, m_icm_f0, stellar_mass_f0, star_formation_rate_f0, sum_stellar_mass_guests):
    """
    Evolve an f1 host that absorbs several f0 progenitors (guests).

    The host is first evolved exactly like a 1-to-1 halo (pseudo-evolution
    corrected growth, SFR, stellar mass); then the merged satellites'
    stars are split between the host and its intra-cluster medium with an
    escape fraction f_esc = 0.388:
      * m_star_sat x f_esc       => m_host_ICM
      * m_star_sat x (1 - f_esc) => m_star_host

    Returns (mvir_dot, rvir_dot, dMdt, dmdt_star, stellar_mass,
    star_formation_rate, m_icm) -- note stellar_mass precedes SFR here.
    """
    # evolution of the host over the inter-snapshot interval
    dt = age_f1 - age_f0
    mvir_dot = (mvir_f1 - mvir_f0) / dt
    rvir_dot = (rvir_f1 - rvir_f0) / dt
    # NFW density at rvir for the pseudo-evolution correction
    concentration = rvir_f1 / rs_f1
    nfw_mass_term = n.log(1. + concentration) - concentration / (1. + concentration)
    rho_nfw = mvir_f1 / (rs_f1 ** 3. * 4. * n.pi * concentration * (1 + concentration) ** 2. * nfw_mass_term)
    dMdt = mvir_dot - 4. * n.pi * rvir_f1 * rvir_f1 * rvir_dot * rho_nfw
    # keep a reference to the previous ICM value (updated in place below,
    # matching the original's += side effect on array inputs)
    m_icm = m_icm_f0
    dmdt_star = model.f_b * dMdt * model.epsilon(mvir_f1, redshift)
    star_formation_rate = dmdt_star * (1. - f_loss(dt))
    stellar_mass = star_formation_rate * dt + stellar_mass_f0
    # merging the sub systems: split guest stars between host and ICM
    stellar_mass += (1. - 0.388) * sum_stellar_mass_guests
    m_icm += 0.388 * sum_stellar_mass_guests
    return mvir_dot, rvir_dot, dMdt, dmdt_star, stellar_mass, star_formation_rate, m_icm
class EmergeIterate():
"""
Loads iterates one step with the Emerge model.
:param ii: index of the snapshot of interest
:param env: environment variable of the box. In this dir must be a sub dir
'h5' with the 'hlist_?.?????_emerge.hdf5' data files in it
:param L_box: length of the box in Mpc/h
Running the iteration
---------------------
ipython3
import EmergeIterate
iterate = EmergeIterate.EmergeIterate(12, 'MD10')
iterate.open_snapshots()
iterate.map_halos_between_snapshots()
iterate.init_new_quantities()
if len((iterate.mask_f1_new_halos).nonzero()[0]) > 0 :
iterate.compute_qtys_new_halos()
if len((iterate.mask_f0_evolving_11_halos).nonzero()[0]) > 0 :
iterate.compute_qtys_evolving_halos()
if len(self.mask_f1_in_a_merging.nonzero()[0]) > 0 :
iterate.compute_qtys_merging_halos()
self.write_results()
"""
def __init__(self, ii, env, L_box=1000.0 ):
self.ii = ii
self.env = env
self.L_box = L_box # box length
def open_snapshots(self):
"""
Opens the files into the class as f0 and f1
"""
h5_dir = os.path.join(os.environ[self.env], 'h5' )
input_list = n.array(glob.glob(os.path.join(h5_dir, "hlist_?.?????_emerge.hdf5")))
input_list.sort()
file_0 = input_list[self.ii-1]
file_1 = input_list[self.ii]
self.f0 = h5py.File(file_0, "r")
self.f0_scale = os.path.basename(file_0).split('_')[1]
self.positions_f0 = n.arange(len(self.f0['/halo_properties/id'].value))
self.f1 = h5py.File(file_1, "r+")
self.f1_scale = os.path.basename(file_1).split('_')[1]
self.positions_f1 = n.arange(len(self.f1['/halo_properties/id'].value))
    def map_halos_between_snapshots(self):
        """
        id mapping for halos present in the previous snapshot.
        Creates 6 arrays to do the mapping in the different cases:
        * mask_f1_new_halos
        * mask_f0_evolving_11_halos
        * mask_f1_evolving_11_halos
        * f1_id_with_multiple_progenitors
        * mask_f1_in_a_merging
        * mask_f0_in_a_merging
        Masks prefixed f0/f1 apply to the previous/current snapshot's
        halo arrays respectively.
        """
        #f0_desc_id_unique_list_all_descendents = n.unique(self.f0['/halo_properties/desc_id'].value)
        # desc_ids of f0 that actually exist as ids in f1 (some descendants are lost)
        f1_id_unique_list_descendents_detected_at_next_scale = n.intersect1d(n.unique(self.f0['/halo_properties/desc_id'].value), self.f1['/halo_properties/id'].value)
        # f0 halos whose descendant was found in f1
        mask_f0_to_propagate = n.in1d(self.f0['/halo_properties/desc_id'].value, f1_id_unique_list_descendents_detected_at_next_scale)
        # mask_f0_lost = (mask_f0_to_propagate == False )
        # evolving halos are given after applying this boolean mask to a f1 quantity :
        mask_f1_evolved_from_previous = n.in1d( self.f1['/halo_properties/id'].value, f1_id_unique_list_descendents_detected_at_next_scale )
        # new halos are given after applying this boolean mask to a f1 quantity
        # new halos in f1, not present in f0
        self.mask_f1_new_halos = (mask_f1_evolved_from_previous==False)
        print('new halos', len(self.mask_f1_new_halos.nonzero()[0]))
        # halos descending :
        # mask_f0_to_propagate
        # mask_f1_evolved_from_previous
        # pandas duplicated() flags desc_ids shared by >1 progenitor, i.e. mergers
        s = pd.Series(self.f0['/halo_properties/desc_id'].value[mask_f0_to_propagate])
        self.f1_id_with_multiple_progenitors = s[s.duplicated()].get_values()
        # also = f0_desc_id merging into 1 halo in f1
        # merging systems [many halos in f0 into a single f1 halo]
        self.mask_f1_in_a_merging = n.in1d( self.f1['/halo_properties/id'].value, self.f1_id_with_multiple_progenitors )
        self.mask_f0_in_a_merging = n.in1d( self.f0['/halo_properties/desc_id'].value, self.f1_id_with_multiple_progenitors )
        # halos mapped 1::1 between snapshots (propagated but not merging)
        self.mask_f0_evolving_11_halos = ( mask_f0_to_propagate ) & ( self.mask_f0_in_a_merging == False )
        self.mask_f1_evolving_11_halos = ( mask_f1_evolved_from_previous ) & ( self.mask_f1_in_a_merging == False )
        print('11 mapping', len(self.mask_f0_evolving_11_halos.nonzero()[0]), len(self.mask_f1_evolving_11_halos.nonzero()[0]))
        print('merging systems', len(self.f1_id_with_multiple_progenitors))
        # debugging helper kept for reference: dumps every merging system
        #for dede in self.f1_id_with_multiple_progenitors :
        #sel = self.f0['/halo_properties/desc_id'].value == dede
        #print( dede )
        #print('desc id', self.f0['/halo_properties/desc_id'].value[sel])
        #print('id', self.f0['/halo_properties/id'].value[sel])
        #print('pid', self.f0['/halo_properties/pid'].value[sel])
        #print('mvir', self.f0['/halo_properties/mvir'].value[sel])
        #print('futur merger mmpid', self.f0['/halo_properties/Future_merger_MMP_ID'].value[sel])
        #print('time to future merger', self.f0['/halo_properties/Time_to_future_merger'].value[sel])
        #print('Ms', self.f0['/emerge_data/stellar_mass'].value[sel])
        #print('SFR',self.f0['/emerge_data/star_formation_rate'].value[sel])
        #print('mCIM',self.f0['/emerge_data/m_icm'].value[sel])
        #print('=================================')
def init_new_quantities(self):
"""
Quantities computed for every halos are initialized to 0
* mvir_dot
* rvir_dot
* dMdt
* dmdt_star
* dmdt_star_accretion
* stellar_mass
* star_formation_rate
* m_icm
* t_dynamical [in years]
Along with the iteration, these quantities will be updated accordingly
"""
self.mvir_dot = n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.rvir_dot = n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.dMdt = n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.dmdt_star = n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.dmdt_star_accretion=n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.stellar_mass = n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.star_formation_rate = n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.m_icm = n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.t_dynamical = t_dyn( self.f1['/halo_properties/rvir'].value, self.f1['/halo_properties/mvir'].value )
def write_results(self):
"""
After computing all quantities, you need to write the results in the h5 file.
"""
emerge_data = self.f1.create_group('emerge_data')
#emerge_data.attrs['f_lost'] = f_lost
ds = emerge_data.create_dataset('mvir_dot', data = self.mvir_dot )
ds.attrs['units'] = r'$h^{-1} M_\odot / yr$'
ds.attrs['long_name'] = r'$d M_{vir} / dt$'
ds = emerge_data.create_dataset('rvir_dot', data = self.rvir_dot )
ds.attrs['units'] = r'$h^{-1} kpc / yr$'
ds.attrs['long_name'] = r'$d r_{vir} / dt$'
ds = emerge_data.create_dataset('dMdt', data = self.dMdt )
ds.attrs['units'] = r'$h^{-1} M_\odot / yr$'
ds.attrs['long_name'] = r'$\langle d M / dt \rangle$ (4)'
ds = emerge_data.create_dataset('dmdt_star', data = self.dmdt_star )
ds.attrs['units'] = r'$h^{-1} M_\odot / yr$'
ds.attrs['long_name'] = r'$ d m_* / dt $ (1)'
ds = emerge_data.create_dataset('dmdt_star_accretion', data =
self.dmdt_star_accretion )
ds.attrs['units'] = r'$h^{-1} M_\odot / yr$'
ds.attrs['long_name'] = r'$ d m_{acc} / dt $ '
ds = emerge_data.create_dataset('star_formation_rate', data =
self.star_formation_rate )
ds.attrs['units'] = r'$h^{-1} M_\odot / yr$'
ds.attrs['long_name'] = r'$ d m / dt $ '
ds = emerge_data.create_dataset('stellar_mass', data = self.stellar_mass )
ds.attrs['units'] = r'$h^{-1} M_\odot $'
ds.attrs['long_name'] = r'$ m_* $ (11)'
ds = emerge_data.create_dataset('m_icm', data = self.m_icm )
ds.attrs['units'] = r'$h^{-1} M_\odot $'
ds.attrs['long_name'] = r'$ m_{ICM}$ '
self.f0.close()
self.f1.close()
print("Results written")
def compute_qtys_new_halos(self):
"""
Creates a new galaxy along with the new halo.
Integrates since the start of the Universe.
Updates the initiated quantities with the values of interest.
"""
# evaluate equation (4)
self.mvir_dot[self.mask_f1_new_halos] = self.f1['/halo_properties/mvir'].value[self.mask_f1_new_halos] / self.f1.attrs['age_yr']
self.rvir_dot[self.mask_f1_new_halos] = self.f1['/halo_properties/rvir'].value[self.mask_f1_new_halos] / self.f1.attrs['age_yr']
# no pseudo evolution correction
self.dMdt[self.mask_f1_new_halos] = self.mvir_dot[self.mask_f1_new_halos]
# evaluate equation (1)
self.dmdt_star[self.mask_f1_new_halos] = model.f_b * self.dMdt[self.mask_f1_new_halos] * model.epsilon(self.f1['/halo_properties/mvir'].value[self.mask_f1_new_halos], self.f1.attrs['redshift'] * n.ones_like(self.f1['/halo_properties/mvir'].value[self.mask_f1_new_halos]))
# evaluate accretion: 0 in this first step
# self.dmdt_star_accretion[self.mask_f1_new_halos] = n.zeros_like(self.dmdt_star[self.mask_f1_new_halos])
# evaluate equation (11)
f_lost = f_loss(self.f1.attrs['age_yr']) # equation (12)
# evaluate stellar mass
self.star_formation_rate[self.mask_f1_new_halos] = self.dmdt_star[self.mask_f1_new_halos] * (1. - f_lost) + self.dmdt_star_accretion[self.mask_f1_new_halos]
self.stellar_mass[self.mask_f1_new_halos] = self.star_formation_rate[self.mask_f1_new_halos] * self.f1.attrs['age_yr']
# intra-cluster mass is currently 0
# self.m_icm[self.mask_f1_new_halos] = n.zeros_like(self.stellar_mass[self.mask_f1_new_halos])
def compute_qtys_evolving_halos(self):
"""
update the quantities for evolving halos, present in f0 and f1.
masks :
* mask_f1_evolving_11_halos
* mask_f0_evolving_11_halos
subcases :
* quenching : (mvir < Mpeak) & (Mpeak_scale < f1_scale)
* case 1. ( age >= t_mpeak ) & ( age_yr < t_mpeak + t_quench)
* case 2. (age_yr >= t_mpeak + t_quench)
* stripping, case 1 : (dMdt < 0), then all mass goes to ICM, m=0, mdot=0
* stripping, case 2 : after reaching its peak mass, if M < 0.122 * Mpeak, then all mass goes to ICM, m=0, mdot=0
"""
# computing dMdt for the halo
self.mvir_dot[self.mask_f1_evolving_11_halos] = (self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos]-self.f0['/halo_properties/mvir'].value[self.mask_f0_evolving_11_halos]) / (self.f1.attrs['age_yr'] - self.f0.attrs['age_yr'])
self.rvir_dot[self.mask_f1_evolving_11_halos] = (self.f1['/halo_properties/rvir'].value[self.mask_f1_evolving_11_halos]-self.f0['/halo_properties/rvir'].value[self.mask_f0_evolving_11_halos]) / (self.f1.attrs['age_yr'] - self.f0.attrs['age_yr'])
c = self.f1['/halo_properties/rvir'].value[self.mask_f1_evolving_11_halos] / self.f1['/halo_properties/rs'].value[self.mask_f1_evolving_11_halos]
rho_nfw = self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos] / (self.f1['/halo_properties/rs'].value[self.mask_f1_evolving_11_halos]**3. * 4. * n.pi * c * (1+c)**2. * (n.log(1.+c)-c/(1.+c)))
pseudo_evolution_correction = 4.*n.pi*self.f1['/halo_properties/rvir'].value[self.mask_f1_evolving_11_halos] *self.f1['/halo_properties/rvir'].value[self.mask_f1_evolving_11_halos] * self.rvir_dot[self.mask_f1_evolving_11_halos] * rho_nfw
self.dMdt[self.mask_f1_evolving_11_halos] = self.mvir_dot[self.mask_f1_evolving_11_halos] - pseudo_evolution_correction
# initialize the ICM mass to the previous value
self.m_icm[self.mask_f1_evolving_11_halos] = self.f0['/emerge_data/m_icm'].value[self.mask_f0_evolving_11_halos]
# Direct estimates of stellar mass and SFR
self.dmdt_star[self.mask_f1_evolving_11_halos] = model.f_b * self.dMdt[self.mask_f1_evolving_11_halos] * model.epsilon(self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos], self.f1.attrs['redshift'] * n.ones_like(self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos]))
# evaluate accretion: 0 in this first step
# dmdt_star_accretion = n.zeros_like(dmdt_star[self.mask_f1_evolving_11_halos])
# evaluate equation (11)
f_lost = f_loss(self.f1.attrs['age_yr']-self.f0.attrs['age_yr'])
# evaluate stellar mass
self.star_formation_rate[self.mask_f1_evolving_11_halos] = self.dmdt_star[self.mask_f1_evolving_11_halos] * (1. - f_lost) + self.dmdt_star_accretion[self.mask_f1_evolving_11_halos]
self.stellar_mass[self.mask_f1_evolving_11_halos] = self.star_formation_rate[self.mask_f1_evolving_11_halos] * (self.f1.attrs['age_yr']-self.f0.attrs['age_yr']) + self.f0['/emerge_data/stellar_mass'].value[self.mask_f0_evolving_11_halos]
# Variations due to stripping, merging and quenching
# quenching
quenching = (self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos] < self.f1['/halo_properties/Mpeak'].value[self.mask_f1_evolving_11_halos]) & (self.f1['/halo_properties/Mpeak_scale'].value[self.mask_f1_evolving_11_halos] < float(self.f1_scale))
t_quench = tau_quenching( self.f0['/emerge_data/stellar_mass'].value[self.mask_f0_evolving_11_halos], self.t_dynamical[self.mask_f1_evolving_11_halos] )
t_mpeak = cosmoMD.age( 1. / self.f1['/halo_properties/Mpeak_scale'].value[self.mask_f1_evolving_11_halos] - 1. ).to(u.yr).value
# case 1. mdot = mdot at tpeak
quench_1 = (quenching) & (self.f1.attrs['age_yr'] >= t_mpeak ) & ( self.f1.attrs['age_yr'] < t_mpeak + t_quench)
if len(quench_1.nonzero()[0])>0:
print("quenching1")
self.star_formation_rate[self.mask_f1_evolving_11_halos][quench_1] = n.ones_like(self.star_formation_rate[self.mask_f1_evolving_11_halos][quench_1])*self.f0['/emerge_data/stellar_mass'].value[self.mask_f0_evolving_11_halos][quench_1]
self.stellar_mass[self.mask_f1_evolving_11_halos][quench_1] = self.star_formation_rate[self.mask_f1_evolving_11_halos][quench_1] * (self.f1.attrs['age_yr']-self.f0.attrs['age_yr']) + self.f0['/emerge_data/stellar_mass'].value[self.mask_f0_evolving_11_halos][quench_1]
# case 2. m dot =0
quench_2 = (quenching) &(self.f1.attrs['age_yr'] >= t_mpeak + t_quench )
if len(quench_2.nonzero()[0])>0:
print("quenching2")
self.star_formation_rate[self.mask_f1_evolving_11_halos][quench_2] = n.zeros_like(self.star_formation_rate[self.mask_f1_evolving_11_halos][quench_2])
self.stellar_mass[self.mask_f1_evolving_11_halos][quench_2] = self.f0['/emerge_data/stellar_mass'].value[self.mask_f0_evolving_11_halos][quench_2]
# stripping, case 1
# negative growth value self.dMdt[self.mask_f1_evolving_11_halos] => 0
stripping_1 = (self.dMdt[self.mask_f1_evolving_11_halos] < 0)
# stripping, case 2
# after reaching its peak mass,
# if M < 0.122 * Mpeak, all mass goes to ICM, m=0, mdot=0
stripping_2 = (self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos] < 0.122*self.f1['/halo_properties/Mpeak'].value[self.mask_f1_evolving_11_halos]) & (self.f1['/halo_properties/Mpeak_scale'].value[self.mask_f1_evolving_11_halos] < float(self.f1_scale))
# both cases together
stripping = (stripping_1) | (stripping_1)
if len(stripping.nonzero()[0])>0:
print("stripping")
self.m_icm[self.mask_f1_evolving_11_halos][stripping] += self.f0['/emerge_data/stellar_mass'].value[self.mask_f0_evolving_11_halos][stripping]
self.stellar_mass[self.mask_f1_evolving_11_halos][stripping] = n.zeros_like(self.stellar_mass[self.mask_f1_evolving_11_halos][stripping])
self.star_formation_rate[self.mask_f1_evolving_11_halos][stripping] = n.zeros_like(self.star_formation_rate[self.mask_f1_evolving_11_halos][stripping])
def get_position_merger_players(self, merger_id):
"""
Given the identifier of the merger
:param merger_id: id of the parent halo of the merger at the later time. One integer.
Outputs the position on the f0 and f1 arrays of the hosts and of the merging systems
returns :
position_f1_host [int], position_f0_host [int], position_f0_merging [list]
"""
# about the host at t1
#print(merger_id)
mask_f1_host = (self.f1['/halo_properties/id'].value == merger_id)
#print(mask_f1_host)
position_f1_host = self.positions_f1[mask_f1_host]
#print(position_f1_host)
# about the host and merging subhalos at t0
mask_f0_all = (self.f0['/halo_properties/desc_id'].value == merger_id)
#print(mask_f0_all)
id_f0_all = self.f0['/halo_properties/id'].value[mask_f0_all]
#print(id_f0_all)
# the host at t1 is flagged at t0 as the most massive progenitor
#print(n.unique(self.f0['/halo_properties/Future_merger_MMP_ID'].value[mask_f0_all]))
#print(n.in1d(id_f0_all, n.unique(self.f0['/halo_properties/Future_merger_MMP_ID'].value[mask_f0_all])))
#print(id_f0_all[n.in1d(id_f0_all, n.unique(self.f0['/halo_properties/Future_merger_MMP_ID'].value[mask_f0_all]))])
f0_host_id = id_f0_all[n.in1d(id_f0_all, n.unique(self.f0['/halo_properties/Future_merger_MMP_ID'].value[mask_f0_all]))][0]
mask_f0_host = (mask_f0_all) & (self.f0['/halo_properties/id'].value == f0_host_id)
mask_f0_merging = (mask_f0_all) & (self.f0['/halo_properties/id'].value != f0_host_id)
position_f0_host = self.positions_f0[mask_f0_host]
position_f0_merging = self.positions_f0[mask_f0_merging]
return position_f1_host, position_f0_host, position_f0_merging
	def merging_single_system(self, merger_id):
		"""
		Computes the emerge model quantities for one merger remnant.
		:param merger_id: id of the parent halo of the merger at the later time. One integer.
		Merging goes as follows. Assume escape fraction: f_esc = 0.388, then
		* m_star_satellite x f_esc goes to m_host_ICM
		* m_star_satellite x (1-f_esc) goes to m_star_host
		returns :
		parameters of the emerge model of the galaxies undergoing merger at this point.
		[ mvir_dot, rvir_dot, dMdt, dmdt_star, dmdt_star_accretion, stellar_mass, star_formation_rate, m_icm ]
		"""
		# positions of the remnant (f1), the main progenitor and the
		# infalling satellites (f0)
		position_f1_host, position_f0_host, position_f0_merging = self.get_position_merger_players(merger_id)
		# host growth rates between the two snapshots
		mvir_dot = (self.f1['/halo_properties/mvir'].value[position_f1_host]-self.f0['/halo_properties/mvir'].value[position_f0_host]) / (self.f1.attrs['age_yr'] - self.f0.attrs['age_yr'])
		rvir_dot = (self.f1['/halo_properties/rvir'].value[position_f1_host]-self.f0['/halo_properties/rvir'].value[position_f0_host]) / (self.f1.attrs['age_yr'] - self.f0.attrs['age_yr'])
		# NFW concentration and density at the virial radius, used to remove
		# the pseudo-evolution part of the growth
		c = self.f1['/halo_properties/rvir'].value[position_f1_host] / self.f1['/halo_properties/rs'].value[position_f1_host]
		rho_nfw = self.f1['/halo_properties/mvir'].value[position_f1_host] / (self.f1['/halo_properties/rs'].value[position_f1_host]**3. * 4. * n.pi * c * (1+c)**2. * (n.log(1.+c)-c/(1.+c)))
		pseudo_evolution_correction = 4.*n.pi*self.f1['/halo_properties/rvir'].value[position_f1_host] *self.f1['/halo_properties/rvir'].value[position_f1_host] * rvir_dot * rho_nfw
		dMdt = mvir_dot - pseudo_evolution_correction
		# initialize the ICM mass to the previous value
		m_icm = self.f0['/emerge_data/m_icm'].value[position_f0_host]
		# Direct estimates of stellar mass and SFR
		dmdt_star = model.f_b * dMdt * model.epsilon(self.f1['/halo_properties/mvir'].value[position_f1_host], self.f1.attrs['redshift'] * n.ones_like(self.f1['/halo_properties/mvir'].value[position_f1_host]))
		# evaluate accretion: 0 in this first step
		dmdt_star_accretion = n.zeros_like(dmdt_star)
		# evaluate equation (11)
		f_lost = f_loss(self.f1.attrs['age_yr']-self.f0.attrs['age_yr'])
		# evaluate stellar mass
		star_formation_rate = dmdt_star * (1. - f_lost) + dmdt_star_accretion
		stellar_mass = star_formation_rate * (self.f1.attrs['age_yr']-self.f0.attrs['age_yr']) + self.f0['/emerge_data/stellar_mass'].value[position_f0_host]
		# merging
		# m_star_sat x f_esc => m_host_ICM
		# m_star_sat x (1-f_esc) => m_star_host
		# f_esc = 0.388
		#Time_to_future_merger: Time (in Gyr) until the given halo merges into a larger halo. (-1 if no future merger happens)
		#Future_merger_MMP_ID: most-massive progenitor of the halo into which the given halo merges. (-1 if the main progenitor of the future merger halo does not exist at the given scale factor.)
		# redistribute the satellite stellar mass between host and ICM
		stellar_mass += (1.-0.388)*n.sum(self.f0['/emerge_data/stellar_mass'].value[position_f0_merging])
		m_icm += 0.388*n.sum(self.f0['/emerge_data/stellar_mass'].value[position_f0_merging])
		return mvir_dot, rvir_dot, dMdt, dmdt_star, dmdt_star_accretion, stellar_mass, star_formation_rate, m_icm
def merging_set_of_system(self, merger_ids):
"""
Loops over self.merging_single_system over a list of ids and returns a merged output array
"""
return n.hstack(( n.array([self.merging_single_system(merger_id) for merger_id in merger_ids]) ))
	def compute_qtys_merging_halos(self):
		"""
		computes all quantities for merging halos
		"""
		# NOTE(review): pool.map feeds ONE id per call, but
		# merging_set_of_system iterates over its argument as a sequence —
		# confirm a scalar id is iterable here; otherwise this should map
		# merging_single_system instead.
		# NOTE(review): a bound method of an object holding open h5py files
		# may not pickle under multiprocessing — verify on the target setup.
		pool = Pool(processes=12)
		self.out3 = pool.map(self.merging_set_of_system, self.f1['/halo_properties/id'].value[ self.mask_f1_in_a_merging ])
		#self.out3 = p.starmap(self.merging_set_of_system, self.f1['/halo_properties/id'].value[ self.mask_f1_in_a_merging ])
"""
if __name__ == '__main__':
import EmergeIterate
iterate = EmergeIterate.EmergeIterate(22, 'MD10')
iterate.open_snapshots()
iterate.map_halos_between_snapshots()
iterate.init_new_quantities()
if len((iterate.mask_f1_new_halos).nonzero()[0]) > 0 :
# computes the new quantitiess
pool = Pool(processes=12)
DATA = n.transpose([iterate.f1['/halo_properties/mvir'].value[iterate.mask_f1_new_halos], rvir=iterate.f1['/halo_properties/rvir'].value[iterate.mask_f1_new_halos], iterate.f1.attrs['redshift']*n.ones_like(iterate.f1['/halo_properties/mvir'].value[iterate.mask_f1_new_halos]), iterate.f1.attrs['age_yr']*n.ones_like(iterate.f1['/halo_properties/mvir'].value[iterate.mask_f1_new_halos]) ])
out = p.starmap(iterate.compute_qtys_new_halos_pk, DATA)
mvir_dot, rvir_dot, dMdt, dmdt_star, star_formation_rate, stellar_mass = out
#, f_b=model.f_b, epsilon = model.epsilon(mvir, redshift * n.ones_like(mvir)), f_lost = f_loss(iterate.f1.attrs['age_yr']))
# updates the initiated array with the results
iterate.mvir_dot[iterate.mask_f1_new_halos] = mvir_dot
iterate.rvir_dot[iterate.mask_f1_new_halos] = rvir_dot
iterate.dMdt[iterate.mask_f1_new_halos] = dMdt
iterate.dmdt_star[iterate.mask_f1_new_halos] = dmdt_star
iterate.star_formation_rate[iterate.mask_f1_new_halos] = star_formation_rate
iterate.stellar_mass[iterate.mask_f1_new_halos] = stellar_mass
#iterate.compute_qtys_new_halos()
if len((iterate.mask_f0_evolving_11_halos).nonzero()[0]) > 0 :
iterate.compute_qtys_evolving_halos()
if len(iterate.mask_f1_in_a_merging.nonzero()[0]) > 0 :
iterate.compute_qtys_merging_halos()
# iterate.write_results()
"""
|
JohanComparat/pyEmerge
|
python/EmergeIterate.py
|
Python
|
unlicense
| 30,464
|
[
"Galaxy"
] |
ebf6aa686327d97aa3ac07a2026fcf0f13d9c5137ff3a3003d34feb58fdfce45
|
#
# @file TestListOf.py
# @brief ListOf unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestListOf.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestListOf(unittest.TestCase):
  """Unit tests for the libsbml ListOf container."""
  def _release(self, obj):
    # Mimics the generated C-style teardown: drop the last reference.
    _dummyList = [ obj ]; _dummyList[:] = []; del _dummyList
  def test_ListOf_append(self):
    model = libsbml.Model(2,4)
    model.createCompartment()
    compartments = model.getListOfCompartments()
    self.assert_( compartments.size() == 1 )
    # appending a compartment to a list of compartments succeeds
    status = compartments.append(libsbml.Compartment(2,4))
    self.assert_( status == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( compartments.size() == 2 )
    # a species does not belong in a list of compartments
    sp = libsbml.Species(2,4)
    status = compartments.append(sp)
    self.assert_( status == libsbml.LIBSBML_INVALID_OBJECT )
    self.assert_( compartments.size() == 2 )
    self._release(model)
    self._release(sp)
    pass
  def test_ListOf_clear(self):
    lo = libsbml.ListOf(2,4)
    sp = libsbml.Species(2,4)
    for _ in range(5):
      lo.append(sp)
    self.assert_( lo.size() == 5 )
    # clear with doDelete=True empties the list
    lo.clear(True)
    self.assert_( lo.size() == 0 )
    for _ in range(4):
      lo.append(sp)
    lo.appendAndOwn(sp)
    self.assert_( lo.size() == 5 )
    for position in range(5):
      self._release(lo.get(position))
    # clear with doDelete=False leaves the elements alive
    lo.clear(False)
    self.assert_( lo.size() == 0 )
    self._release(lo)
    pass
  def test_ListOf_create(self):
    lo = libsbml.ListOf(2,4)
    # a fresh ListOf is an empty SBML container with no metadata
    self.assert_( lo.getTypeCode() == libsbml.SBML_LIST_OF )
    self.assert_( lo.getNotes() == None )
    self.assert_( lo.getAnnotation() == None )
    self.assert_( lo.getMetaId() == "" )
    self.assert_( lo.size() == 0 )
    self._release(lo)
    pass
  def test_ListOf_free_NULL(self):
    # releasing a NULL pointer must be a harmless no-op
    self._release(None)
    pass
  def test_ListOf_remove(self):
    lo = libsbml.ListOf(2,4)
    sp = libsbml.Species(2,4)
    self.assert_( lo.size() == 0 )
    for _ in range(5):
      lo.append(sp)
    self.assert_( lo.size() == 5 )
    # removing the head five times drains the list
    for _ in range(5):
      self._release(lo.remove(0))
    self.assert_( lo.size() == 0 )
    for _ in range(4):
      lo.append(sp)
    lo.appendAndOwn(sp)
    self.assert_( lo.size() == 5 )
    self._release(lo)
    pass
def suite():
  """Builds the test suite containing every TestListOf case."""
  return unittest.TestSuite((unittest.makeSuite(TestListOf),))
if __name__ == "__main__":
  # Exit 0 on success, 1 on failure, so callers can inspect the result.
  outcome = unittest.TextTestRunner(verbosity=1).run(suite())
  sys.exit(0 if outcome.wasSuccessful() else 1)
|
TheCoSMoCompany/biopredyn
|
Prototype/src/libsbml-5.10.0/src/bindings/python/test/sbml/TestListOf.py
|
Python
|
bsd-3-clause
| 4,604
|
[
"VisIt"
] |
2275fd04f1613877f1c3f0e87e947b7c52088511e1a2b39f50cae4a5815ac2dc
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import mock
import six
from openstack import exceptions
from openstack import format
from openstack import resource2
from openstack import session
from openstack.tests.unit import base
class TestComponent(base.TestCase):
    """Tests for the _BaseComponent descriptor (get/set/delete protocol)."""
    class ExampleComponent(resource2._BaseComponent):
        key = "_example"
    # Since we're testing ExampleComponent, which is as isolated as we
    # can test _BaseComponent due to it's needing to be a data member
    # of a class that has an attribute on the parent class named `key`,
    # each test has to implement a class with a name that is the same
    # as ExampleComponent.key, which should be a dict containing the
    # keys and values to test against.
    def test_implementations(self):
        # each concrete component stores its data under a distinct key
        self.assertEqual("_body", resource2.Body.key)
        self.assertEqual("_header", resource2.Header.key)
        self.assertEqual("_uri", resource2.URI.key)
    def test_creation(self):
        # constructor arguments are stored verbatim on the component
        sot = resource2._BaseComponent("name", type=int, default=1,
                                       alternate_id=True)
        self.assertEqual("name", sot.name)
        self.assertEqual(int, sot.type)
        self.assertEqual(1, sot.default)
        self.assertTrue(sot.alternate_id)
    def test_get_no_instance(self):
        sot = resource2._BaseComponent("test")
        # Test that we short-circuit everything when given no instance.
        result = sot.__get__(None, None)
        self.assertIsNone(result)
    # NOTE: Some tests will use a default=1 setting when testing result
    # values that should be None because the default-for-default is also None.
    def test_get_name_None(self):
        name = "name"
        class Parent(object):
            _example = {name: None}
        instance = Parent()
        sot = TestComponent.ExampleComponent(name, default=1)
        # Test that we short-circuit any typing of a None value.
        result = sot.__get__(instance, None)
        self.assertIsNone(result)
    def test_get_default(self):
        expected_result = 123
        class Parent(object):
            _example = {}
        instance = Parent()
        # NOTE: type=dict but the default value is an int. If we didn't
        # short-circuit the typing part of __get__ it would fail.
        sot = TestComponent.ExampleComponent("name", type=dict,
                                             default=expected_result)
        # Test that we directly return any default value.
        result = sot.__get__(instance, None)
        self.assertEqual(expected_result, result)
    def test_get_name_untyped(self):
        name = "name"
        expected_result = 123
        class Parent(object):
            _example = {name: expected_result}
        instance = Parent()
        sot = TestComponent.ExampleComponent("name")
        # Test that we return any the value as it is set.
        result = sot.__get__(instance, None)
        self.assertEqual(expected_result, result)
    # The code path for typing after a raw value has been found is the same.
    def test_get_name_typed(self):
        name = "name"
        value = "123"
        class Parent(object):
            _example = {name: value}
        instance = Parent()
        sot = TestComponent.ExampleComponent("name", type=int)
        # Test that we run the underlying value through type conversion.
        result = sot.__get__(instance, None)
        self.assertEqual(int(value), result)
    def test_get_name_formatter(self):
        name = "name"
        value = "123"
        expected_result = "one hundred twenty three"
        class Parent(object):
            _example = {name: value}
        # stand-in for a format.Formatter subclass; only deserialize is used
        class FakeFormatter(object):
            @classmethod
            def deserialize(cls, value):
                return expected_result
        instance = Parent()
        sot = TestComponent.ExampleComponent("name", type=FakeFormatter)
        # Mock out issubclass rather than having an actual format.Formatter
        # This can't be mocked via decorator, isolate it to wrapping the call.
        mock_issubclass = mock.Mock(return_value=True)
        module = six.moves.builtins.__name__
        with mock.patch("%s.issubclass" % module, mock_issubclass):
            result = sot.__get__(instance, None)
        self.assertEqual(expected_result, result)
    def test_set_name_untyped(self):
        name = "name"
        expected_value = "123"
        class Parent(object):
            _example = {}
        instance = Parent()
        sot = TestComponent.ExampleComponent("name")
        # Test that we don't run the value through type conversion.
        sot.__set__(instance, expected_value)
        self.assertEqual(expected_value, instance._example[name])
    def test_set_name_typed(self):
        expected_value = "123"
        class Parent(object):
            _example = {}
        instance = Parent()
        # The type we give to ExampleComponent has to be an actual type,
        # not an instance, so we can't get the niceties of a mock.Mock
        # instance that would allow us to call `assert_called_once_with` to
        # ensure that we're sending the value through the type.
        # Instead, we use this tiny version of a similar thing.
        class FakeType(object):
            calls = []
            def __init__(self, arg):
                FakeType.calls.append(arg)
        sot = TestComponent.ExampleComponent("name", type=FakeType)
        # Test that we run the value through type conversion.
        sot.__set__(instance, expected_value)
        self.assertEqual([expected_value], FakeType.calls)
    def test_set_name_formatter(self):
        expected_value = "123"
        class Parent(object):
            _example = {}
        instance = Parent()
        # As with test_set_name_typed, create a pseudo-Mock to track what
        # gets called on the type.
        class FakeFormatter(format.Formatter):
            calls = []
            @classmethod
            def serialize(cls, arg):
                FakeFormatter.calls.append(arg)
        sot = TestComponent.ExampleComponent("name", type=FakeFormatter)
        # Test that we run the value through type conversion.
        sot.__set__(instance, expected_value)
        self.assertEqual([expected_value], FakeFormatter.calls)
    def test_delete_name(self):
        name = "name"
        expected_value = "123"
        class Parent(object):
            _example = {name: expected_value}
        instance = Parent()
        sot = TestComponent.ExampleComponent("name")
        # deleting removes the key from the backing store
        sot.__delete__(instance)
        self.assertNotIn(name, instance._example)
    def test_delete_name_doesnt_exist(self):
        name = "name"
        expected_value = "123"
        class Parent(object):
            _example = {"what": expected_value}
        instance = Parent()
        sot = TestComponent.ExampleComponent(name)
        # deleting a missing key must not raise
        sot.__delete__(instance)
        self.assertNotIn(name, instance._example)
class TestComponentManager(base.TestCase):
    """Tests for _ComponentManager attribute storage and dirty tracking."""
    def test_create_basic(self):
        manager = resource2._ComponentManager()
        # a bare manager starts empty and fully synchronized
        self.assertEqual(dict(), manager.attributes)
        self.assertEqual(set(), manager._dirty)
    def test_create_unsynced(self):
        attributes = {"hey": 1, "hi": 2, "hello": 3}
        manager = resource2._ComponentManager(attributes=attributes,
                                              synchronized=False)
        # unsynchronized attributes are all considered dirty
        self.assertEqual(attributes, manager.attributes)
        self.assertEqual(set(attributes.keys()), manager._dirty)
    def test_create_synced(self):
        attributes = {"hey": 1, "hi": 2, "hello": 3}
        manager = resource2._ComponentManager(attributes=attributes,
                                              synchronized=True)
        # synchronized attributes carry no dirty state
        self.assertEqual(attributes, manager.attributes)
        self.assertEqual(set(), manager._dirty)
    def test_getitem(self):
        manager = resource2._ComponentManager(attributes={"key": "value"})
        self.assertEqual("value", manager.__getitem__("key"))
    def test_setitem_new(self):
        manager = resource2._ComponentManager()
        # a newly set key is stored and marked dirty
        manager.__setitem__("key", "value")
        self.assertIn("key", manager.attributes)
        self.assertIn("key", manager.dirty)
    def test_setitem_unchanged(self):
        manager = resource2._ComponentManager(attributes={"key": "value"},
                                              synchronized=True)
        # This shouldn't end up in the dirty list since we're just re-setting.
        manager.__setitem__("key", "value")
        self.assertEqual("value", manager.attributes["key"])
        self.assertNotIn("key", manager.dirty)
    def test_delitem(self):
        manager = resource2._ComponentManager(attributes={"key": "value"},
                                              synchronized=True)
        # deletion records the key as dirty with a None value
        manager.__delitem__("key")
        self.assertIsNone(manager.dirty["key"])
    def test_iter(self):
        attributes = {"key": "value"}
        manager = resource2._ComponentManager(attributes=attributes)
        self.assertItemsEqual(iter(attributes), manager.__iter__())
    def test_len(self):
        attributes = {"key": "value"}
        manager = resource2._ComponentManager(attributes=attributes)
        self.assertEqual(len(attributes), manager.__len__())
    def test_dirty(self):
        manager = resource2._ComponentManager(attributes={"key": "value"},
                                              synchronized=False)
        self.assertEqual({"key": "value"}, manager.dirty)
        # setting another key extends the dirty mapping
        manager.__setitem__("key2", "value")
        self.assertEqual({"key": "value", "key2": "value"}, manager.dirty)
    def test_clean(self):
        attributes = {"key": "value"}
        manager = resource2._ComponentManager(attributes=attributes,
                                              synchronized=False)
        self.assertEqual(attributes, manager.dirty)
        # clean() flushes all dirty state
        manager.clean()
        self.assertEqual(dict(), manager.dirty)
class Test_Request(base.TestCase):
    """Tests for the _Request value container."""
    def test_create(self):
        # _Request simply records its three positional parts verbatim
        request = resource2._Request(1, 2, 3)
        self.assertEqual(1, request.uri)
        self.assertEqual(2, request.body)
        self.assertEqual(3, request.headers)
class TestQueryParameters(base.TestCase):
    """Tests for QueryParameters mapping and query transposition."""
    def test_create(self):
        params = resource2.QueryParameters("location", first_name="first-name")
        # limit and marker are always part of the mapping
        expected = {"location": "location",
                    "first_name": "first-name",
                    "limit": "limit",
                    "marker": "marker"}
        self.assertEqual(expected, params._mapping)
    def test_transpose_unmapped(self):
        params = resource2.QueryParameters("location", first_name="first-name")
        query = {"location": "Brooklyn",
                 "first_name": "Brian",
                 "last_name": "Curtin"}
        # last_name isn't mapped and shouldn't be included
        self.assertEqual({"location": "Brooklyn", "first-name": "Brian"},
                         params._transpose(query))
    def test_transpose_not_in_query(self):
        params = resource2.QueryParameters("location", first_name="first-name")
        # first_name not being in the query shouldn't affect results
        self.assertEqual({"location": "Brooklyn"},
                         params._transpose({"location": "Brooklyn"}))
class TestResource(base.TestCase):
    """Tests for the core behaviors of resource2.Resource."""

    def test_initialize_basic(self):
        """Constructor splits kwargs into body/header/uri components."""
        body = {"body": 1}
        header = {"header": 2, "Location": "somewhere"}
        uri = {"uri": 3}
        everything = dict(itertools.chain(body.items(), header.items(),
                                          uri.items()))

        mock_collect = mock.Mock()
        mock_collect.return_value = body, header, uri

        with mock.patch.object(resource2.Resource,
                               "_collect_attrs", mock_collect):
            sot = resource2.Resource(_synchronized=False, **everything)
            mock_collect.assert_called_once_with(everything)

        self.assertEqual("somewhere", sot.location)
        self.assertIsInstance(sot._body, resource2._ComponentManager)
        self.assertEqual(body, sot._body.dirty)
        self.assertIsInstance(sot._header, resource2._ComponentManager)
        self.assertEqual(header, sot._header.dirty)
        self.assertIsInstance(sot._uri, resource2._ComponentManager)
        self.assertEqual(uri, sot._uri.dirty)

        # A bare Resource permits no operations by default.
        self.assertFalse(sot.allow_create)
        self.assertFalse(sot.allow_get)
        self.assertFalse(sot.allow_update)
        self.assertFalse(sot.allow_delete)
        self.assertFalse(sot.allow_list)
        self.assertFalse(sot.allow_head)
        self.assertFalse(sot.patch_update)
        self.assertFalse(sot.put_create)

    def test_repr(self):
        """repr() includes the qualified class name and each component."""
        a = {"a": 1}
        b = {"b": 2}
        c = {"c": 3}

        class Test(resource2.Resource):
            def __init__(self):
                self._body = mock.Mock()
                self._body.attributes.items = mock.Mock(
                    return_value=a.items())

                self._header = mock.Mock()
                self._header.attributes.items = mock.Mock(
                    return_value=b.items())

                self._uri = mock.Mock()
                self._uri.attributes.items = mock.Mock(
                    return_value=c.items())

        the_repr = repr(Test())

        # Don't test the arguments all together since the dictionary order
        # they're rendered in can't be depended on, nor does it matter.
        self.assertIn("openstack.tests.unit.test_resource2.Test", the_repr)
        self.assertIn("a=1", the_repr)
        self.assertIn("b=2", the_repr)
        self.assertIn("c=3", the_repr)

    def test_equality(self):
        """Resources compare equal when all components are equal."""
        class Example(resource2.Resource):
            x = resource2.Body("x")
            y = resource2.Header("y")
            z = resource2.URI("z")

        e1 = Example(x=1, y=2, z=3)
        e2 = Example(x=1, y=2, z=3)
        e3 = Example(x=0, y=0, z=0)
        self.assertEqual(e1, e2)
        self.assertNotEqual(e1, e3)

    def test__update(self):
        """_update distributes collected attrs to each component manager."""
        sot = resource2.Resource()

        body = "body"
        header = "header"
        uri = "uri"

        sot._collect_attrs = mock.Mock(return_value=(body, header, uri))
        sot._body.update = mock.Mock()
        sot._header.update = mock.Mock()
        sot._uri.update = mock.Mock()

        args = {"arg": 1}
        sot._update(**args)

        sot._collect_attrs.assert_called_once_with(args)
        sot._body.update.assert_called_once_with(body)
        sot._header.update.assert_called_once_with(header)
        sot._uri.update.assert_called_once_with(uri)

    def test__collect_attrs(self):
        """_collect_attrs returns one dict per component."""
        sot = resource2.Resource()

        expected_attrs = ["body", "header", "uri"]

        sot._consume_attrs = mock.Mock()
        sot._consume_attrs.side_effect = expected_attrs

        # It'll get passed an empty dict at the least.
        actual_attrs = sot._collect_attrs(dict())

        self.assertItemsEqual(expected_attrs, actual_attrs)

    def test__consume_attrs(self):
        """_consume_attrs pops mapped keys and returns them server-side keyed."""
        serverside_key1 = "someKey1"
        clientside_key1 = "some_key1"
        serverside_key2 = "someKey2"
        clientside_key2 = "some_key2"
        value1 = "value1"
        value2 = "value2"

        mapping = {clientside_key1: serverside_key1,
                   clientside_key2: serverside_key2}

        other_key = "otherKey"
        other_value = "other"
        attrs = {clientside_key1: value1,
                 serverside_key2: value2,
                 other_key: other_value}

        sot = resource2.Resource()

        result = sot._consume_attrs(mapping, attrs)

        # Make sure that the expected key was consumed and we're only
        # left with the other stuff.
        self.assertDictEqual({other_key: other_value}, attrs)

        # Make sure that after we've popped our relevant client-side
        # key off that we are returning it keyed off of its server-side
        # name.
        self.assertDictEqual({serverside_key1: value1,
                              serverside_key2: value2}, result)

    def test__mapping_defaults(self):
        # Check that even on an empty class, we get the expected
        # built-in attributes.
        self.assertIn("location", resource2.Resource._header_mapping())
        self.assertIn("name", resource2.Resource._body_mapping())
        self.assertIn("id", resource2.Resource._body_mapping())

    def test__mapping_overrides(self):
        # Iterating through the MRO used to wipe out overrides of mappings
        # found in base classes.
        new_name = "MyName"
        new_id = "MyID"

        class Test(resource2.Resource):
            name = resource2.Body(new_name)
            id = resource2.Body(new_id)

        mapping = Test._body_mapping()

        self.assertEqual(new_name, mapping["name"])
        self.assertEqual(new_id, mapping["id"])

    def test__body_mapping(self):
        """All Body components show up in the body mapping."""
        class Test(resource2.Resource):
            x = resource2.Body("x")
            y = resource2.Body("y")
            z = resource2.Body("z")

        self.assertIn("x", Test._body_mapping())
        self.assertIn("y", Test._body_mapping())
        self.assertIn("z", Test._body_mapping())

    def test__header_mapping(self):
        """All Header components show up in the header mapping."""
        class Test(resource2.Resource):
            x = resource2.Header("x")
            y = resource2.Header("y")
            z = resource2.Header("z")

        self.assertIn("x", Test._header_mapping())
        self.assertIn("y", Test._header_mapping())
        self.assertIn("z", Test._header_mapping())

    def test__uri_mapping(self):
        """All URI components show up in the uri mapping."""
        class Test(resource2.Resource):
            x = resource2.URI("x")
            y = resource2.URI("y")
            z = resource2.URI("z")

        self.assertIn("x", Test._uri_mapping())
        self.assertIn("y", Test._uri_mapping())
        self.assertIn("z", Test._uri_mapping())

    def test__getattribute__id_in_body(self):
        id = "lol"
        sot = resource2.Resource(id=id)

        result = getattr(sot, "id")
        self.assertEqual(result, id)

    def test__getattribute__id_with_alternate(self):
        id = "lol"

        class Test(resource2.Resource):
            blah = resource2.Body("blah", alternate_id=True)

        sot = Test(blah=id)

        result = getattr(sot, "id")
        self.assertEqual(result, id)

    def test__getattribute__id_without_alternate(self):
        class Test(resource2.Resource):
            id = None

        sot = Test()
        self.assertIsNone(sot.id)

    def test__alternate_id_None(self):
        """With no alternate_id declared, the lookup returns an empty string."""
        self.assertEqual("", resource2.Resource._alternate_id())

    def test__alternate_id(self):
        class Test(resource2.Resource):
            alt = resource2.Body("the_alt", alternate_id=True)

        # Fixed: this was assertTrue("the_alt", ...), which always passes
        # because the second argument is the failure message, not a value
        # to compare. assertEqual performs the intended check.
        self.assertEqual("the_alt", Test._alternate_id())

        value1 = "lol"
        sot = Test(alt=value1)
        self.assertEqual(sot.alt, value1)
        self.assertEqual(sot.id, value1)

        value2 = "rofl"
        sot = Test(the_alt=value2)
        self.assertEqual(sot.alt, value2)
        self.assertEqual(sot.id, value2)

    def test__get_id_instance(self):
        class Test(resource2.Resource):
            id = resource2.Body("id")

        value = "id"
        sot = Test(id=value)

        self.assertEqual(value, sot._get_id(sot))

    def test__get_id_instance_alternate(self):
        class Test(resource2.Resource):
            attr = resource2.Body("attr", alternate_id=True)

        value = "id"
        sot = Test(attr=value)

        self.assertEqual(value, sot._get_id(sot))

    def test__get_id_value(self):
        """_get_id passes through a bare value unchanged."""
        value = "id"
        self.assertEqual(value, resource2.Resource._get_id(value))

    def test_to_dict(self):
        class Test(resource2.Resource):
            foo = resource2.Header('foo')
            bar = resource2.Body('bar')

        res = Test(id='FAKE_ID')

        expected = {
            'id': 'FAKE_ID',
            'name': None,
            'location': None,
            'foo': None,
            'bar': None
        }
        self.assertEqual(expected, res.to_dict())

    def test_to_dict_no_body(self):
        class Test(resource2.Resource):
            foo = resource2.Header('foo')
            bar = resource2.Body('bar')

        res = Test(id='FAKE_ID')

        expected = {
            'location': None,
            'foo': None,
        }
        self.assertEqual(expected, res.to_dict(body=False))

    def test_to_dict_no_header(self):
        class Test(resource2.Resource):
            foo = resource2.Header('foo')
            bar = resource2.Body('bar')

        res = Test(id='FAKE_ID')

        expected = {
            'id': 'FAKE_ID',
            'name': None,
            'bar': None
        }
        self.assertEqual(expected, res.to_dict(headers=False))

    def test_to_dict_ignore_none(self):
        class Test(resource2.Resource):
            foo = resource2.Header('foo')
            bar = resource2.Body('bar')

        res = Test(id='FAKE_ID', bar='BAR')

        expected = {
            'id': 'FAKE_ID',
            'bar': 'BAR',
        }
        self.assertEqual(expected, res.to_dict(ignore_none=True))

    def test_to_dict_with_mro(self):
        """Attributes from the whole class hierarchy appear in the dict."""
        class Parent(resource2.Resource):
            foo = resource2.Header('foo')
            bar = resource2.Body('bar')

        class Child(Parent):
            foo_new = resource2.Header('foo_baz_server')
            bar_new = resource2.Body('bar_baz_server')

        res = Child(id='FAKE_ID')

        expected = {
            'foo': None,
            'bar': None,
            'foo_new': None,
            'bar_new': None,
            'id': 'FAKE_ID',
            'location': None,
            'name': None
        }
        self.assertEqual(expected, res.to_dict())

    def test_to_dict_value_error(self):
        """Requesting neither body nor headers is an error."""
        class Test(resource2.Resource):
            foo = resource2.Header('foo')
            bar = resource2.Body('bar')

        res = Test(id='FAKE_ID')

        err = self.assertRaises(ValueError,
                                res.to_dict, body=False, headers=False)
        self.assertEqual('At least one of `body` or `headers` must be True',
                         six.text_type(err))

    def test_to_dict_with_mro_no_override(self):
        class Parent(resource2.Resource):
            header = resource2.Header('HEADER')
            body = resource2.Body('BODY')

        class Child(Parent):
            # The following two properties are not supposed to be overridden
            # by the parent class property values.
            header = resource2.Header('ANOTHER_HEADER')
            body = resource2.Body('ANOTHER_BODY')

        res = Child(id='FAKE_ID', body='BODY_VALUE', header='HEADER_VALUE')

        expected = {
            'body': 'BODY_VALUE',
            'header': 'HEADER_VALUE',
            'id': 'FAKE_ID',
            'location': None,
            'name': None
        }
        self.assertEqual(expected, res.to_dict())

    def test_new(self):
        """new() marks attributes dirty so they'll be sent on create."""
        class Test(resource2.Resource):
            attr = resource2.Body("attr")

        value = "value"
        sot = Test.new(attr=value)

        self.assertIn("attr", sot._body.dirty)
        self.assertEqual(value, sot.attr)

    def test_existing(self):
        """existing() treats attributes as synchronized (not dirty)."""
        class Test(resource2.Resource):
            attr = resource2.Body("attr")

        value = "value"
        sot = Test.existing(attr=value)

        self.assertNotIn("attr", sot._body.dirty)
        self.assertEqual(value, sot.attr)

    def test__prepare_request_with_id(self):
        class Test(resource2.Resource):
            base_path = "/something"
            body_attr = resource2.Body("x")
            header_attr = resource2.Header("y")

        the_id = "id"
        body_value = "body"
        header_value = "header"
        sot = Test(id=the_id, body_attr=body_value, header_attr=header_value,
                   _synchronized=False)

        result = sot._prepare_request(requires_id=True)

        self.assertEqual("something/id", result.uri)
        self.assertEqual({"x": body_value, "id": the_id}, result.body)
        self.assertEqual({"y": header_value}, result.headers)

    def test__prepare_request_missing_id(self):
        sot = resource2.Resource(id=None)

        self.assertRaises(exceptions.InvalidRequest,
                          sot._prepare_request, requires_id=True)

    def test__prepare_request_with_key(self):
        """prepend_key nests the body under the resource_key."""
        key = "key"

        class Test(resource2.Resource):
            base_path = "/something"
            resource_key = key
            body_attr = resource2.Body("x")
            header_attr = resource2.Header("y")

        body_value = "body"
        header_value = "header"
        sot = Test(body_attr=body_value, header_attr=header_value,
                   _synchronized=False)

        result = sot._prepare_request(requires_id=False, prepend_key=True)

        self.assertEqual("/something", result.uri)
        self.assertEqual({key: {"x": body_value}}, result.body)
        self.assertEqual({"y": header_value}, result.headers)

    def test__filter_component(self):
        client_name = "client_name"
        server_name = "serverName"
        value = "value"
        # Include something in the mapping that we don't receive
        # so the branch that looks at existence in the compoment is checked.
        mapping = {client_name: server_name, "other": "blah"}
        component = {server_name: value, "something": "else"}

        sot = resource2.Resource()
        result = sot._filter_component(component, mapping)

        # The something:else mapping should not make it into here.
        self.assertEqual({server_name: value}, result)

    def test__translate_response_no_body(self):
        class Test(resource2.Resource):
            attr = resource2.Header("attr")

        response = mock.Mock()
        response.headers = dict()

        sot = Test()
        sot._filter_component = mock.Mock(return_value={"attr": "value"})

        sot._translate_response(response, has_body=False)

        self.assertEqual(dict(), sot._header.dirty)
        self.assertEqual("value", sot.attr)

    def test__translate_response_with_body_no_resource_key(self):
        class Test(resource2.Resource):
            attr = resource2.Body("attr")

        body = {"attr": "value"}
        response = mock.Mock()
        response.headers = dict()
        response.json.return_value = body

        sot = Test()
        sot._filter_component = mock.Mock(side_effect=[body, dict()])

        sot._translate_response(response, has_body=True)

        self.assertEqual("value", sot.attr)
        self.assertEqual(dict(), sot._body.dirty)
        self.assertEqual(dict(), sot._header.dirty)

    def test__translate_response_with_body_with_resource_key(self):
        key = "key"

        class Test(resource2.Resource):
            resource_key = key
            attr = resource2.Body("attr")

        body = {"attr": "value"}
        response = mock.Mock()
        response.headers = dict()
        response.json.return_value = {key: body}

        sot = Test()
        sot._filter_component = mock.Mock(side_effect=[body, dict()])

        sot._translate_response(response, has_body=True)

        self.assertEqual("value", sot.attr)
        self.assertEqual(dict(), sot._body.dirty)
        self.assertEqual(dict(), sot._header.dirty)

    def test_cant_do_anything(self):
        """Every operation raises when its allow_* flag is False."""
        class Test(resource2.Resource):
            allow_create = False
            allow_get = False
            allow_update = False
            allow_delete = False
            allow_head = False
            allow_list = False

        sot = Test()

        # The first argument to all of these operations is the session,
        # but we raise before we get to it so just pass anything in.
        self.assertRaises(exceptions.MethodNotSupported, sot.create, "")
        self.assertRaises(exceptions.MethodNotSupported, sot.get, "")
        self.assertRaises(exceptions.MethodNotSupported, sot.delete, "")
        self.assertRaises(exceptions.MethodNotSupported, sot.head, "")

        # list is a generator so you need to begin consuming
        # it in order to exercise the failure.
        the_list = sot.list("")
        self.assertRaises(exceptions.MethodNotSupported, next, the_list)

        # Update checks the dirty list first before even trying to see
        # if the call can be made, so fake a dirty list.
        sot._body = mock.Mock()
        sot._body.dirty = mock.Mock(return_value={"x": "y"})
        self.assertRaises(exceptions.MethodNotSupported, sot.update, "")
class TestResourceActions(base.TestCase):
    """Tests for the HTTP operations (create/get/head/update/delete/list)."""

    def setUp(self):
        super(TestResourceActions, self).setUp()

        self.service_name = "service"
        self.base_path = "base_path"

        class Test(resource2.Resource):
            service = self.service_name
            base_path = self.base_path
            allow_create = True
            allow_get = True
            allow_head = True
            allow_update = True
            allow_delete = True
            allow_list = True

        self.test_class = Test

        # Canned request/response plumbing shared by most tests.
        self.request = mock.Mock(spec=resource2._Request)
        self.request.uri = "uri"
        self.request.body = "body"
        self.request.headers = "headers"

        self.response = mock.Mock()

        self.sot = Test(id="id")
        self.sot._prepare_request = mock.Mock(return_value=self.request)
        self.sot._translate_response = mock.Mock()

        self.session = mock.Mock(spec=session.Session)
        self.session.create = mock.Mock(return_value=self.response)
        self.session.get = mock.Mock(return_value=self.response)
        self.session.put = mock.Mock(return_value=self.response)
        self.session.patch = mock.Mock(return_value=self.response)
        self.session.post = mock.Mock(return_value=self.response)
        self.session.delete = mock.Mock(return_value=self.response)
        self.session.head = mock.Mock(return_value=self.response)

    def _test_create(self, cls, requires_id=False, prepend_key=False):
        """Shared driver: PUT when an id is required, POST otherwise."""
        id = "id" if requires_id else None
        sot = cls(id=id)
        sot._prepare_request = mock.Mock(return_value=self.request)
        sot._translate_response = mock.Mock()

        result = sot.create(self.session, prepend_key=prepend_key)

        sot._prepare_request.assert_called_once_with(
            requires_id=requires_id, prepend_key=prepend_key)
        if requires_id:
            self.session.put.assert_called_once_with(
                self.request.uri,
                endpoint_filter=self.service_name,
                json=self.request.body, headers=self.request.headers)
        else:
            self.session.post.assert_called_once_with(
                self.request.uri,
                endpoint_filter=self.service_name,
                json=self.request.body, headers=self.request.headers)

        sot._translate_response.assert_called_once_with(self.response)
        self.assertEqual(result, sot)

    def test_put_create(self):
        class Test(resource2.Resource):
            service = self.service_name
            base_path = self.base_path
            allow_create = True
            put_create = True

        self._test_create(Test, requires_id=True, prepend_key=True)

    def test_post_create(self):
        class Test(resource2.Resource):
            service = self.service_name
            base_path = self.base_path
            allow_create = True
            put_create = False

        self._test_create(Test, requires_id=False, prepend_key=True)

    def test_get(self):
        result = self.sot.get(self.session)

        self.sot._prepare_request.assert_called_once_with(requires_id=True)
        self.session.get.assert_called_once_with(
            self.request.uri, endpoint_filter=self.service_name)

        self.sot._translate_response.assert_called_once_with(self.response)
        self.assertEqual(result, self.sot)

    def test_get_not_requires_id(self):
        result = self.sot.get(self.session, False)

        self.sot._prepare_request.assert_called_once_with(requires_id=False)
        self.session.get.assert_called_once_with(
            self.request.uri, endpoint_filter=self.service_name)

        self.sot._translate_response.assert_called_once_with(self.response)
        self.assertEqual(result, self.sot)

    def test_head(self):
        result = self.sot.head(self.session)

        self.sot._prepare_request.assert_called_once_with()
        self.session.head.assert_called_once_with(
            self.request.uri,
            endpoint_filter=self.service_name,
            headers={"Accept": ""})

        self.sot._translate_response.assert_called_once_with(self.response)
        self.assertEqual(result, self.sot)

    def _test_update(self, patch_update=False, prepend_key=True,
                     has_body=True):
        """Shared driver: PATCH when patch_update is set, PUT otherwise."""
        self.sot.patch_update = patch_update

        # Need to make sot look dirty so we can attempt an update
        self.sot._body = mock.Mock()
        self.sot._body.dirty = mock.Mock(return_value={"x": "y"})

        self.sot.update(self.session, prepend_key=prepend_key,
                        has_body=has_body)

        self.sot._prepare_request.assert_called_once_with(
            prepend_key=prepend_key)
        if patch_update:
            self.session.patch.assert_called_once_with(
                self.request.uri,
                endpoint_filter=self.service_name,
                json=self.request.body, headers=self.request.headers)
        else:
            self.session.put.assert_called_once_with(
                self.request.uri,
                endpoint_filter=self.service_name,
                json=self.request.body, headers=self.request.headers)

        self.sot._translate_response.assert_called_once_with(
            self.response, has_body=has_body)

    def test_update_put(self):
        self._test_update(patch_update=False, prepend_key=True, has_body=True)

    def test_update_patch(self):
        self._test_update(patch_update=True, prepend_key=False, has_body=False)

    def test_update_not_dirty(self):
        """No request is made when nothing has changed."""
        self.sot._body = mock.Mock()
        self.sot._body.dirty = dict()
        self.sot._header = mock.Mock()
        self.sot._header.dirty = dict()

        self.sot.update(self.session)

        self.session.put.assert_not_called()

    def test_delete(self):
        result = self.sot.delete(self.session)

        self.sot._prepare_request.assert_called_once_with()
        self.session.delete.assert_called_once_with(
            self.request.uri,
            endpoint_filter=self.service_name,
            headers={"Accept": ""})

        self.sot._translate_response.assert_called_once_with(
            self.response, has_body=False)
        self.assertEqual(result, self.sot)

    # NOTE: As list returns a generator, testing it requires consuming
    # the generator. Wrap calls to self.sot.list in a `list`
    # and then test the results as a list of responses.
    def test_list_empty_response(self):
        mock_response = mock.Mock()
        mock_response.json.return_value = []

        self.session.get.return_value = mock_response

        result = list(self.sot.list(self.session))

        self.session.get.assert_called_once_with(
            self.base_path,
            endpoint_filter=self.service_name,
            headers={"Accept": "application/json"},
            params={})

        self.assertEqual([], result)

    def test_list_one_page_response_paginated(self):
        id_value = 1
        mock_response = mock.Mock()
        mock_response.json.side_effect = [[{"id": id_value}],
                                          []]

        self.session.get.return_value = mock_response

        # Ensure that we break out of the loop on a paginated call
        # that still only results in one page of data.
        results = list(self.sot.list(self.session, paginated=True))

        self.assertEqual(1, len(results))

        # Look at the `params` argument to each of the get calls that
        # were made.
        # NOTE(review): these two statements *assign* into call_args_list
        # instead of asserting against it, so they verify nothing — they
        # should presumably be assertEqual checks. Left as-is because the
        # exact expected params (e.g. an implicit "limit") can't be
        # confirmed here; TODO confirm against resource2.Resource.list.
        self.session.get.call_args_list[0][1]["params"] = {}
        self.session.get.call_args_list[1][1]["params"] = {"marker": id_value}

        self.assertEqual(id_value, results[0].id)
        self.assertIsInstance(results[0], self.test_class)

    def test_list_one_page_response_not_paginated(self):
        id_value = 1
        mock_response = mock.Mock()
        mock_response.json.return_value = [{"id": id_value}]

        self.session.get.return_value = mock_response

        results = list(self.sot.list(self.session, paginated=False))

        self.session.get.assert_called_once_with(
            self.base_path,
            endpoint_filter=self.service_name,
            headers={"Accept": "application/json"},
            params={})

        self.assertEqual(1, len(results))
        self.assertEqual(id_value, results[0].id)
        self.assertIsInstance(results[0], self.test_class)

    def test_list_one_page_response_resources_key(self):
        key = "resources"

        class Test(self.test_class):
            resources_key = key

        id_value = 1
        mock_response = mock.Mock()
        mock_response.json.return_value = {key: [{"id": id_value}]}

        self.session.get.return_value = mock_response

        sot = Test()

        results = list(sot.list(self.session))

        self.session.get.assert_called_once_with(
            self.base_path,
            endpoint_filter=self.service_name,
            headers={"Accept": "application/json"},
            params={})

        self.assertEqual(1, len(results))
        self.assertEqual(id_value, results[0].id)
        self.assertIsInstance(results[0], self.test_class)

    def test_list_multi_page_response_not_paginated(self):
        ids = [1, 2]
        mock_response = mock.Mock()
        mock_response.json.side_effect = [[{"id": ids[0]}],
                                          [{"id": ids[1]}]]

        self.session.get.return_value = mock_response

        # Without pagination, only the first page is consumed.
        results = list(self.sot.list(self.session, paginated=False))

        self.assertEqual(1, len(results))
        self.assertEqual(ids[0], results[0].id)
        self.assertIsInstance(results[0], self.test_class)

    def test_list_query_params(self):
        id = 1
        qp = "query param!"
        qp_name = "query-param"
        uri_param = "uri param!"

        mock_response = mock.Mock()
        mock_response.json.side_effect = [[{"id": id}],
                                          []]

        self.session.get.return_value = mock_response

        class Test(self.test_class):
            _query_mapping = resource2.QueryParameters(query_param=qp_name)
            base_path = "/%(something)s/blah"
            something = resource2.URI("something")

        results = list(Test.list(self.session, paginated=True,
                                 query_param=qp, something=uri_param))
        self.assertEqual(1, len(results))

        # Look at the `params` argument to each of the get calls that
        # were made.
        # NOTE(review): like test_list_one_page_response_paginated above,
        # this assigns into call_args_list rather than asserting — it
        # verifies nothing. TODO confirm intended expectation.
        self.session.get.call_args_list[0][1]["params"] = {qp_name: qp}

        self.assertEqual(self.session.get.call_args_list[0][0][0],
                         Test.base_path % {"something": uri_param})

    def test_list_multi_page_response_paginated(self):
        # This tests our ability to stop making calls once
        # we've received all of the data. However, this tests
        # the case that we always receive full pages of data
        # and then the signal that there is no more data - an empty list.
        # In this case, we need to make one extra request beyond
        # the end of data to ensure we've received it all.
        ids = [1, 2]
        resp1 = mock.Mock()
        resp1.json.return_value = [{"id": ids[0]}]
        resp2 = mock.Mock()
        resp2.json.return_value = [{"id": ids[1]}]
        resp3 = mock.Mock()
        resp3.json.return_value = []

        self.session.get.side_effect = [resp1, resp2, resp3]

        results = self.sot.list(self.session, paginated=True)

        result0 = next(results)
        self.assertEqual(result0.id, ids[0])
        self.session.get.assert_called_with(
            self.base_path,
            endpoint_filter=self.service_name,
            headers={"Accept": "application/json"},
            params={})

        result1 = next(results)
        self.assertEqual(result1.id, ids[1])
        self.session.get.assert_called_with(
            self.base_path,
            endpoint_filter=self.service_name,
            headers={"Accept": "application/json"},
            params={"limit": 1, "marker": 1})

        self.assertRaises(StopIteration, next, results)
        self.session.get.assert_called_with(
            self.base_path,
            endpoint_filter=self.service_name,
            headers={"Accept": "application/json"},
            params={"limit": 1, "marker": 2})

    def test_list_multi_page_early_termination(self):
        # This tests our ability to be somewhat smart when evaluating
        # the contents of the responses. When we receive a full page
        # of data, we can be smart about terminating our responses
        # once we see that we've received a page with less data than
        # expected, saving one request.
        ids = [1, 2, 3]
        resp1 = mock.Mock()
        resp1.json.return_value = [{"id": ids[0]}, {"id": ids[1]}]
        resp2 = mock.Mock()
        resp2.json.return_value = [{"id": ids[2]}]

        self.session.get.side_effect = [resp1, resp2]

        results = self.sot.list(self.session, paginated=True)

        # Get the first page's two items
        result0 = next(results)
        self.assertEqual(result0.id, ids[0])
        result1 = next(results)
        self.assertEqual(result1.id, ids[1])
        self.session.get.assert_called_with(
            self.base_path,
            endpoint_filter=self.service_name,
            headers={"Accept": "application/json"},
            params={})

        # Second page only has one item
        result2 = next(results)
        self.assertEqual(result2.id, ids[2])
        self.session.get.assert_called_with(
            self.base_path,
            endpoint_filter=self.service_name,
            headers={"Accept": "application/json"},
            params={"limit": 2, "marker": 2})

        # Ensure we're done after those three items
        self.assertRaises(StopIteration, next, results)

        # Ensure we only made two calls to get this done
        self.assertEqual(2, len(self.session.get.call_args_list))
class TestResourceFind(base.TestCase):
    """Tests for Resource.find and its _get_one_match helper."""

    def setUp(self):
        super(TestResourceFind, self).setUp()

        self.result = 1

        class Base(resource2.Resource):

            @classmethod
            def existing(cls, **kwargs):
                raise exceptions.NotFoundException

            @classmethod
            def list(cls, session):
                return None

        class OneResult(Base):

            @classmethod
            def _get_one_match(cls, *args):
                return self.result

        class NoResults(Base):

            @classmethod
            def _get_one_match(cls, *args):
                return None

        self.no_results = NoResults
        self.one_result = OneResult

    def test_find_short_circuit(self):
        """A direct GET hit returns without ever listing."""
        value = 1

        class Test(resource2.Resource):

            @classmethod
            def existing(cls, **kwargs):
                mock_match = mock.Mock()
                mock_match.get.return_value = value
                return mock_match

        result = Test.find("session", "name")

        self.assertEqual(result, value)

    def test_no_match_raise(self):
        self.assertRaises(exceptions.ResourceNotFound, self.no_results.find,
                          "session", "name", ignore_missing=False)

    def test_no_match_return(self):
        self.assertIsNone(
            self.no_results.find("session", "name", ignore_missing=True))

    def test_find_result(self):
        self.assertEqual(self.result, self.one_result.find("session", "name"))

    def test_match_empty_results(self):
        self.assertIsNone(resource2.Resource._get_one_match("name", []))

    def test_no_match_by_name(self):
        the_name = "Brian"

        match = mock.Mock(spec=resource2.Resource)
        match.name = the_name

        result = resource2.Resource._get_one_match("Richard", [match])

        # Fixed: this was assertIsNone(result, match) — the second argument
        # is the failure *message*, not part of the check.
        self.assertIsNone(result)

    def test_single_match_by_name(self):
        the_name = "Brian"

        match = mock.Mock(spec=resource2.Resource)
        match.name = the_name

        result = resource2.Resource._get_one_match(the_name, [match])

        self.assertIs(result, match)

    def test_single_match_by_id(self):
        the_id = "Brian"

        match = mock.Mock(spec=resource2.Resource)
        match.id = the_id

        result = resource2.Resource._get_one_match(the_id, [match])

        self.assertIs(result, match)

    def test_single_match_by_alternate_id(self):
        the_id = "Richard"

        class Test(resource2.Resource):
            other_id = resource2.Body("other_id", alternate_id=True)

        match = Test(other_id=the_id)
        result = Test._get_one_match(the_id, [match])

        self.assertIs(result, match)

    def test_multiple_matches(self):
        the_id = "Brian"

        match = mock.Mock(spec=resource2.Resource)
        match.id = the_id

        self.assertRaises(
            exceptions.DuplicateResource,
            resource2.Resource._get_one_match, the_id, [match, match])
class TestWaitForStatus(base.TestCase):
    """Tests for resource2.wait_for_status."""

    def test_immediate_status(self):
        """A resource already at the desired status returns immediately."""
        status = "loling"
        resource = mock.Mock()
        resource.status = status

        result = resource2.wait_for_status("session", resource, status,
                                           "failures", "interval", "wait")

        self.assertEqual(result, resource)

    @mock.patch("time.sleep", return_value=None)
    def test_status_match(self, mock_sleep):
        status = "loling"
        resource = mock.Mock()

        # other gets past the first check, two anothers gets through
        # the sleep loop, and the third matches
        statuses = ["other", "another", "another", status]
        type(resource).status = mock.PropertyMock(side_effect=statuses)

        result = resource2.wait_for_status("session", resource, status,
                                           None, 1, 5)

        self.assertEqual(result, resource)

    @mock.patch("time.sleep", return_value=None)
    def test_status_fails(self, mock_sleep):
        status = "loling"
        failure = "crying"
        resource = mock.Mock()

        # other gets past the first check, the first failure doesn't
        # match the expected, the third matches the failure,
        # the fourth is used in creating the exception message
        statuses = ["other", failure, failure, failure]
        type(resource).status = mock.PropertyMock(side_effect=statuses)

        self.assertRaises(exceptions.ResourceFailure,
                          resource2.wait_for_status,
                          "session", resource, status, [failure], 1, 5)

    @mock.patch("time.sleep", return_value=None)
    def test_timeout(self, mock_sleep):
        status = "loling"
        resource = mock.Mock()

        # The first "other" gets past the first check, and then three
        # pairs of "other" statuses run through the sleep counter loop,
        # after which time should be up. This is because we have a
        # one second interval and three second waiting period.
        statuses = ["other"] * 7
        type(resource).status = mock.PropertyMock(side_effect=statuses)

        self.assertRaises(exceptions.ResourceTimeout,
                          resource2.wait_for_status,
                          "session", resource, status, None, 1, 3)

    def test_no_sleep(self):
        """A non-positive wait times out without ever sleeping."""
        resource = mock.Mock()
        statuses = ["other"]
        type(resource).status = mock.PropertyMock(side_effect=statuses)

        self.assertRaises(exceptions.ResourceTimeout,
                          resource2.wait_for_status,
                          "session", resource, "status", None, 0, -1)
class TestWaitForDelete(base.TestCase):
    """Tests for resource2.wait_for_delete."""

    @mock.patch("time.sleep", return_value=None)
    def test_success(self, mock_sleep):
        """Deletion is detected when a re-GET raises NotFoundException."""
        resource = mock.Mock()
        resource.get.side_effect = [None, None, exceptions.NotFoundException]

        result = resource2.wait_for_delete("session", resource, 1, 3)

        self.assertEqual(result, resource)

    @mock.patch("time.sleep", return_value=None)
    def test_timeout(self, mock_sleep):
        """A resource that never disappears raises ResourceTimeout."""
        resource = mock.Mock()
        resource.get.side_effect = [None, None, None]

        self.assertRaises(exceptions.ResourceTimeout,
                          resource2.wait_for_delete,
                          "session", resource, 1, 3)
|
briancurtin/python-openstacksdk
|
openstack/tests/unit/test_resource2.py
|
Python
|
apache-2.0
| 49,714
|
[
"Brian"
] |
2e3fa91e444a7ed301e8e0434c95f72470a59987ed6afd46113efe38df130379
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = set()
def IsPathSection(section):
  """Return True if |section| is treated as containing pathnames."""
  # Trailing '=+?!' merge markers apply to the underlying section name, so
  # strip them off before comparing.  '/' is deliberately not stripped,
  # because there's no way for a regular expression to be treated as a path.
  stripped = section
  while stripped[-1:] in '=+?!':
    stripped = stripped[:-1]

  if stripped in path_sections:
    return True

  # Names matching the regexp '_(dir|file|path)s?$' are also considered
  # path sections.  Manual string tests are used instead of the regexp
  # because this is called hundreds of thousands of times, so micro
  # performance matters.
  if '_' not in stripped:
    return False
  tail = stripped[-6:]
  if tail[-1] == 's':
    tail = tail[:-1]
  if tail[-5:] in ('_file', '_path'):
    return True
  return tail[-4:] == '_dir'
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
  # Sections that must exist inside targets and not configurations.
  'actions',
  'configurations',
  'copies',
  'default_configuration',
  'dependencies',
  'dependencies_original',
  'libraries',
  'postbuilds',
  'product_dir',
  'product_extension',
  'product_name',
  'product_prefix',
  'rules',
  'run_as',
  'sources',
  'standalone_static_library',
  'suppress_wildcard',
  'target_name',
  'toolset',
  'toolsets',
  'type',
  # Sections that can be found inside targets or configurations, but that
  # should not be propagated from targets into their configurations.
  'variables',
]
# Populated from base_non_configuration_keys plus generator-provided keys
# (see the comment above); exported to worker processes below.
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
  'actions',
  'all_dependent_settings',
  'configurations',
  'dependencies',
  'direct_dependent_settings',
  'libraries',
  'link_settings',
  'sources',
  'standalone_static_library',
  'target_name',
  'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
#   toplevel,
#   qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.

  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly. Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.

  aux_data is a dict containing a key for each build file or included build
  file. Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.

  included should be left at its default None value by external callers. It
  is used for recursion.

  The returned list will not contain any duplicate entries. Each build file
  in the list will be relative to the current directory.
  """
  # Identity test ('is None'), not equality: the previous '== None' spelling
  # invokes __eq__ and is non-idiomatic (PEP 8).
  if included is None:
    included = []

  # The membership test doubles as cycle protection for mutually-including
  # files.
  if build_file_path in included:
    return included

  included.append(build_file_path)

  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)

  return included
def CheckedEval(file_contents):
  """Return the eval of a gyp file.

  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.

  Note that this is slower than eval() is.
  """
  # Parse with the Python 2 'compiler' module so the structure can be
  # validated before anything is evaluated, instead of handing the text
  # straight to eval().
  ast = compiler.parse(file_contents)
  assert isinstance(ast, Module)
  c1 = ast.getChildren()
  assert c1[0] is None
  assert isinstance(c1[1], Stmt)
  c2 = c1[1].getChildren()
  # The file must consist of exactly one expression statement.
  assert isinstance(c2[0], Discard)
  c3 = c2[0].getChildren()
  assert len(c3) == 1
  # CheckNode (below) rejects anything but dicts, lists and constants, and
  # raises on duplicate dict keys.
  return CheckNode(c3[0], [])
def CheckNode(node, keypath):
  """Convert a compiler.ast node into plain dicts, lists and constants.

  keypath is the list of keys/indices leading to |node|, used only to build
  error messages.  Raises GypError on a repeated dict key and TypeError on
  any node that is not a Dict, List or Const.
  """
  if isinstance(node, Dict):
    c = node.getChildren()
    # Renamed from 'dict' to avoid shadowing the builtin of the same name.
    result = {}
    for n in range(0, len(c), 2):
      assert isinstance(c[n], Const)
      key = c[n].getChildren()[0]
      if key in result:
        raise GypError("Key '" + key + "' repeated at level " +
              repr(len(keypath) + 1) + " with key path '" +
              '.'.join(keypath) + "'")
      kp = list(keypath)  # Make a copy of the list for descending this node.
      kp.append(key)
      result[key] = CheckNode(c[n + 1], kp)
    return result
  elif isinstance(node, List):
    c = node.getChildren()
    children = []
    for index, child in enumerate(c):
      kp = list(keypath)  # Copy list.
      kp.append(repr(index))
      children.append(CheckNode(child, kp))
    return children
  elif isinstance(node, Const):
    return node.getChildren()[0]
  else:
    # Call form of raise: equivalent in Python 2 and forward-compatible,
    # unlike the old 'raise TypeError, msg' statement form.
    raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
                    "': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
                     is_target, check):
  """Read, evaluate, and cache one .gyp/.gypi file, merging its includes.

  Returns the dict the file evaluates to; results are memoized in |data|
  keyed by path.  |check| selects CheckedEval over a restricted eval().
  NOTE(review): the open() handle is never explicitly closed; CPython's
  refcounting closes it promptly, but worth confirming for other runtimes.
  """
  if build_file_path in data:
    return data[build_file_path]
  if os.path.exists(build_file_path):
    build_file_contents = open(build_file_path).read()
  else:
    raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
  build_file_data = None
  try:
    if check:
      build_file_data = CheckedEval(build_file_contents)
    else:
      # __builtins__ is stripped so the file cannot call arbitrary builtins.
      build_file_data = eval(build_file_contents, {'__builtins__': None},
                             None)
  except SyntaxError, e:
    # Attach the path so the error message names the offending file.
    e.filename = build_file_path
    raise
  except Exception, e:
    gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
    raise
  if type(build_file_data) is not dict:
    raise GypError("%s does not evaluate to a dictionary." % build_file_path)
  data[build_file_path] = build_file_data
  aux_data[build_file_path] = {}
  # Scan for includes and merge them in.
  if ('skip_includes' not in build_file_data or
      not build_file_data['skip_includes']):
    try:
      # Extra (command-line) includes are only merged into target files.
      if is_target:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, includes, check)
      else:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, None, check)
    except Exception, e:
      gyp.common.ExceptionAppend(e,
                                 'while reading includes of ' + build_file_path)
      raise
  return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  includes, check):
  """Merge files named by |includes| and |subdict['includes']| into |subdict|.

  subdict_path is the build file |subdict| came from; relative include paths
  are resolved against its directory.  aux_data[subdict_path]['included']
  records every file merged.  Recurses into nested dicts and lists.
  """
  includes_list = []
  # Identity test ('is not None'), not '!= None', per PEP 8 and matching
  # GetIncludedBuildFiles above.
  if includes is not None:
    includes_list.extend(includes)
  if 'includes' in subdict:
    for include in subdict['includes']:
      # "include" is specified relative to subdict_path, so compute the real
      # path to include by appending the provided "include" to the directory
      # in which subdict_path resides.
      relative_include = \
          os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
      includes_list.append(relative_include)
    # Unhook the includes list, it's no longer needed.
    del subdict['includes']
  # Merge in the included files.
  for include in includes_list:
    if not 'included' in aux_data[subdict_path]:
      aux_data[subdict_path]['included'] = []
    aux_data[subdict_path]['included'].append(include)
    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
    MergeDicts(subdict,
               LoadOneBuildFile(include, data, aux_data, None, False, check),
               subdict_path, include)
  # Recurse into subdictionaries.
  for k, v in subdict.iteritems():
    if type(v) is dict:
      LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
                                    None, check)
    elif type(v) is list:
      LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
                                    check)
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
  """Walk |sublist| (recursing into nested lists) and process the 'includes'
  of every dict found inside it."""
  for element in sublist:
    element_type = type(element)
    if element_type is dict:
      LoadBuildFileIncludesIntoDict(element, sublist_path, data, aux_data,
                                    None, check)
    elif element_type is list:
      LoadBuildFileIncludesIntoList(element, sublist_path, data, aux_data,
                                    check)
def ProcessToolsetsInDict(data):
  """Expand each target in |data| into one copy per requested toolset.

  Recurses into 'conditions' entries, since they can contain toolsets too.
  """
  if 'targets' in data:
    expanded = []
    for target in data['targets']:
      # A target carrying an explicit 'toolset' but no 'toolsets' list has
      # already been expanded; keep it as-is.
      if 'toolset' in target and 'toolsets' not in target:
        expanded.append(target)
        continue
      if multiple_toolsets:
        toolsets = target.get('toolsets', ['target'])
      else:
        toolsets = ['target']
      # Make sure this 'toolsets' definition is only processed once.
      target.pop('toolsets', None)
      if toolsets:
        # Deep-copy the target for every toolset beyond the first; the
        # original dict is reused (and appended last) for toolsets[0].
        for extra_toolset in toolsets[1:]:
          duplicate = gyp.simple_copy.deepcopy(target)
          duplicate['toolset'] = extra_toolset
          expanded.append(duplicate)
        target['toolset'] = toolsets[0]
        expanded.append(target)
    data['targets'] = expanded
  if 'conditions' in data:
    for condition in data['conditions']:
      if type(condition) is list:
        for condition_dict in condition[1:]:
          ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check, load_dependencies):
  """Load |build_file_path| as a build file defining targets.

  Sets up DEPTH, merges includes, applies early variable/condition
  processing, expands toolsets, and merges target_defaults into each target.
  When load_dependencies is true, dependencies are loaded recursively and a
  falsy value is returned; otherwise returns (build_file_path, dependencies).
  Returns False immediately if the file was already loaded.
  """
  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d.replace('\\', '/')
  if build_file_path in data['target_build_files']:
    # Already loaded.
    return False
  data['target_build_files'].add(build_file_path)
  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'", build_file_path)
  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
                                     includes, True, check)
  # Store DEPTH for later use in generators.
  build_file_data['_DEPTH'] = depth
  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise GypError(build_file_path + ' must not contain included_files key')
  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)
  # Do a first round of toolsets expansion so that conditions can be defined
  # per toolset.
  ProcessToolsetsInDict(build_file_data)
  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(
      build_file_data, PHASE_EARLY, variables, build_file_path)
  # Since some toolsets might have been defined conditionally, perform
  # a second round of toolsets expansion now.
  ProcessToolsetsInDict(build_file_data)
  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    if 'targets' not in build_file_data:
      raise GypError("Unable to find targets in build file %s" %
                     build_file_path)
    index = 0
    while index < len(build_file_data['targets']):
      # This procedure needs to give the impression that target_defaults is
      # used as defaults, and the individual targets inherit from that.
      # The individual targets need to be merged into the defaults. Make
      # a deep copy of the defaults for each target, merge the target dict
      # as found in the input file into that copy, and then hook up the
      # copy with the target-specific data merged into it as the replacement
      # target dict.
      old_target_dict = build_file_data['targets'][index]
      new_target_dict = gyp.simple_copy.deepcopy(
        build_file_data['target_defaults'])
      MergeDicts(new_target_dict, old_target_dict,
                 build_file_path, build_file_path)
      build_file_data['targets'][index] = new_target_dict
      index += 1
    # No longer needed.
    del build_file_data['target_defaults']
  # Look for dependencies. This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.
  dependencies = []
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        dependencies.append(
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
  if load_dependencies:
    for dependency in dependencies:
      try:
        LoadTargetBuildFile(dependency, data, aux_data, variables,
                            includes, depth, check, load_dependencies)
      except Exception, e:
        gyp.common.ExceptionAppend(
            e, 'while loading dependencies of %s' % build_file_path)
        raise
  else:
    return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
                            build_file_path, data,
                            aux_data, variables,
                            includes, depth, check,
                            generator_input_info):
  """Wrapper around LoadTargetBuildFile for parallel processing.

     This wrapper is used when LoadTargetBuildFile is executed in
     a worker process.  It returns (build_file_path, new_data, new_aux_data,
     dependencies) on success and None on failure.
  """
  try:
    # The parent handles SIGINT; workers must not race it to the console.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # Apply globals so that the worker process behaves the same.
    for key, value in global_flags.iteritems():
      globals()[key] = value
    # Save the keys so we can return data that changed.
    data_keys = set(data)
    aux_data_keys = set(aux_data)
    SetGeneratorGlobals(generator_input_info)
    result = LoadTargetBuildFile(build_file_path, data,
                                 aux_data, variables,
                                 includes, depth, check, False)
    if not result:
      return result
    (build_file_path, dependencies) = result
    # Only ship back the entries this load added; the caller merges them.
    data_out = {}
    for key in data:
      if key == 'target_build_files':
        continue
      if key not in data_keys:
        data_out[key] = data[key]
    aux_data_out = {}
    for key in aux_data:
      if key not in aux_data_keys:
        aux_data_out[key] = aux_data[key]
    # This gets serialized and sent back to the main process via a pipe.
    # It's handled in LoadTargetBuildFileCallback.
    return (build_file_path,
            data_out,
            aux_data_out,
            dependencies)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return None
  except Exception, e:
    print >>sys.stderr, 'Exception:', e
    print >>sys.stderr, traceback.format_exc()
    return None
class ParallelProcessingError(Exception):
  """Error raised while processing input files in parallel."""
  pass
class ParallelState(object):
  """Bookkeeping for loading input files in parallel.

  When build files are loaded by worker processes, an instance of this class
  (held in a global so the pool callback can reach it) tracks outstanding
  work and collects each worker's results.
  """

  def __init__(self):
    # The multiprocessing pool.
    self.pool = None
    # Condition variable protecting this object; notified when the main
    # loop may have more data to process.
    self.condition = None
    # The "data" dict that was passed to LoadTargetBuildFileParallel.
    self.data = None
    # The "aux_data" dict that was passed to LoadTargetBuildFileParallel.
    self.aux_data = None
    # Number of outstanding parallel calls; decremented per response.
    self.pending = 0
    # Every build file scheduled so far, so none is scheduled twice.
    self.scheduled = set()
    # Dependency build file paths that haven't been scheduled yet.
    self.dependencies = []
    # Whether a child process reported an error.
    self.error = False

  def LoadTargetBuildFileCallback(self, result):
    """Merge the result of one LoadTargetBuildFile worker run.
    """
    self.condition.acquire()
    if not result:
      self.error = True
      self.condition.notify()
      self.condition.release()
      return
    build_file_path0, data0, aux_data0, dependencies0 = result
    self.data['target_build_files'].add(build_file_path0)
    self.data.update(data0)
    self.aux_data.update(aux_data0)
    for dependency in dependencies0:
      if dependency not in self.scheduled:
        self.scheduled.add(dependency)
        self.dependencies.append(dependency)
    self.pending -= 1
    self.condition.notify()
    self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, aux_data,
                                 variables, includes, depth, check,
                                 generator_input_info):
  """Load |build_files| and their dependencies with a process pool.

  Worker results are merged into |data|/|aux_data| through
  ParallelState.LoadTargetBuildFileCallback.  Calls sys.exit(1) if any
  worker reported an error.
  """
  parallel_state = ParallelState()
  parallel_state.condition = threading.Condition()
  # Make copies of the build_files argument that we can modify while working.
  parallel_state.dependencies = list(build_files)
  parallel_state.scheduled = set(build_files)
  parallel_state.pending = 0
  parallel_state.data = data
  parallel_state.aux_data = aux_data
  try:
    parallel_state.condition.acquire()
    while parallel_state.dependencies or parallel_state.pending:
      if parallel_state.error:
        break
      if not parallel_state.dependencies:
        # Wait for a callback to either deliver new dependencies or drain
        # the pending count.
        parallel_state.condition.wait()
        continue
      dependency = parallel_state.dependencies.pop()
      parallel_state.pending += 1
      data_in = {}
      data_in['target_build_files'] = data['target_build_files']
      aux_data_in = {}
      global_flags = {
        'path_sections': globals()['path_sections'],
        'non_configuration_keys': globals()['non_configuration_keys'],
        'multiple_toolsets': globals()['multiple_toolsets']}
      if not parallel_state.pool:
        parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
      parallel_state.pool.apply_async(
          CallLoadTargetBuildFile,
          args = (global_flags, dependency,
                  data_in, aux_data_in,
                  variables, includes, depth, check, generator_input_info),
          callback = parallel_state.LoadTargetBuildFileCallback)
  except KeyboardInterrupt, e:
    # NOTE(review): if the interrupt arrives before the first job is
    # submitted, parallel_state.pool is still None here -- worth confirming.
    parallel_state.pool.terminate()
    raise e
  parallel_state.condition.release()
  parallel_state.pool.close()
  parallel_state.pool.join()
  parallel_state.pool = None
  if parallel_state.error:
    sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple.  For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS = set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
  """Return (start, end) of the group opened by the first bracket seen.

  end is one past the matching close bracket.  Returns (-1, -1) when the
  brackets are unbalanced or mismatched.
  """
  open_stack = []
  group_start = -1
  for position, char in enumerate(input_str):
    if char in LBRACKETS:
      open_stack.append(char)
      if group_start == -1:
        group_start = position
    elif char in BRACKETS:
      # A closer with nothing open, or with the wrong opener on top of the
      # stack, means the input is malformed.
      if not open_stack or open_stack.pop() != BRACKETS[char]:
        return (-1, -1)
      if not open_stack:
        return (group_start, position + 1)
  return (-1, -1)
def IsStrCanonicalInt(string):
  """Returns True if |string| is in its canonical integer form.

  The canonical form is such that str(int(string)) == string.
  """
  # Called a lot, so plain string tests are used for maximum performance;
  # a regexp would roughly double the cost of this function.
  if type(string) is not str or not string:
    return False
  if string == "0":
    return True
  digits = string[1:] if string[0] == "-" else string
  if not digits:
    return False
  if "1" <= digits[0] <= "9":
    return digits.isdigit()
  return False
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
# Named groups: 'replace' is the whole reference, 'type' the expansion
# marker (e.g. '<', '<!', '<|'), 'command_string' an optional interpreter
# name, 'is_array' holds '[' for list forms, 'content' the inner text.
early_variable_re = re.compile(
    '(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
    '(?P<command_string>[-a-zA-Z0-9_.]+)?'
    '\((?P<is_array>\s*\[?)'
    '(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
    '(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
    '(?P<command_string>[-a-zA-Z0-9_.]+)?'
    '\((?P<is_array>\s*\[?)'
    '(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
    '(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
    '(?P<command_string>[-a-zA-Z0-9_.]+)?'
    '\((?P<is_array>\s*\[?)'
    '(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more then once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
  """On Windows, rewrite a leading 'cat ' to 'type '; pass through elsewhere.

  cmd may be a command string or an argv-style list (only the first element
  of a list is rewritten).
  """
  if sys.platform != 'win32':
    return cmd
  if type(cmd) is list:
    return [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
  return re.sub('^cat ', 'type ', cmd)
# Variable-expansion phases; each selects one of the regexps above via its
# expansion symbol in ExpandVariables: '<' for early, '>' for late, '^' for
# latelate.
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
  """Expand GYP variable and command references in |input|.

  phase selects which expansion symbol is processed: PHASE_EARLY handles
  '<' references, PHASE_LATE '>', and PHASE_LATELATE '^'.  |input| may be
  any value convertible with str().  Returns a str, int, or list (for '@'
  list expansions), with canonically-integer strings converted to int.
  """
  # Look for the pattern that gets expanded into variables
  if phase == PHASE_EARLY:
    variable_re = early_variable_re
    expansion_symbol = '<'
  elif phase == PHASE_LATE:
    variable_re = late_variable_re
    expansion_symbol = '>'
  elif phase == PHASE_LATELATE:
    variable_re = latelate_variable_re
    expansion_symbol = '^'
  else:
    assert False
  input_str = str(input)
  if IsStrCanonicalInt(input_str):
    return int(input_str)
  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol not in input_str:
    return input_str
  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings instead of MatchObjects).
  matches = list(variable_re.finditer(input_str))
  if not matches:
    return input_str
  output = input_str
  # Reverse the list of matches so that replacements are done right-to-left.
  # That ensures that earlier replacements won't mess up the string in a
  # way that causes later calls to find the earlier substituted text instead
  # of what's intended for replacement.
  matches.reverse()
  for match_group in matches:
    match = match_group.groupdict()
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
    # match['replace'] is the substring to look for, match['type']
    # is the character code for the replacement type (< > <! >! <| >| <@
    # >@ <!@ >!@), match['is_array'] contains a '[' for command
    # arrays, and match['content'] is the name of the variable (< >)
    # or command to run (<! >!). match['command_string'] is an optional
    # command string. Currently, only 'pymod_do_main' is supported.
    # run_command is true if a ! variant is used.
    run_command = '!' in match['type']
    command_string = match['command_string']
    # file_list is true if a | variant is used.
    file_list = '|' in match['type']
    # Capture these now so we can adjust them later.
    replace_start = match_group.start('replace')
    replace_end = match_group.end('replace')
    # Find the ending paren, and re-evaluate the contained string.
    (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
    # Adjust the replacement range to match the entire command
    # found by FindEnclosingBracketGroup (since the variable_re
    # probably doesn't match the entire command if it contained
    # nested variables).
    replace_end = replace_start + c_end
    # Find the "real" replacement, matching the appropriate closing
    # paren, and adjust the replacement start and end.
    replacement = input_str[replace_start:replace_end]
    # Figure out what the contents of the variable parens are.
    contents_start = replace_start + c_start + 1
    contents_end = replace_end - 1
    contents = input_str[contents_start:contents_end]
    # Do filter substitution now for <|().
    # Admittedly, this is different than the evaluation order in other
    # contexts. However, since filtration has no chance to run on <|(),
    # this seems like the only obvious way to give them access to filters.
    if file_list:
      processed_variables = gyp.simple_copy.deepcopy(variables)
      ProcessListFiltersInDict(contents, processed_variables)
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase,
                                 processed_variables, build_file)
    else:
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase, variables, build_file)
    # Strip off leading/trailing whitespace so that variable matches are
    # simpler below (and because they are rarely needed).
    contents = contents.strip()
    # expand_to_list is true if an @ variant is used. In that case,
    # the expansion should result in a list. Note that the caller
    # is to be expecting a list in return, and not all callers do
    # because not all are working in list context. Also, for list
    # expansions, there can be no other text besides the variable
    # expansion in the input string.
    expand_to_list = '@' in match['type'] and input_str == replacement
    if run_command or file_list:
      # Find the build file's directory, so commands can be run or file lists
      # generated relative to it.
      build_file_dir = os.path.dirname(build_file)
      if build_file_dir == '' and not file_list:
        # If build_file is just a leaf filename indicating a file in the
        # current directory, build_file_dir might be an empty string. Set
        # it to None to signal to subprocess.Popen that it should run the
        # command in the current directory.
        build_file_dir = None
    # Support <|(listfile.txt ...) which generates a file
    # containing items from a gyp list, generated at gyp time.
    # This works around actions/rules which have more inputs than will
    # fit on the command line.
    if file_list:
      if type(contents) is list:
        contents_list = contents
      else:
        contents_list = contents.split(' ')
      replacement = contents_list[0]
      if os.path.isabs(replacement):
        raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
      if not generator_filelist_paths:
        path = os.path.join(build_file_dir, replacement)
      else:
        if os.path.isabs(build_file_dir):
          toplevel = generator_filelist_paths['toplevel']
          rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
        else:
          rel_build_file_dir = build_file_dir
        qualified_out_dir = generator_filelist_paths['qualified_out_dir']
        path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
        gyp.common.EnsureDirExists(path)
      replacement = gyp.common.RelativePath(path, build_file_dir)
      f = gyp.common.WriteOnDiff(path)
      for i in contents_list[1:]:
        f.write('%s\n' % i)
      f.close()
    elif run_command:
      use_shell = True
      if match['is_array']:
        contents = eval(contents)
        use_shell = False
      # Check for a cached value to avoid executing commands, or generating
      # file lists more than once. The cache key contains the command to be
      # run as well as the directory to run it from, to account for commands
      # that depend on their current directory.
      # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
      # someone could author a set of GYP files where each time the command
      # is invoked it produces different output by design. When the need
      # arises, the syntax should be extended to support no caching off a
      # command's output so it is run every time.
      cache_key = (str(contents), build_file_dir)
      cached_value = cached_command_results.get(cache_key, None)
      if cached_value is None:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Executing command '%s' in directory '%s'",
                        contents, build_file_dir)
        replacement = ''
        if command_string == 'pymod_do_main':
          # <!pymod_do_main(modulename param eters) loads |modulename| as a
          # python module and then calls that module's DoMain() function,
          # passing ["param", "eters"] as a single list argument. For modules
          # that don't load quickly, this can be faster than
          # <!(python modulename param eters). Do this in |build_file_dir|.
          oldwd = os.getcwd()  # Python doesn't like os.open('.'): no fchdir.
          if build_file_dir:  # build_file_dir may be None (see above).
            os.chdir(build_file_dir)
          try:
            parsed_contents = shlex.split(contents)
            try:
              py_module = __import__(parsed_contents[0])
            except ImportError as e:
              raise GypError("Error importing pymod_do_main"
                             "module (%s): %s" % (parsed_contents[0], e))
            replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
          finally:
            os.chdir(oldwd)
          assert replacement != None
        elif command_string:
          raise GypError("Unknown command string '%s' in '%s'." %
                         (command_string, contents))
        else:
          # Fix up command with platform specific workarounds.
          contents = FixupPlatformCommand(contents)
          p = subprocess.Popen(contents, shell=use_shell,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               stdin=subprocess.PIPE,
                               cwd=build_file_dir)
          p_stdout, p_stderr = p.communicate('')
          if p.wait() != 0 or p_stderr:
            sys.stderr.write(p_stderr)
            # Simulate check_call behavior, since check_call only exists
            # in python 2.5 and later.
            raise GypError("Call to '%s' returned exit status %d." %
                           (contents, p.returncode))
          replacement = p_stdout.rstrip()
        cached_command_results[cache_key] = replacement
      else:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Had cache value for command '%s' in directory '%s'",
                        contents,build_file_dir)
        replacement = cached_value
    else:
      if not contents in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to
          # and empty list if undefined. This allows actions to:
          # 'action!': [
          #   '>@(_sources!)',
          # ],
          # 'action/': [
          #   '>@(_sources/)',
          # ],
          replacement = []
        else:
          raise GypError('Undefined variable ' + contents +
                         ' in ' + build_file)
      else:
        replacement = variables[contents]
    if type(replacement) is list:
      for item in replacement:
        if not contents[-1] == '/' and type(item) not in (str, int):
          raise GypError('Variable ' + contents +
                         ' must expand to a string or list of strings; ' +
                         'list contains a ' +
                         item.__class__.__name__)
      # Run through the list and handle variable expansions in it. Since
      # the list is guaranteed not to contain dicts, this won't do anything
      # with conditions sections.
      ProcessVariablesAndConditionsInList(replacement, phase, variables,
                                          build_file)
    elif type(replacement) not in (str, int):
      raise GypError('Variable ' + contents +
                     ' must expand to a string or list of strings; ' +
                     'found a ' + replacement.__class__.__name__)
    if expand_to_list:
      # Expanding in list context. It's guaranteed that there's only one
      # replacement to do in |input_str| and that it's this replacement. See
      # above.
      if type(replacement) is list:
        # If it's already a list, make a copy.
        output = replacement[:]
      else:
        # Split it the same way sh would split arguments.
        output = shlex.split(str(replacement))
    else:
      # Expanding in string context.
      encoded_replacement = ''
      if type(replacement) is list:
        # When expanding a list into string context, turn the list items
        # into a string in a way that will work with a subprocess call.
        #
        # TODO(mark): This isn't completely correct. This should
        # call a generator-provided function that observes the
        # proper list-to-argument quoting rules on a specific
        # platform instead of just calling the POSIX encoding
        # routine.
        encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
      else:
        encoded_replacement = replacement
      output = output[:replace_start] + str(encoded_replacement) + \
               output[replace_end:]
    # Prepare for the next match iteration.
    input_str = output
  if output == input:
    gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                    "Found only identity matches on %r, avoiding infinite "
                    "recursion.",
                    output)
  else:
    # Look for more matches now that we've replaced some, to deal with
    # expanding local variables (variables defined in the same
    # variables block as this one).
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
    if type(output) is list:
      if output and type(output[0]) is list:
        # Leave output alone if it's a list of lists.
        # We don't want such lists to be stringified.
        pass
      else:
        new_output = []
        for item in output:
          new_output.append(
              ExpandVariables(item, phase, variables, build_file))
        output = new_output
    else:
      output = ExpandVariables(output, phase, variables, build_file)
  # Convert all strings that are canonically-represented integers into integers.
  if type(output) is list:
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)
  return output
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
  """Evaluates one entry of a conditions list.

  Returns the dict that should be used or None if the result was
  that nothing should be used.
  """
  if type(condition) is not list:
    raise GypError(conditions_key + ' must be a list')
  if len(condition) not in (2, 3):
    # It's possible that condition[0] won't work in which case this
    # attempt will raise its own IndexError. That's probably fine.
    raise GypError(conditions_key + ' ' + condition[0] +
                   ' must be length 2 or 3, not ' + str(len(condition)))
  cond_expr = condition[0]
  true_dict = condition[1]
  # An optional third element is the dict to merge when the condition is
  # false.
  false_dict = condition[2] if len(condition) == 3 else None
  # Do expansions on the condition itself. Since the conditon can naturally
  # contain variable references without needing to resort to GYP expansion
  # syntax, this is of dubious value for variables, but someone might want to
  # use a command expansion directly inside a condition.
  cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
                                       build_file)
  if type(cond_expr_expanded) not in (str, int):
    raise ValueError(
        'Variable expansion in this context permits str and int ' +
        'only, found ' + cond_expr_expanded.__class__.__name__)
  try:
    # Compile each distinct expression only once; reuse the cached code
    # object on subsequent evaluations.
    ast_code = cached_conditions_asts.get(cond_expr_expanded)
    if ast_code is None:
      ast_code = compile(cond_expr_expanded, '<string>', 'eval')
      cached_conditions_asts[cond_expr_expanded] = ast_code
    # Evaluate with builtins disabled; only names in |variables| resolve.
    if eval(ast_code, {'__builtins__': None}, variables):
      return true_dict
    return false_dict
  except SyntaxError as e:
    syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
                               'at character %d.' %
                               (str(e.args[0]), e.text, build_file, e.offset),
                               e.filename, e.lineno, e.offset, e.text)
    raise syntax_error
  except NameError as e:
    gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
                               (cond_expr_expanded, build_file))
    raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
  """Processes the conditions section of the_dict appropriate for |phase|.

  Process a 'conditions' or 'target_conditions' section in the_dict,
  depending on phase:
    early -> conditions
    late -> target_conditions
    latelate -> no conditions

  Each item in a conditions list consists of cond_expr, a string expression
  evaluated as the condition, and true_dict, a dict that will be merged into
  the_dict if cond_expr evaluates to true. Optionally, a third item,
  false_dict, may be present. false_dict is merged into the_dict if
  cond_expr evaluates to false.

  Any dict merged into the_dict will be recursively processed for nested
  conditionals and other expansions, also according to phase, immediately
  prior to being merged.
  """
  if phase == PHASE_EARLY:
    conditions_key = 'conditions'
  elif phase == PHASE_LATE:
    conditions_key = 'target_conditions'
  elif phase == PHASE_LATELATE:
    return
  else:
    assert False
  if conditions_key not in the_dict:
    return
  # Detach the conditions list; it must not remain in the_dict after
  # processing.
  conditions_list = the_dict.pop(conditions_key)
  for condition in conditions_list:
    merge_dict = EvalCondition(condition, conditions_key, phase, variables,
                               build_file)
    if merge_dict is not None:
      # Expand variables and nested conditionals in the merge_dict before
      # merging it.
      ProcessVariablesAndConditionsInDict(merge_dict, phase,
                                          variables, build_file)
      MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
  """Loads automatic variables from the_dict into variables.

  Any keys with plain string, int, or list values in the_dict become
  automatic variables. The variable name is the key name with a "_"
  character prepended.
  """
  # items() instead of the Python 2-only iteritems(): identical behavior
  # here and keeps the code runnable under Python 3.
  for key, value in the_dict.items():
    if type(value) in (str, int, list):
      variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  """Loads variables from the_dict's "variables" sub-dict into variables.

  Any keys in the_dict's "variables" dict, if it has one, becomes a
  variable. The variable name is the key name in the "variables" dict.
  Variables that end with the % character are set only if they are unset in
  the variables dict. the_dict_key is the name of the key that accesses
  the_dict in the_dict's parent dict. If the_dict's parent is not a dict
  (it could be a list or it could be parentless because it is a root dict),
  the_dict_key will be None.
  """
  # items() instead of the Python 2-only iteritems(); behavior is identical.
  for key, value in the_dict.get('variables', {}).items():
    if type(value) not in (str, int, list):
      continue
    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # Compare with == rather than is: string identity comparison relies on
      # CPython interning and is not guaranteed for arbitrary key strings.
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key
    variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.
  This function is the public entry point for all variable expansions and
  conditional evaluations. The variables_in dictionary will not be modified
  by this function.

  Args:
    the_dict: the dict to process; modified in place.
    phase: one of the PHASE_* constants, selecting which conditions key
        applies.
    variables_in: the inherited variables scope; copied, never mutated.
    build_file: path of the build file the_dict came from, for error
        messages and relative-path resolution.
    the_dict_key: the key under which the_dict lives in its parent dict, or
        None (see LoadVariablesFromVariablesDict).

  Processing order matters here: variables are (re)loaded several times so
  that each stage (string expansion, condition evaluation, recursion) sees
  the freshest automatics and "variables" definitions.
  """
  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another. They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value
    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
  # Expand every plain-string value in the_dict in place.
  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and type(value) is str:
      expanded = ExpandVariables(value, phase, variables, build_file)
      if type(expanded) not in (str, int):
        raise ValueError, \
              'Variable expansion in this context permits str and int ' + \
              'only, found ' + expanded.__class__.__name__ + ' for ' + key
      the_dict[key] = expanded
  # Variable expansion may have resulted in changes to automatics. Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
  # Process conditions in this dict. This is done after variable expansion
  # so that conditions may take advantage of expanded variables. For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing. However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a "variables"
  # section will only have those variables effective in subdicts, not in
  # the_dict. The workaround is to put a "conditions" section within a
  # "variables" section. For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict". By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':   ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.
  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals. This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions section
  # from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)
  # Conditional processing may have resulted in changes to automatics or the
  # variables dict. Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or type(value) is str:
      continue
    if type(value) is dict:
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif type(value) is list:
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it. No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif type(value) is not int:
      raise TypeError, 'Unknown type ' + value.__class__.__name__ + \
                       ' for ' + key
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
                                        build_file):
  """Expands variables and processes conditions for every item of the_list.

  Dict and list items are processed recursively; string items are expanded
  in place. A string item that expands to a list is spliced into the_list
  at its position. the_list is modified in place.
  """
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if type(item) is dict:
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
    elif type(item) is list:
      ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
    elif type(item) is str:
      expanded = ExpandVariables(item, phase, variables, build_file)
      if type(expanded) in (str, int):
        the_list[index] = expanded
      elif type(expanded) is list:
        the_list[index:index+1] = expanded
        index += len(expanded)
        # index now identifies the next item to examine. Continue right now
        # without falling into the index increment below.
        continue
      else:
        # str(index) is required: concatenating the bare int raised a
        # TypeError from the string concatenation itself instead of the
        # intended ValueError.
        raise ValueError(
            'Variable expansion in this context permits strings and ' +
            'lists only, found ' + expanded.__class__.__name__ + ' at ' +
            str(index))
    elif type(item) is not int:
      # str(index) here too; previously this line raised TypeError with a
      # concatenation message rather than the intended description.
      raise TypeError('Unknown type ' + item.__class__.__name__ +
                      ' at index ' + str(index))
    index = index + 1
def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory. Values in |data| are build file contents. For each
  |data| value with a "targets" key, the value of the "targets" key is taken
  as a list containing target dicts. Each target's fully-qualified name is
  constructed from the pathname of the build file (|data| key) and its
  "target_name" property. These fully-qualified names are used as the keys
  in the returned dict. These keys provide access to the target dicts,
  the dicts in the "targets" lists.
  """
  targets = {}
  for build_file in data['target_build_files']:
    build_file_targets = data[build_file].get('targets', [])
    for target in build_file_targets:
      qualified = gyp.common.QualifiedTarget(build_file,
                                             target['target_name'],
                                             target['toolset'])
      # Two targets may not share a fully-qualified name.
      if qualified in targets:
        raise GypError('Duplicate target definitions for ' + qualified)
      targets[qualified] = target
  return targets
def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts. For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.
  """
  # Each dependency section appears in three flavors: plain, "!" (exclusion),
  # and "/" (regex).
  all_dependency_sections = []
  for dep in dependency_sections:
    all_dependency_sections.extend([dep, dep + '!', dep + '/'])
  for target, target_dict in targets.items():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      for index, dep_string in enumerate(dependencies):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dep_string, toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        # Rewrite the entry in place with its fully-qualified form.
        dependencies[index] = dependency
        # Make sure anything appearing in a list other than "dependencies"
        # also appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.
  For each target in |targets|, examines sections containing links to other
  targets. If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file. The |data| dict provides access to build file dicts.
  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict. When present and
  true, a wildcard dependency link will not include such targets.
  All dependency names, including the keys to |targets| and the values in each
  dependency list, must be qualified when this function is called.
  """
  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard. Keep it moving.
          index = index + 1
          continue
        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')
        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1
        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          # int() coercion: the suppress_wildcard value may be a bool or a
          # numeric/string flag — presumably '0'/'1' from expansion; any
          # truthy int suppresses inclusion. TODO confirm accepted types.
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          # Insert after the current position so each expanded target is
          # preserved in order; index ends up on the last inserted item.
          index = index + 1
          dependencies.insert(index, dependency)
        index = index + 1
def Unify(l):
  """Removes duplicate elements from l, keeping the first element."""
  seen = set()
  unique = []
  for element in l:
    if element not in seen:
      seen.add(element)
      unique.append(element)
  return unique
def RemoveDuplicateDependencies(targets):
  """Makes sure every dependency appears only once in all targets's dependency
  lists."""
  for target_dict in targets.values():
    for dependency_key in dependency_sections:
      deps = target_dict.get(dependency_key, [])
      if deps:
        # Rewrite the section with first occurrences preserved.
        target_dict[dependency_key] = Unify(deps)
def Filter(l, item):
  """Removes item from l."""
  # Equal elements are canonicalized to the first-seen object via setdefault,
  # matching the original interning behavior.
  canonical = {}
  kept = []
  for element in l:
    if element != item:
      kept.append(canonical.setdefault(element, element))
  return kept
def RemoveSelfDependencies(targets):
  """Remove self dependencies from targets that have the prune_self_dependency
  variable set."""
  for target_name, target_dict in targets.items():
    for dependency_key in dependency_sections:
      deps = target_dict.get(dependency_key, [])
      for dep in deps:
        if dep != target_name:
          continue
        # Only prune when the target opts in via its variables dict.
        if targets[dep].get('variables', {}).get('prune_self_dependency', 0):
          target_dict[dependency_key] = Filter(deps, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
  """Remove dependencies having the 'link_dependency' attribute from the 'none'
  targets."""
  for target_name, target_dict in targets.items():
    # Only 'none'-type targets are affected; the check is invariant per
    # target, so hoist it out of the dependency loops.
    if target_dict.get('type', None) != 'none':
      continue
    for dependency_key in dependency_sections:
      deps = target_dict.get(dependency_key, [])
      for t in deps:
        if targets[t].get('variables', {}).get('link_dependency', 0):
          target_dict[dependency_key] = \
              Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
  """
  One node of the dependency graph built over targets or .gyp files.

  Attributes:
    ref: A reference to an object that this DependencyGraphNode represents.
    dependencies: List of DependencyGraphNodes on which this one depends.
    dependents: List of DependencyGraphNodes that depend on this one.
  """
  class CircularException(GypError):
    # Raised when a cycle is detected in the graph.
    pass
  def __init__(self, ref):
    self.ref = ref
    self.dependencies = []
    self.dependents = []
  def __repr__(self):
    return '<DependencyGraphNode: %r>' % self.ref
  def FlattenToList(self):
    """Returns a topologically sorted list of the refs reachable from self.

    Implements Kahn's algorithm: a node is emitted only once all of its
    dependencies have been emitted.
    """
    # flat_list is the sorted list of dependencies - actually, the list items
    # are the "ref" attributes of DependencyGraphNodes. Every target will
    # appear in flat_list after all of its dependencies, and before all of its
    # dependents.
    flat_list = OrderedSet()
    # in_degree_zeros is the list of DependencyGraphNodes that have no
    # dependencies not in flat_list. Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
    in_degree_zeros = set(self.dependents[:])
    while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so they
      # can be appended to flat_list. Take these nodes out of in_degree_zeros
      # as work progresses, so that the next node to process from the list can
      # always be accessed at a consistent position.
      # NOTE: set.pop() returns an arbitrary element, so the relative order
      # of independent nodes in flat_list is not deterministic.
      node = in_degree_zeros.pop()
      flat_list.add(node.ref)
      # Look at dependents of the node just added to flat_list. Some of them
      # may now belong in in_degree_zeros.
      for node_dependent in node.dependents:
        is_in_degree_zero = True
        # TODO: We want to check through the
        # node_dependent.dependencies list but if it's long and we
        # always start at the beginning, then we get O(n^2) behaviour.
        for node_dependent_dependency in node_dependent.dependencies:
          if not node_dependent_dependency.ref in flat_list:
            # The dependent one or more dependencies not in flat_list. There
            # will be more chances to add it to flat_list when examining
            # it again as a dependent of those other dependencies, provided
            # that there are no cycles.
            is_in_degree_zero = False
            break
        if is_in_degree_zero:
          # All of the dependent's dependencies are already in flat_list. Add
          # it to in_degree_zeros where it will be processed in a future
          # iteration of the outer loop.
          in_degree_zeros.add(node_dependent)
    return list(flat_list)
  def FindCycles(self):
    """
    Returns a list of cycles in the graph, where each cycle is its own list.
    """
    results = []
    visited = set()
    def Visit(node, path):
      # Depth-first walk over dependents; a child already on the current
      # path closes a cycle, which is recorded from the child back to its
      # earlier occurrence.
      for child in node.dependents:
        if child in path:
          results.append([child] + path[:path.index(child) + 1])
        elif not child in visited:
          visited.add(child)
          Visit(child, [child] + path)
    visited.add(self)
    Visit(self, [self])
    return results
  def DirectDependencies(self, dependencies=None):
    """Returns a list of just direct dependencies."""
    # NOTE(review): "== None" works here but "is None" is the idiomatic
    # comparison.
    if dependencies == None:
      dependencies = []
    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref != None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)
    return dependencies
  def _AddImportedDependencies(self, targets, dependencies=None):
    """Given a list of direct dependencies, adds indirect dependencies that
    other dependencies have declared to export their settings.
    This method does not operate on self. Rather, it operates on the list
    of dependencies in the |dependencies| argument. For each dependency in
    that list, if any declares that it exports the settings of one of its
    own dependencies, those dependencies whose settings are "passed through"
    are added to the list. As new items are added to the list, they too will
    be processed, so it is possible to import settings through multiple levels
    of dependencies.
    This method is not terribly useful on its own, it depends on being
    "primed" with a list of direct dependencies such as one provided by
    DirectDependencies. DirectAndImportedDependencies is intended to be the
    public entry point.
    """
    if dependencies == None:
      dependencies = []
    index = 0
    while index < len(dependencies):
      dependency = dependencies[index]
      dependency_dict = targets[dependency]
      # Add any dependencies whose settings should be imported to the list
      # if not already present. Newly-added items will be checked for
      # their own imports when the list iteration reaches them.
      # Rather than simply appending new items, insert them after the
      # dependency that exported them. This is done to more closely match
      # the depth-first method used by DeepDependencies.
      add_index = 1
      for imported_dependency in \
          dependency_dict.get('export_dependent_settings', []):
        if imported_dependency not in dependencies:
          dependencies.insert(index + add_index, imported_dependency)
          add_index = add_index + 1
      index = index + 1
    return dependencies
  def DirectAndImportedDependencies(self, targets, dependencies=None):
    """Returns a list of a target's direct dependencies and all indirect
    dependencies that a dependency has advertised settings should be exported
    through the dependency for.
    """
    dependencies = self.DirectDependencies(dependencies)
    return self._AddImportedDependencies(targets, dependencies)
  def DeepDependencies(self, dependencies=None):
    """Returns an OrderedSet of all of a target's dependencies, recursively."""
    if dependencies is None:
      # Using a list to get ordered output and a set to do fast "is it
      # already added" checks.
      dependencies = OrderedSet()
    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is None:
        continue
      if dependency.ref not in dependencies:
        dependencies.add(dependency.ref)
        dependency.DeepDependencies(dependencies)
    return dependencies
  def _LinkDependenciesInternal(self, targets, include_shared_libraries,
                                dependencies=None, initial=True):
    """Returns an OrderedSet of dependency targets that are linked
    into this target.
    This function has a split personality, depending on the setting of
    |initial|. Outside callers should always leave |initial| at its default
    setting.
    When adding a target to the list of dependencies, this function will
    recurse into itself with |initial| set to False, to collect dependencies
    that are linked into the linkable target for which the list is being built.
    If |include_shared_libraries| is False, the resulting dependencies will not
    include shared_library targets that are linked into this target.
    """
    if dependencies is None:
      # Using a list to get ordered output and a set to do fast "is it
      # already added" checks.
      dependencies = OrderedSet()
    # Check for None, corresponding to the root node.
    if self.ref is None:
      return dependencies
    # It's kind of sucky that |targets| has to be passed into this function,
    # but that's presently the easiest way to access the target dicts so that
    # this function can find target types.
    if 'target_name' not in targets[self.ref]:
      raise GypError("Missing 'target_name' field in target.")
    if 'type' not in targets[self.ref]:
      raise GypError("Missing 'type' field in target %s" %
                     targets[self.ref]['target_name'])
    target_type = targets[self.ref]['type']
    is_linkable = target_type in linkable_types
    if initial and not is_linkable:
      # If this is the first target being examined and it's not linkable,
      # return an empty list of link dependencies, because the link
      # dependencies are intended to apply to the target itself (initial is
      # True) and this target won't be linked.
      return dependencies
    # Don't traverse 'none' targets if explicitly excluded.
    if (target_type == 'none' and
        not targets[self.ref].get('dependencies_traverse', True)):
      dependencies.add(self.ref)
      return dependencies
    # Executables and loadable modules are already fully and finally linked.
    # Nothing else can be a link dependency of them, there can only be
    # dependencies in the sense that a dependent target might run an
    # executable or load the loadable_module.
    if not initial and target_type in ('executable', 'loadable_module'):
      return dependencies
    # Shared libraries are already fully linked. They should only be included
    # in |dependencies| when adjusting static library dependencies (in order to
    # link against the shared_library's import lib), but should not be included
    # in |dependencies| when propagating link_settings.
    # The |include_shared_libraries| flag controls which of these two cases we
    # are handling.
    if (not initial and target_type == 'shared_library' and
        not include_shared_libraries):
      return dependencies
    # The target is linkable, add it to the list of link dependencies.
    if self.ref not in dependencies:
      dependencies.add(self.ref)
      if initial or not is_linkable:
        # If this is a subsequent target and it's linkable, don't look any
        # further for linkable dependencies, as they'll already be linked into
        # this target linkable. Always look at dependencies of the initial
        # target, and always look at dependencies of non-linkables.
        for dependency in self.dependencies:
          dependency._LinkDependenciesInternal(targets,
                                               include_shared_libraries,
                                               dependencies, False)
    return dependencies
  def DependenciesForLinkSettings(self, targets):
    """
    Returns a list of dependency targets whose link_settings should be merged
    into this target.
    """
    # TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
    # link_settings are propagated. So for now, we will allow it, unless the
    # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
    # False. Once chrome is fixed, we can remove this flag.
    include_shared_libraries = \
        targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
    return self._LinkDependenciesInternal(targets, include_shared_libraries)
  def DependenciesToLinkAgainst(self, targets):
    """
    Returns a list of dependency targets that are linked into this target.
    """
    return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
  """Builds the target dependency graph and its flattened ordering.

  Returns a two-item list: a dict mapping target names to
  DependencyGraphNodes, and a flattened (dependency-ordered) list of target
  names. Raises DependencyGraphNode.CircularException on cycles.
  """
  # Create a DependencyGraphNode for each target. Put it into a dict for easy
  # access.
  dependency_nodes = {}
  for target, spec in targets.items():
    if target not in dependency_nodes:
      dependency_nodes[target] = DependencyGraphNode(target)
  # Set up the dependency links. Targets that have no dependencies are treated
  # as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for target, spec in targets.items():
    target_node = dependency_nodes[target]
    target_build_file = gyp.common.BuildFile(target)
    dependencies = spec.get('dependencies')
    if not dependencies:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
      continue
    for dependency in dependencies:
      dependency_node = dependency_nodes.get(dependency)
      if not dependency_node:
        raise GypError("Dependency '%s' not found while "
                       "trying to load target %s" % (dependency, target))
      target_node.dependencies.append(dependency_node)
      dependency_node.dependents.append(target_node)
  flat_list = root_node.FlattenToList()
  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(targets):
    if not root_node.dependents:
      # If all targets have dependencies, add the first target as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      target = next(iter(targets))
      target_node = dependency_nodes[target]
      target_node.dependencies.append(root_node)
      root_node.dependents.append(target_node)
    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
  return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
  """Verifies that the .gyp file dependency graph is acyclic.

  Builds a graph whose nodes are the .gyp files containing the targets in
  |targets| and whose edges are cross-file target dependencies, then raises
  DependencyGraphNode.CircularException if a cycle is found.
  """
  # Create a DependencyGraphNode for each gyp file containing a target. Put
  # it into a dict for easy access.
  dependency_nodes = {}
  for target in targets:
    build_file = gyp.common.BuildFile(target)
    if build_file not in dependency_nodes:
      dependency_nodes[build_file] = DependencyGraphNode(build_file)
  # Set up the dependency links.
  for target, spec in targets.items():
    build_file = gyp.common.BuildFile(target)
    build_file_node = dependency_nodes[build_file]
    target_dependencies = spec.get('dependencies', [])
    for dependency in target_dependencies:
      try:
        dependency_build_file = gyp.common.BuildFile(dependency)
      except GypError as e:
        gyp.common.ExceptionAppend(
            e, 'while computing dependencies of .gyp file %s' % build_file)
        raise
      if dependency_build_file == build_file:
        # A .gyp file is allowed to refer back to itself.
        continue
      dependency_node = dependency_nodes.get(dependency_build_file)
      if not dependency_node:
        # Fixed: this message previously misspelled "Dependency" as
        # "Dependancy".
        raise GypError("Dependency '%s' not found" % dependency_build_file)
      if dependency_node not in build_file_node.dependencies:
        build_file_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(build_file_node)
  # Files that have no dependencies are treated as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for build_file_node in dependency_nodes.values():
    if len(build_file_node.dependencies) == 0:
      build_file_node.dependencies.append(root_node)
      root_node.dependents.append(build_file_node)
  flat_list = root_node.FlattenToList()
  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(dependency_nodes):
    if not root_node.dependents:
      # If all files have dependencies, add the first file as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      # next(iter(...)) works under both Python 2 and 3, unlike the
      # previous .values()[0] which requires a list.
      file_node = next(iter(dependency_nodes.values()))
      file_node.dependencies.append(root_node)
      root_node.dependents.append(file_node)
    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  """Merges dependency-exported settings dicts into their dependent targets.

  |key| selects which settings flow along the graph and should be one of
  'all_dependent_settings', 'direct_dependent_settings', or 'link_settings'.
  """
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    node = dependency_nodes[target]
    # Pick the dependency set appropriate to the settings key.
    if key == 'all_dependent_settings':
      dependencies = node.DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = node.DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = node.DependenciesForLinkSettings(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                     'dependencies for ' + key)
    for dependency in dependencies:
      dependency_dict = targets[dependency]
      if key not in dependency_dict:
        continue
      dependency_build_file = gyp.common.BuildFile(dependency)
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  """Rewrite each target's 'dependencies' list for static-library linking.

  Recompute target "dependencies" properties.  For each static library
  target, remove "dependencies" entries referring to other static libraries,
  unless the dependency has the "hard_dependency" attribute set.  For each
  linkable target, add a "dependencies" entry referring to all of the
  target's computed list of link dependencies (including static libraries)
  if no such entry is already present.
  """
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']
    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue
      # Preserve the unadjusted list (a copy) for generators that need it.
      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]
      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done when
      # a dependent relies on some side effect other than just the build
      # product, like a rule or action output. Further, if a target has a
      # non-hard dependency, but that dependency exports a hard dependency,
      # the non-hard dependency can safely be removed, but the exported hard
      # dependency must be added to the target to keep the same dependency
      # ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]
        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and \
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and \
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1
      # Update the dependencies. If the dependencies list is empty, it's not
      # needed, so unhook it.
      if len(dependencies) > 0:
        target_dict['dependencies'] = dependencies
      else:
        del target_dict['dependencies']
    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target.  Add them to the dependencies list if they're not already
      # present.
      link_dependencies = \
          dependency_nodes[target].DependenciesToLinkAgainst(targets)
      for dependency in link_dependencies:
        if dependency == target:
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
      # Sort the dependencies list in the order from dependents to
      # dependencies.  e.g. If A and B depend on C and C depends on D,
      # sort them in A, B, C, D.
      # Note: flat_list is already sorted in the order from dependencies to
      # dependents.
      if sort_dependencies and 'dependencies' in target_dict:
        target_dict['dependencies'] = [dep for dep in reversed(flat_list)
                                       if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
  """Return |item| re-expressed relative to |to_file|'s directory."""
  # If item is a relative path, it's relative to the build file dict that it's
  # coming from.  Fix it up to make it relative to the build file dict that
  # it's going into.
  # Exception: any |item| that begins with these special characters is
  # returned without modification.
  #   /   Used when a path is already absolute (shortcut optimization;
  #       such paths would be returned as absolute anyway)
  #   $   Used for build environment variables
  #   -   Used for some build environment flags (such as -lapr-1 in a
  #       "libraries" section)
  #   <   Used for our own variable and command expansions (see ExpandVariables)
  #   >   Used for our own variable and command expansions (see ExpandVariables)
  #   ^   Used for our own variable and command expansions (see ExpandVariables)
  #
  #   "/' Used when a value is quoted.  If these are present, then we
  #       check the second character instead.
  #
  if to_file == fro_file or exception_re.match(item):
    return item
  else:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    ret = os.path.normpath(os.path.join(
        gyp.common.RelativePath(os.path.dirname(fro_file),
                                os.path.dirname(to_file)),
        item)).replace('\\', '/')
    if item[-1] == '/':
      # normpath drops a trailing slash; restore it so directory-like items
      # keep their form.
      ret += '/'
    return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  """Merge the list |fro| into the list |to|, in place.

  |to_file| and |fro_file| are the build files the lists come from; when
  |is_paths| is True, relative path items are rebased with MakePathRelative.
  Items are appended when |append| is True, otherwise prepended.  Strings
  not beginning with "-" (and ints) are treated as singletons that may
  appear at most once in |to|.
  """
  # Python documentation recommends objects which do not support hash
  # set this value to None. Python library objects follow this rule.
  is_hashable = lambda val: val.__hash__
  # If x is hashable, returns whether x is in s. Else returns whether x is in l.
  def is_in_set_or_list(x, s, l):
    if is_hashable(x):
      return x in s
    return x in l
  prepend_index = 0
  # Make membership testing of hashables in |to| (in particular, strings)
  # faster.
  hashable_to_set = set(x for x in to if is_hashable(x))
  for item in fro:
    singleton = False
    if type(item) in (str, int):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item
      if not (type(item) is str and item.startswith('-')):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        singleton = True
    elif type(item) is dict:
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif type(item) is list:
      # Recurse, making a copy of the list.  If the list contains any
      # descendant dicts, path fixing will occur.  Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|.  append shouldn't
      # matter anyway because the new |to_item| list is empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError, \
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__
    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
        to.append(to_item)
        if is_hashable(to_item):
          hashable_to_set.add(to_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend.  This ensures that the
      # item appears at the earliest possible position in the list.
      while singleton and to_item in to:
        to.remove(to_item)
      # Don't just insert everything at index 0.  That would prepend the new
      # items to the list in reverse order, which would be an unwelcome
      # surprise.
      to.insert(prepend_index, to_item)
      if is_hashable(to_item):
        hashable_to_set.add(to_item)
      prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
  """Merge the dict |fro| into the dict |to|, in place, with copy semantics.

  Scalars overwrite, dicts recurse, and lists are merged according to the
  policy suffix on the from-key ('=' replace, '+' prepend, '?' set-if-absent,
  no suffix append).  |to_file| and |fro_file| are the build files the dicts
  come from; path-like values in path sections are rebased between them.
  """
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
    # copy semantics.  Something else may want to merge from the |fro| dict
    # later, and having the same dict ref pointed to twice in the tree isn't
    # what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if type(v) in (str, int):
        if type(to[k]) not in (str, int):
          bad_merge = True
      elif type(v) is not type(to[k]):
        bad_merge = True
      if bad_merge:
        raise TypeError, \
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k
    if type(v) in (str, int):
      # Overwrite the existing value, if any.  Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif type(v) is dict:
      # Recurse, guaranteeing copies will be made of objects that require it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif type(v) is list:
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has          ...the to-list will have this action
      # this character appended:...  applied when receiving the from-list:
      #                           =  replace
      #                           +  prepend
      #                           ?  set, only if to-list does not yet exist
      #                      (none)  append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']
      # Some combinations of merge policies appearing together are meaningless.
      # It's stupid to replace and append simultaneously, for example.  Append
      # and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise GypError('Incompatible list policies ' + k + ' and ' +
                         list_incompatible)
      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        elif type(to[list_base]) is not list:
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError, \
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')'
      else:
        to[list_base] = []
      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError, \
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k
def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Merge |configuration| (and, first, its 'inherit_from' ancestors) from
  target_dict['configurations'] into new_configuration_dict.

  |visited| lists the configurations already merged along this inheritance
  path; it terminates inheritance cycles.
  """
  # Skip configurations already merged on this path.
  if configuration in visited:
    return
  config_dict = target_dict['configurations'][configuration]
  # Merge every parent first so this configuration's own settings win.
  for parent_name in config_dict.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, parent_name,
                               visited + [configuration])
  MergeDicts(new_configuration_dict, config_dict, build_file, build_file)
  # 'abstract' only marks a configuration as non-concrete; it must not leak
  # into the merged result.
  new_configuration_dict.pop('abstract', None)
def SetUpConfigurations(target, target_dict):
  """Expand target_dict['configurations'] so each concrete configuration
  inherits the target-level settings plus its 'inherit_from' parents.

  Abstract configurations are dropped, and every configurable key is moved
  out of the target scope into the per-configuration dicts.
  """
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /).  Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']
  build_file = gyp.common.BuildFile(target)
  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    # Pick the alphabetically-first concrete (non-abstract) configuration.
    concrete = [i for (i, config) in target_dict['configurations'].iteritems()
                if not config.get('abstract')]
    target_dict['default_configuration'] = sorted(concrete)[0]
  merged_configurations = {}
  configs = target_dict['configurations']
  for (configuration, old_configuration_dict) in configs.iteritems():
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = {}
    for (key, target_val) in target_dict.iteritems():
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if not key_base in non_configuration_keys:
        new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])
    merged_configurations[configuration] = new_configuration_dict
  # Put the new configurations back into the target dict as a configuration.
  for configuration in merged_configurations.keys():
    target_dict['configurations'][configuration] = (
        merged_configurations[configuration])
  # Now drop all the abstract ones.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]
  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]
  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations'].keys():
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict.keys():
      if key in invalid_configuration_keys:
        raise GypError('%s not allowed in the %s configuration, found in '
                       'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.

  An exclusion list is in a dict key named with a trailing "!", like
  "sources!".  Every item in such a list is removed from the associated
  main list, which in this example, would be "sources".  Removed items are
  placed into a "sources_excluded" list in the dict.

  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list.  Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc.  The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list.  Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!").  Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter.  Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".
  """
  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists.  Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # then into |lists|.  This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.
  lists = []
  del_lists = []
  for key, value in the_dict.iteritems():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue
    if type(value) is not list:
      raise ValueError, name + ' key ' + key + ' must be list, not ' + \
          value.__class__.__name__
    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list.  Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue
    if type(the_dict[list_key]) is not list:
      value = the_dict[list_key]
      raise ValueError, name + ' key ' + list_key + \
                        ' must be list, not ' + \
                        value.__class__.__name__ + ' when applying ' + \
                        {'!': 'exclusion', '/': 'regex'}[operation]
    if not list_key in lists:
      lists.append(list_key)
  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]
  for list_key in lists:
    the_list = the_dict[list_key]
    # Initialize the list_actions list, which is parallel to the_list.  Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied.  Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1.  Includes and
    # excludes override previous actions.  All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = list((-1,) * len(the_list))
    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      for exclude_item in the_dict[exclude_key]:
        for index in xrange(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0
      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]
    regex_key = list_key + '/'
    if regex_key in the_dict:
      for regex_item in the_dict[regex_key]:
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)
        if action == 'exclude':
          # This item matches an exclude regex, so set its value to 0 (exclude).
          action_value = 0
        elif action == 'include':
          # This item matches an include regex, so set its value to 1 (include).
          action_value = 1
        else:
          # This is an action that doesn't make any sense.
          raise ValueError, 'Unrecognized action ' + action + ' in ' + name + \
                            ' key ' + regex_key
        for index in xrange(0, len(the_list)):
          list_item = the_list[index]
          if list_actions[index] == action_value:
            # Even if the regex matches, nothing will change so continue (regex
            # searches are expensive).
            continue
          if pattern_re.search(list_item):
            # Regular expression match.
            list_actions[index] = action_value
      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]
    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded").  The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     ' to applying exclusion/regex filters for ' + list_key)
    excluded_list = []
    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift.  That means
    # that things need to be prepended to excluded_list to maintain them in the
    # same order that they existed in the_list.
    for index in xrange(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude).  Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]
    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list
  # Now recurse into subdicts and lists that may contain dicts.
  for key, value in the_dict.iteritems():
    if type(value) is dict:
      ProcessListFiltersInDict(key, value)
    elif type(value) is list:
      ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
  """Recursively run ProcessListFiltersInDict on every dict nested inside
  the_list (at any depth)."""
  for element in the_list:
    if type(element) is list:
      ProcessListFiltersInList(name, element)
    elif type(element) is dict:
      ProcessListFiltersInDict(name, element)
def ValidateTargetType(target, target_dict):
  """Ensures the 'type' field on the target is one of the known types.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.

  Raises an exception on error.
  """
  VALID_TARGET_TYPES = ('executable', 'loadable_module',
                        'static_library', 'shared_library',
                        'none')
  target_type = target_dict.get('type', None)
  if target_type not in VALID_TARGET_TYPES:
    raise GypError("Target %s has an invalid target type '%s'. "
                   "Must be one of %s." %
                   (target, target_type, '/'.join(VALID_TARGET_TYPES)))
  # standalone_static_library only makes sense for static libraries; reject
  # it anywhere else.
  if target_dict.get('standalone_static_library', 0):
    if target_type != 'static_library':
      raise GypError('Target %s has type %s but standalone_static_library '
                     'flag is only valid for static_library type.' %
                     (target, target_type))
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.
  """
  # Track every rule name and every (dot-stripped) extension seen so far, so
  # duplicates can be reported with the rule that claimed them first.
  rules_by_name = {}
  rules_by_extension = {}
  for rule in target_dict.get('rules', []):
    rule_name = rule['rule_name']
    if rule_name in rules_by_name:
      raise GypError('rule %s exists in duplicate, target %s' %
                     (rule_name, target))
    rules_by_name[rule_name] = rule
    rule_extension = rule['extension']
    if rule_extension.startswith('.'):
      rule_extension = rule_extension[1:]
    if rule_extension in rules_by_extension:
      raise GypError(('extension %s associated with multiple rules, ' +
                      'target %s rules %s and %s') %
                     (rule_extension, target,
                      rules_by_extension[rule_extension]['rule_name'],
                      rule_name))
    rules_by_extension[rule_extension] = rule
    # rule_sources is an output of this function; it must not appear in the
    # input spec.
    if 'rule_sources' in rule:
      raise GypError(
          'rule_sources must not exist in input, target %s rule %s' %
          (target, rule_name))
    # Collect every source whose extension matches this rule, scanning
    # 'sources' plus any generator-specific source lists.
    matched_sources = []
    for source_key in ['sources'] + list(extra_sources_for_rules):
      for source in target_dict.get(source_key, []):
        source_extension = os.path.splitext(source)[1]
        if source_extension.startswith('.'):
          source_extension = source_extension[1:]
        if source_extension == rule_extension:
          matched_sources.append(source)
    if matched_sources:
      rule['rule_sources'] = matched_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
  """Validates the optional 'run_as' section of a target.

  'run_as' must be a dict with a list-valued 'action', an optional string
  'working_directory', and an optional dict 'environment'.  Raises GypError
  on any violation; targets without 'run_as' pass trivially.
  """
  target_name = target_dict.get('target_name')
  run_as = target_dict.get('run_as')
  if not run_as:
    # 'run_as' is optional; nothing to validate.
    return
  def _fail(message):
    # Every diagnostic names the offending target and its build file.
    raise GypError(message % (target_name, build_file))
  if type(run_as) is not dict:
    _fail("The 'run_as' in target %s from file %s should be a "
          "dictionary.")
  action = run_as.get('action')
  if not action:
    _fail("The 'run_as' in target %s from file %s must have an "
          "'action' section.")
  if type(action) is not list:
    _fail("The 'action' for 'run_as' in target %s from file %s "
          "must be a list.")
  working_directory = run_as.get('working_directory')
  if working_directory and type(working_directory) is not str:
    _fail("The 'working_directory' for 'run_as' in target %s "
          "in file %s should be a string.")
  environment = run_as.get('environment')
  if environment and type(environment) is not dict:
    _fail("The 'environment' for 'run_as' in target %s "
          "in file %s should be a dictionary.")
def ValidateActionsInTarget(target, target_dict, build_file):
  '''Validates the inputs to the actions in a target.'''
  target_name = target_dict.get('target_name')
  for action in target_dict.get('actions', []):
    # Every action needs a name for diagnostics and generator output.
    if not action.get('action_name'):
      raise GypError("Anonymous action in target %s. "
                     "An action must have an 'action_name' field." %
                     target_name)
    # An 'inputs' list must be present, even if empty.
    if action.get('inputs', None) is None:
      raise GypError('Action in target %s has no inputs.' % target_name)
    command = action.get('action')
    # The first element of the command is the program to run; it must not be
    # empty.
    if command and not command[0]:
      raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
  """Given dict the_dict, recursively converts all integers into strings.

  Both values and keys are converted, in place; nested dicts and lists are
  processed recursively.
  """
  # Iterate over a snapshot of the items: the loop deletes int keys and
  # reinserts their str replacements, and mutating a dict while iterating
  # over a live view is an error (and could revisit reinserted keys).
  for k, v in list(the_dict.items()):
    if type(v) is int:
      v = str(v)
      the_dict[k] = v
    elif type(v) is dict:
      TurnIntIntoStrInDict(v)
    elif type(v) is list:
      TurnIntIntoStrInList(v)
    if type(k) is int:
      # Re-key int keys as strings, keeping the (possibly converted) value.
      del the_dict[k]
      the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
  """Given list the_list, recursively converts all integers into strings.

  The conversion happens in place; nested dicts and lists are processed
  recursively.
  """
  # enumerate instead of an index loop: idiomatic, and unlike xrange it
  # exists on every Python version.  Assigning by index during enumeration
  # is safe because the list's length never changes.
  for index, item in enumerate(the_list):
    if type(item) is int:
      the_list[index] = str(item)
    elif type(item) is dict:
      TurnIntIntoStrInDict(item)
    elif type(item) is list:
      TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
                         data):
  """Return only the targets that are deep dependencies of |root_targets|."""
  # Fully qualify the requested root targets first; unknown names are fatal.
  qualified_root_targets = []
  for target in root_targets:
    target = target.strip()
    qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
    if not qualified_targets:
      raise GypError("Could not find target %s" % target)
    qualified_root_targets.extend(qualified_targets)
  # Keep each root target plus everything it transitively depends on.
  wanted_targets = {}
  for target in qualified_root_targets:
    wanted_targets[target] = targets[target]
    for dependency in dependency_nodes[target].DeepDependencies():
      wanted_targets[dependency] = targets[dependency]
  # Filter rather than rebuild, to preserve flat_list's dependency ordering.
  wanted_flat_list = [t for t in flat_list if t in wanted_targets]
  # Prune unwanted targets from each build_file's data dict.
  for build_file in data['target_build_files']:
    if not 'targets' in data[build_file]:
      continue
    new_targets = []
    for target in data[build_file]['targets']:
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target['target_name'],
                                                  target['toolset'])
      if qualified_name in wanted_targets:
        new_targets.append(target)
    data[build_file]['targets'] = new_targets
  return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.
  """
  # Map 'subdirectory:target_name' to the .gyp file that first claimed it.
  seen = {}
  for target in targets:
    # Split 'path/to/file.gyp:target_name' into its path and name parts.
    path, name = target.rsplit(':', 1)
    # Split 'path/to/file.gyp' into the directory and the .gyp file name.
    subdir, gyp_file = os.path.split(path)
    if not subdir:
      # Use '.' for the current directory '' so error messages read better.
      subdir = '.'
    key = subdir + ':' + name
    if key in seen:
      raise GypError('Duplicate target name "%s" in directory "%s" used both '
                     'in "%s" and "%s".' % (name, subdir, gyp_file, seen[key]))
    seen[key] = gyp_file
def SetGeneratorGlobals(generator_input_info):
  """Configure this module's global settings from a generator's input info
  dict (path sections, non-configuration keys, toolset support, filelist
  paths)."""
  # Set up path_sections and non_configuration_keys with the default data plus
  # the generator-specific data.
  global path_sections
  path_sections = set(base_path_sections)
  path_sections.update(generator_input_info['path_sections'])
  global non_configuration_keys
  non_configuration_keys = base_non_configuration_keys[:]
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
  global multiple_toolsets
  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']
  global generator_filelist_paths
  generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check, parallel, root_targets):
  """Load, process, and validate every .gyp file reachable from |build_files|.

  Returns [flat_list, targets, data]: flat_list is the dependency-sorted list
  of qualified target names, targets maps qualified names to fully-processed
  target dicts, and data maps build file paths to their processed contents.
  """
  SetGeneratorGlobals(generator_input_info)
  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  aux_data = {}
  # Normalize paths everywhere.  This is important because paths will be
  # used as keys to the data dict and for references between input files.
  build_files = set(map(os.path.normpath, build_files))
  if parallel:
    LoadTargetBuildFilesParallel(build_files, data, aux_data,
                                 variables, includes, depth, check,
                                 generator_input_info)
  else:
    for build_file in build_files:
      try:
        LoadTargetBuildFile(build_file, data, aux_data,
                            variables, includes, depth, check, True)
      except Exception, e:
        gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
        raise
  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)
  # Fully qualify all dependency links.
  QualifyDependencies(targets)
  # Remove self-dependencies from targets that have 'prune_self_dependencies'
  # set to 1.
  RemoveSelfDependencies(targets)
  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)
  # Remove all dependencies marked as 'link_dependency' from the targets of
  # type 'none'.
  RemoveLinkDependenciesFromNoneTargets(targets)
  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
  for target_name, target_dict in targets.iteritems():
    # Filter the dependency sections in isolation: pull them into a temporary
    # dict, process it, and write the results back.
    tmp_dict = {}
    for key_base in dependency_sections:
      for op in ('', '!', '/'):
        key = key_base + op
        if key in target_dict:
          tmp_dict[key] = target_dict[key]
          del target_dict[key]
    ProcessListFiltersInDict(target_name, tmp_dict)
    # Write the results back to |target_dict|.
    for key in tmp_dict:
      target_dict[key] = tmp_dict[key]
  # Make sure every dependency appears at most once.
  RemoveDuplicateDependencies(targets)
  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)
  [dependency_nodes, flat_list] = BuildDependencyList(targets)
  if root_targets:
    # Remove, from |targets| and |flat_list|, the targets that are not deep
    # dependencies of the targets specified in |root_targets|.
    targets, flat_list = PruneUnwantedTargets(
        targets, flat_list, dependency_nodes, root_targets, data)
  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)
  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]
  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  gii = generator_input_info
  if gii['generator_wants_static_library_dependencies_adjusted']:
    AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    gii['generator_wants_sorted_dependencies'])
  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATE, variables, build_file)
  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)
  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)
  # Apply "latelate" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATELATE, variables, build_file)
  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateTargetType(target, target_dict)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)
  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)
  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
|
cloudpbl-senrigan/combinator
|
tools/gyp/pylib/gyp/input.py
|
Python
|
mit
| 113,529
|
[
"VisIt"
] |
8f4a86f75bd4dcd7d56e9cdf0a71a3c2cee6cda0f8b6511d5d5f2edc2d61aacf
|
# -*- coding: utf-8 -*-
################################################################################
###
### ReadLAMMPSdatafile
###
################################################################################
import numpy as np
def get_box(filename):
    """Return the simulation box edges and volume from a LAMMPS data file.

    The header of a LAMMPS data file contains lines of the form
    ``<lo> <hi> xlo xhi`` (and likewise for ``ylo yhi`` / ``zlo zhi``);
    the first two whitespace-separated tokens are the edge coordinates.

    Parameters
    ----------
    filename : str
        Path to the LAMMPS data file.

    Returns
    -------
    box : (2, 3) numpy.ndarray
        Row 0 holds the lower edges (xlo, ylo, zlo); row 1 holds the
        upper edges (xhi, yhi, zhi).
    volume : float
        Product of the three edge lengths.

    Raises
    ------
    ValueError
        If end-of-file is reached before a "zlo zhi" line is found.
        (The previous implementation looped forever in that case,
        because ``readline()`` keeps returning '' at EOF.)
    """
    box = np.zeros((2, 3))
    with open(filename, 'r') as f:
        for raw in f:
            line = raw.split()
            if 'xlo' in line:
                box[:, 0] = [float(line[0]), float(line[1])]
            if 'ylo' in line:
                box[:, 1] = [float(line[0]), float(line[1])]
            if 'zlo' in line:
                box[:, 2] = [float(line[0]), float(line[1])]
                break
        else:
            # EOF without a "zlo zhi" line: fail loudly instead of hanging.
            raise ValueError(
                "No 'zlo zhi' line found in {!r}".format(filename))
    volume = np.prod(box[1, :] - box[0, :])
    return box, volume
## ** SHOULD ADD ADDITIONAL OF READ FUNCTIONS
|
lorisercole/thermocepstrum
|
sportran/i_o/read_lammps_datafile.py
|
Python
|
gpl-3.0
| 864
|
[
"LAMMPS"
] |
f52777ae7555d5f00fe89b8327d8e8e8bc5769c1a94fbcc2aebbaaa13b7df265
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageAnisotropicDiffusion3D(SimpleVTKClassModuleBase):
    """Auto-generated DeVIDE module wrapping VTK's
    vtkImageAnisotropicDiffusion3D image filter.
    """
    def __init__(self, module_manager):
        # Delegate all module wiring to the mixin base class: it wraps the
        # raw VTK object and declares a single vtkImageData input and a
        # single vtkImageData output. replaceDoc=True presumably substitutes
        # the VTK class documentation for the module help text -- TODO
        # confirm against SimpleVTKClassModuleBase.
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkImageAnisotropicDiffusion3D(), 'Processing.',
            ('vtkImageData',), ('vtkImageData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkImageAnisotropicDiffusion3D.py
|
Python
|
bsd-3-clause
| 517
|
[
"VTK"
] |
0fecf7e9016f955a8842bfb374e1dc09c212b7bb9cd2e02ebba832711d18f687
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import print_function
import os
import sys
import time
import subprocess
if len(sys.argv) not in [5, 6, 7, 8, 9, 10]:
    print("""Usage: %s input_file logfile doperltest top_srcdir doreaptest alt_output_file alt_psi4_exe alt_psi4datadir""" % (sys.argv[0]))
    sys.exit(1)
# extract run condition from arguments
python_exec = sys.argv[0]
infile = sys.argv[1]
logfile = sys.argv[2]
psiautotest = sys.argv[3]  # 'true' => also run the perl autotest script
top_srcdir = sys.argv[4]
# BUG FIX: argv[5] was read unconditionally, but the length check above
# allows len(sys.argv) == 5, which raised IndexError here. Default to
# 'false' (skip the sow/reap checker) when the argument is absent.
sowreap = sys.argv[5] if len(sys.argv) >= 6 else 'false'
if len(sys.argv) >= 7:
    outfile = sys.argv[6]
else:
    outfile = 'output.dat'
if len(sys.argv) >= 8:
    psi = sys.argv[7]
else:
    psi = '../../bin/psi4'
if len(sys.argv) >= 9:
    psidatadir = sys.argv[8]
else:
    psidatadir = os.path.dirname(os.path.realpath(psi)) + '/../share/psi4'
if len(sys.argv) >= 10:
    psilibdir = sys.argv[9] + os.path.sep
else:
    # NOTE(review): abspath('/../') collapses to the filesystem root, which
    # looks suspicious as a default PYTHONPATH -- confirm intended value.
    psilibdir = os.path.abspath('/../')
# open logfile and print test case header
try:
    loghandle = open(logfile, 'a')
except IOError as e:
    print("""I can't write to %s: %s""" % (logfile, e))
    # BUG FIX: without exiting here, the write below would raise NameError
    # on the unbound 'loghandle'.
    sys.exit(1)
loghandle.write("""\n%s\n%s\n""" % (os.path.dirname(infile).split(os.sep)[-1], time.strftime("%Y-%m-%d %H:%M")))
def backtick(exelist):
    """Execute the command-argument list *exelist*, echoing its standard
    output line-by-line to the screen and to the module-global *loghandle*.
    Returns the child's exit status.

    Note: the log file handle is deliberately left open — this function is
    called more than once per test run and the process exit closes it.
    """
    try:
        retcode = subprocess.Popen(exelist, bufsize=0, stdout=subprocess.PIPE, universal_newlines=True)
    except OSError as e:
        sys.stderr.write('Command %s execution failed: %s\n' % (exelist, e.strerror))
        sys.exit(1)
    while True:
        data = retcode.stdout.readline()
        if not data:
            break
        sys.stdout.write(data)   # echo to screen
        loghandle.write(data)    # echo to log file
        loghandle.flush()
    # stdout is exhausted, so wait() returns promptly with the child's exit
    # status. This replaces the original busy-wait poll/sleep loop, which
    # also made a trailing loghandle.close() unreachable dead code (removed,
    # along with the never-returned p4out accumulator string).
    return retcode.wait()
# run psi4 and collect testing status from any compare_* in input file
if os.path.isfile(infile):
    pyexitcode = backtick([psi, infile, outfile, '-l', psidatadir])
elif os.path.isfile(infile.replace(".dat", ".py")):
    # pure-python test case: run the .py directly with stdout captured
    infile = infile.replace(".dat", ".py")
    os.environ["PYTHONPATH"] = psilibdir
    outfile = os.path.dirname(infile) + os.path.sep + outfile
    # BUG FIX: the old call backtick(["python", infile, " > ", outfile])
    # passed the literal strings " > " and the outfile name as arguments to
    # the script -- ">" is shell redirection and has no meaning inside an
    # argv list -- so outfile was never written. Redirect stdout explicitly
    # (and use the running interpreter rather than whatever "python" is).
    with open(outfile, 'w') as outhandle:
        proc = subprocess.Popen([sys.executable, infile],
                                stdout=outhandle, universal_newlines=True)
        pyexitcode = proc.wait()
else:
    raise Exception("\n\nError: Input file %s not found\n" % infile)
# optionally run the sow/reap checker against the produced output
if sowreap == 'true':
    try:
        retcode = subprocess.Popen([sys.executable, '%s/tests/reap.py' %
            (top_srcdir), infile, outfile, logfile, psi, psidatadir])
    except OSError as e:
        print("""Can't find reap script: %s """ % (e))
        # BUG FIX: without exiting, the code below would raise NameError on
        # the unbound 'retcode'.
        sys.exit(1)
    # wait() blocks until the child exits and returns its status, replacing
    # the original poll()/sleep busy-wait loop.
    reapexitcode = retcode.wait()
else:
    reapexitcode = None  # sow/reap check skipped
# additionally invoke autotest script comparing output.dat to output.ref
if psiautotest == 'true':
    os.environ['SRCDIR'] = os.path.dirname(infile)
    try:
        retcode = subprocess.Popen(['perl', '%s/tests/psitest.pl' % (top_srcdir), infile, logfile])
    # BUG FIX: Popen failure raises OSError, not IOError (they are aliases
    # on python 3 but distinct on python 2, where the old handler missed).
    except OSError as e:
        print("""Can't find psitest script: %s""" % (e))
        # BUG FIX: without exiting, the code below would raise NameError on
        # the unbound 'retcode'.
        sys.exit(1)
    # wait() blocks until the child exits and returns its status, replacing
    # the original poll()/sleep busy-wait loop.
    plexitcode = retcode.wait()
else:
    plexitcode = None  # perl autotest skipped
# combine, print, and return (0/1) testing status; a skipped stage (None)
# never counts as a failure
failed = (pyexitcode != 0
          or plexitcode not in (None, 0)
          or reapexitcode not in (None, 0))
exitcode = 1 if failed else 0
print('Exit Status: infile (', pyexitcode, '); autotest (', plexitcode, '); sowreap (', reapexitcode, '); overall (', exitcode, ')')
sys.exit(exitcode)
|
kratman/psi4public
|
tests/runtest.py
|
Python
|
gpl-2.0
| 5,050
|
[
"Psi4"
] |
632f4856d6198123141ac97391388b0bbd9178d9c177654af4b98f158bed90a6
|
"""
This module contains deprecation messages and bits of code used elsewhere in the codebase
that is planned to be removed in the next pytest release.
Keeping it in a central location makes it easy to track what is deprecated and should
be removed when the time comes.
All constants defined in this module should be either PytestWarning instances or UnformattedWarning
in case of warnings which need to format their messages.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from _pytest.warning_types import PytestDeprecationWarning
from _pytest.warning_types import RemovedInPytest4Warning
from _pytest.warning_types import UnformattedWarning
# Plain format-string templates (formatted by their call sites).
YIELD_TESTS = "yield tests were removed in pytest 4.0 - {name} will be ignored"
FIXTURE_FUNCTION_CALL = (
    'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n'
    "but are created automatically when test functions request them as parameters.\n"
    "See https://docs.pytest.org/en/latest/fixture.html for more information about fixtures, and\n"
    "https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly about how to update your code."
)
FIXTURE_NAMED_REQUEST = PytestDeprecationWarning(
    "'request' is a reserved name for fixtures and will raise an error in future versions"
)
# BUG FIX: the "{filename}" placeholder had been mangled into the literal
# text "(unknown)"; call sites .format() this template with the name of the
# offending ini file, so the placeholder is restored here.
CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead."
GETFUNCARGVALUE = RemovedInPytest4Warning(
    "getfuncargvalue is deprecated, use getfixturevalue"
)
RAISES_MESSAGE_PARAMETER = PytestDeprecationWarning(
    "The 'message' parameter is deprecated.\n"
    "(did you mean to use `match='some regex'` to check the exception message?)\n"
    "Please comment on https://github.com/pytest-dev/pytest/issues/3974 "
    "if you have concerns about removal of this parameter."
)
RESULT_LOG = PytestDeprecationWarning(
    "--result-log is deprecated and scheduled for removal in pytest 5.0.\n"
    "See https://docs.pytest.org/en/latest/deprecations.html#result-log-result-log for more information."
)
MARK_INFO_ATTRIBUTE = RemovedInPytest4Warning(
    "MarkInfo objects are deprecated as they contain merged marks which are hard to deal with correctly.\n"
    "Please use node.get_closest_marker(name) or node.iter_markers(name).\n"
    "Docs: https://docs.pytest.org/en/latest/mark.html#updating-code"
)
RAISES_EXEC = PytestDeprecationWarning(
    "raises(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly\n\n"
    "See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec"
)
WARNS_EXEC = PytestDeprecationWarning(
    "warns(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly.\n\n"
    "See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec"
)
PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = (
    "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported "
    "because it affects the entire directory tree in a non-explicit way.\n"
    "  {}\n"
    "Please move it to a top level conftest file at the rootdir:\n"
    "  {}\n"
    "For more information, visit:\n"
    "  https://docs.pytest.org/en/latest/deprecations.html#pytest-plugins-in-non-top-level-conftest-files"
)
PYTEST_CONFIG_GLOBAL = PytestDeprecationWarning(
    "the `pytest.config` global is deprecated.  Please use `request.config` "
    "or `pytest_configure` (if you're a pytest plugin) instead."
)
PYTEST_ENSURETEMP = RemovedInPytest4Warning(
    "pytest/tmpdir_factory.ensuretemp is deprecated, \n"
    "please use the tmp_path fixture or tmp_path_factory.mktemp"
)
PYTEST_LOGWARNING = PytestDeprecationWarning(
    "pytest_logwarning is deprecated, no longer being called, and will be removed soon\n"
    "please use pytest_warning_captured instead"
)
# UnformattedWarning defers formatting: the warning class and a template are
# stored, and {args!r} is filled in at emission time.
PYTEST_WARNS_UNKNOWN_KWARGS = UnformattedWarning(
    PytestDeprecationWarning,
    "pytest.warns() got unexpected keyword arguments: {args!r}.\n"
    "This will be an error in future versions.",
)
|
lmregus/Portfolio
|
python/design_patterns/env/lib/python3.7/site-packages/_pytest/deprecated.py
|
Python
|
mit
| 4,095
|
[
"VisIt"
] |
3b97ae5b10ed5f4b28122fc67a2f0c08cada27bd5f325523b76b190a2f89bfaa
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.