text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
"""
Assessment of Generalized Estimating Equations using simulation.
This script checks Gaussian models.
See the generated file "gee_gaussian_simulation_check.txt" for
results.
"""
from statsmodels.compat.python import range, lrange, zip
import scipy
import numpy as np
from itertools import product
from statsmodels.genmod.families import Gaussian
from statsmodels.genmod.generalized_estimating_equations import GEE
from statsmodels.genmod.cov_struct import Autoregressive, Nested
class GEE_simulator(object):
    """Base class for GEE simulation studies.

    Subclasses must set the input parameters below, implement
    ``simulate`` to populate the output attributes, and implement
    ``print_dparams`` to report dependence-parameter estimates.
    """

    #
    # Parameters that must be defined
    #

    # Number of groups
    ngroups = None

    # Standard deviation of the pure errors
    error_sd = None

    # The regression coefficients
    params = None

    # The parameters defining the dependence structure
    dep_params = None

    # The true scale parameter
    scale = None

    #
    # Output parameters
    #

    # Matrix of exogeneous data (rows are cases, columns are
    # variables)
    exog = None

    # Matrix of endogeneous data (len(endog) = exog.shape[0])
    endog = None

    # Matrix of time information (time.shape[0] = len(endog))
    time = None

    # Group labels (len(groups) = len(endog))
    group = None

    # Group sizes are random within this range
    group_size_range = [4, 11]

    # dparams_est is dparams with scale_inv appended
    def print_dparams(self, dparams_est):
        """Write estimated vs. true dependence parameters to the
        report file (must be implemented by subclasses)."""
        raise NotImplementedError
class AR_simulator(GEE_simulator):
    """Simulate clustered Gaussian data with AR-type dependence.

    Within a cluster, the correlation between two observations is
    dep_params[0] raised to the power of the distance between their
    bivariate time values.
    """

    # The distance function for determining AR correlations.
    distfun = [lambda x, y: np.sqrt(np.sum((x-y)**2)),]

    def print_dparams(self, dparams_est):
        """Write estimated and true dependence parameters to OUT.

        dparams_est[0] is the estimated AR coefficient and
        dparams_est[1] is the estimated inverse scale parameter.
        """
        OUT.write("AR coefficient estimate: %8.4f\n" %
                  dparams_est[0])
        OUT.write("AR coefficient truth: %8.4f\n" %
                  self.dep_params[0])
        OUT.write("Error variance estimate: %8.4f\n" %
                  dparams_est[1])
        OUT.write("Error variance truth: %8.4f\n" %
                  self.error_sd**2)
        OUT.write("\n")

    def simulate(self):
        """Generate the endog, exog, time, and group arrays."""
        # BUG FIX: a bare `import scipy` does not reliably make the
        # `scipy.spatial` subpackage available; import the needed
        # function explicitly.
        from scipy.spatial.distance import cdist

        endog, exog, group, time = [], [], [], []

        for i in range(self.ngroups):

            gsize = np.random.randint(self.group_size_range[0],
                                      self.group_size_range[1])

            group.append([i,] * gsize)

            # Two "time" covariates per observation, used only to
            # define within-cluster distances.
            time1 = np.random.normal(size=(gsize, 2))
            time.append(time1)

            exog1 = np.random.normal(size=(gsize, 5))
            exog1[:, 0] = 1  # constant (intercept) column
            exog.append(exog1)

            # Pairwise distances within the cluster
            distances = cdist(time1, time1, self.distfun[0])

            # Pairwise correlations within the cluster; correlated
            # errors are produced through the Cholesky factor.
            correlations = self.dep_params[0]**distances
            correlations_sr = np.linalg.cholesky(correlations)
            errors = np.dot(correlations_sr, np.random.normal(size=gsize))

            endog1 = np.dot(exog1, self.params) + errors * self.error_sd
            endog.append(endog1)

        self.exog = np.concatenate(exog, axis=0)
        self.endog = np.concatenate(endog)
        self.time = np.concatenate(time, axis=0)
        self.group = np.concatenate(group)
class Nested_simulator(GEE_simulator):
    """Simulate clustered Gaussian data with a nested (multilevel)
    dependence structure built from additive random effects."""

    # Vector containing list of nest sizes (used instead of
    # group_size_range).
    nest_sizes = None

    # Matrix of nest id's (an output parameter)
    id_matrix = None

    def print_dparams(self, dparams_est):
        """Write estimated vs. true variance components to OUT.

        dparams_est holds one variance estimate per nesting level,
        followed by the estimated inverse scale parameter.
        """
        for j in range(len(self.nest_sizes)):
            OUT.write("Nest %d variance estimate: %8.4f\n" % \
                (j+1, dparams_est[j]))
            OUT.write("Nest %d variance truth: %8.4f\n" % \
                (j+1, self.dep_params[j]))

        # Residual variance: total minus the explained components.
        OUT.write("Error variance estimate: %8.4f\n" % \
            (dparams_est[-1] - sum(dparams_est[0:-1])))
        OUT.write("Error variance truth: %8.4f\n" %
                  self.error_sd**2)
        OUT.write("\n")

    def simulate(self):
        """Generate endog, exog, group labels, and the id_matrix."""

        group_effect_var = self.dep_params[0]

        vcomp = self.dep_params[1:]
        # Trailing zero variance for the deepest nesting level --
        # assumes dep_params is a list (slice above copies it).
        vcomp.append(0)

        endog, exog, group, id_matrix = [], [], [], []

        for i in range(self.ngroups):

            iterators = [lrange(n) for n in self.nest_sizes]

            # The random effects
            variances = [np.sqrt(v)*np.random.normal(size=n)
                         for v,n in zip(vcomp, self.nest_sizes)]

            gpe = np.random.normal() * np.sqrt(group_effect_var)

            nest_all = []
            for j in self.nest_sizes:
                nest_all.append(set())

            for nest in product(*iterators):

                group.append(i)

                # The sum of all random effects that apply to this
                # unit
                ref = gpe + sum([v[j] for v,j in zip(variances, nest)])

                exog1 = np.random.normal(size=5)
                exog1[0] = 1  # constant (intercept) term
                exog.append(exog1)

                error = ref + self.error_sd * np.random.normal()

                endog1 = np.dot(exog1, self.params) + error
                endog.append(endog1)

                # Track distinct nest labels seen so far so each unit
                # gets consecutive integer ids per nesting level.
                for j in range(len(nest)):
                    nest_all[j].add(tuple(nest[0:j+1]))

                nest1 = [len(x)-1 for x in nest_all]
                id_matrix.append(nest1[0:-1])

        self.exog = np.array(exog)
        self.endog = np.array(endog)
        self.group = np.array(group)
        self.id_matrix = np.array(id_matrix)
        self.time = np.zeros_like(self.endog)
def gen_gendat_ar0(ar):
    """Return a data-generator closure for an AR model with
    dependence parameter `ar`."""
    def gendat_ar0(msg = False):
        sim = AR_simulator()
        sim.ngroups = 200
        sim.error_sd = 2
        sim.params = np.r_[0, -1, 1, 0, 0.5]
        sim.dep_params = [ar]
        sim.simulate()
        return sim, Autoregressive()
    return gendat_ar0
def gen_gendat_ar1(ar):
    """Return a data-generator closure for a second AR model with
    dependence parameter `ar`."""
    def gendat_ar1():
        sim = AR_simulator()
        sim.ngroups = 200
        sim.error_sd = 2
        sim.params = np.r_[0, -0.8, 1.2, 0, 0.5]
        sim.dep_params = [ar]
        sim.simulate()
        return sim, Autoregressive()
    return gendat_ar1
def gendat_nested0():
    """Build the first two-level nested simulation together with its
    Nested covariance structure."""
    sim = Nested_simulator()
    sim.ngroups = 50
    sim.nest_sizes = [10, 5]
    sim.error_sd = 1.
    sim.params = np.r_[0., 1, 1, -1, -1]
    sim.dep_params = [2., 1.]
    sim.simulate()
    return sim, Nested(sim.id_matrix)
def gendat_nested1():
    """Build the second two-level nested simulation together with its
    Nested covariance structure."""
    sim = Nested_simulator()
    sim.ngroups = 50
    sim.nest_sizes = [10, 5]
    sim.error_sd = 2.
    sim.params = np.r_[0, 1, 1.3, -0.8, -1.2]
    sim.dep_params = [1., 3.]
    sim.simulate()
    return sim, Nested(sim.id_matrix)
if __name__ == "__main__":

    try:
        # Controls float formatting in the np.array_str calls below.
        np.set_printoptions(formatter={'all': lambda x: "%8.3f" % x},
                            suppress=True)
    except TypeError:
        # older numpy versions do not have formatter option
        pass

    # Report file; also read as a global by the simulators'
    # print_dparams methods.
    OUT = open("gee_gaussian_simulation_check.txt", "w")

    # Number of Monte Carlo replications per data generating model.
    nrep = 100

    # The collection of data generating models to evaluate.
    gendats = [gen_gendat_ar0(ar) for ar in (0, 0.3, 0.6)]
    gendats.extend([gen_gendat_ar1(ar) for ar in (0, 0.3, 0.6)])
    gendats.extend([gendat_nested0, gendat_nested1])

    # Linear constraint lhs * params = rhs used to exercise the score
    # test; holds exactly under every model above (coefficients on
    # columns 1 and 2 sum to zero).
    lhs = np.array([[0., 1, 1, 0, 0],])
    rhs = np.r_[0.,]

    # Loop over data generating models
    for gendat in gendats:

        pvalues = []
        params = []
        std_errors = []
        dep_params = []

        for j in range(nrep):

            # Unconstrained fit: collect parameter estimates, their
            # standard errors, and the dependence parameters.
            da,va = gendat()
            ga = Gaussian()

            md = GEE(da.endog, da.exog, da.group, da.time, ga, va)
            mdf = md.fit()

            scale_inv = 1 / md.estimate_scale()

            dep_params.append(np.r_[va.dep_params, scale_inv])
            params.append(np.asarray(mdf.params))
            std_errors.append(np.asarray(mdf.standard_errors()))

            # Constrained fit on a fresh data set: collect the score
            # test p-value for the constraint.
            da,va = gendat()
            ga = Gaussian()

            md = GEE(da.endog, da.exog, da.group, da.time, ga, va,
                     constraint=(lhs, rhs))
            mdf = md.fit()
            score = md.score_test_results
            pvalue = score["p-value"]
            pvalues.append(pvalue)

        # Monte Carlo mean of the dependence parameter estimates.
        dparams_mean = np.array(sum(dep_params) / len(dep_params))

        OUT.write("Checking dependence parameters:\n")
        da.print_dparams(dparams_mean)

        params = np.array(params)
        eparams = params.mean(0)
        sdparams = params.std(0)
        std_errors = np.array(std_errors)
        std_errors = std_errors.mean(0)

        # Compare the mean of the estimates to the truth.
        OUT.write("Checking parameter values:\n")
        OUT.write("Observed: ")
        OUT.write(np.array_str(eparams) + "\n")
        OUT.write("Expected: ")
        OUT.write(np.array_str(da.params) + "\n")
        OUT.write("Absolute difference: ")
        OUT.write(np.array_str(eparams - da.params) + "\n")
        OUT.write("Relative difference: ")
        OUT.write(np.array_str((eparams - da.params) / da.params)
                  + "\n")
        OUT.write("\n")

        # Compare the Monte Carlo standard deviation of the estimates
        # to the mean of the estimated standard errors.
        OUT.write("Checking standard errors\n")
        OUT.write("Observed: ")
        OUT.write(np.array_str(sdparams) + "\n")
        OUT.write("Expected: ")
        OUT.write(np.array_str(std_errors) + "\n")
        OUT.write("Absolute difference: ")
        OUT.write(np.array_str(sdparams - std_errors) + "\n")
        OUT.write("Relative difference: ")
        OUT.write(np.array_str((sdparams - std_errors) / std_errors)
                  + "\n")
        OUT.write("\n")

        # Under the null, score-test p-values should be uniform:
        # compare empirical quantiles to the uniform quantiles.
        pvalues.sort()
        OUT.write("Checking constrained estimation:\n")
        OUT.write("Left hand side:\n")
        OUT.write(np.array_str(lhs) + "\n")
        OUT.write("Right hand side:\n")
        OUT.write(np.array_str(rhs) + "\n")
        OUT.write("Observed p-values Expected Null p-values\n")
        for q in np.arange(0.1, 0.91, 0.1):
            OUT.write("%20.3f %20.3f\n" %
                      (pvalues[int(q*len(pvalues))], q))

        OUT.write("=" * 80 + "\n\n")

    OUT.close()
|
hlin117/statsmodels
|
statsmodels/genmod/tests/gee_gaussian_simulation_check.py
|
Python
|
bsd-3-clause
| 9,914
|
[
"Gaussian"
] |
e467c1e7a5229c11c6ffc39a8e71ab079c58cc6fa7a9f9e21d06b18256d3ddc8
|
'''Apport package hook for the ubiquity live CD installer.
Copyright (C) 2009 Canonical Ltd.
Authors: Colin Watson <cjwatson@ubuntu.com>,
Brian Murray <brian@ubuntu.com>'''
import apport.hookutils
import os.path
import re
def add_installation_log(report, ident, name):
    """Attach an installer log file to an apport report.

    Looks for `name` under /var/log/installer/ first, then /var/log/.
    If the file exists but is not readable by the current user, it is
    read with root privileges via apport's hookutils.  Does nothing if
    the file does not exist in either location.

    :param report: apport report mapping to update in place
    :param ident: report key under which to store the log contents
    :param name: base file name of the log
    """
    # Build each candidate path once instead of duplicating the
    # formatting in every branch.
    for f in ('/var/log/installer/%s' % name, '/var/log/%s' % name):
        if os.path.exists(f):
            break
    else:
        return

    if os.access(f, os.R_OK):
        # BUG FIX: close the file promptly instead of leaking the
        # handle returned by open().
        with open(f, 'r') as log_file:
            report[ident] = log_file.read()
    elif os.path.exists(f):
        # Unreadable as this user: fall back to reading as root.
        apport.hookutils.attach_root_command_outputs(report,
            {ident: "cat '%s'" % f})
def prepare_duplicate_signature(syslog, collect_grub, collect_trace):
    """Build a DuplicateSignature string from installer syslog text.

    Collects grub-installer output and/or a Python traceback from the
    log depending on the collect_grub / collect_trace flags.  Returns
    the collected text, or None if the log ends before a complete
    signature is gathered.
    """
    collect = ''
    for line in syslog.split('\n'):
        if collect_grub:
            # Accumulate every grub-installer line; the [4:] slice
            # presumably drops the syslog timestamp/host/tag prefix
            # -- TODO confirm against real log format.
            if 'grub-installer:' in line and collect == "":
                collect = ' '.join(line.split(' ')[4:]) + '\n'
                continue
            elif 'grub-installer:' in line and collect != "":
                collect += ' '.join(line.split(' ')[4:]) + '\n'
                continue
        # Grub-only mode: stop at the first non-grub line once
        # something has been collected.
        if not collect_trace and collect != '':
            return collect
        # A traceback header either extends the grub text or starts a
        # fresh signature, depending on mode.
        if 'Traceback (most recent call last):' in line and \
                collect_grub:
            collect += ' '.join(line.split(' ')[5:]) + '\n'
            continue
        if 'Traceback (most recent call last):' in line and \
                not collect_grub:
            collect = ' '.join(line.split(' ')[5:]) + '\n'
            continue
        # NOTE(review): a line with a single remaining field appears
        # to mark the end of a traceback -- confirm.
        if len(line.split(' ')[5:]) == 1 and 'Traceback' in collect:
            if collect != '':
                return collect
        # Only keep accumulating once a traceback has started.
        if not 'Traceback' in collect:
            continue
        collect += ' '.join(line.split(' ')[5:]) + '\n'
def add_info(report, ui):
    """Apport hook entry point for ubiquity installer crash reports.

    Collects installer log files and derived metadata into `report`,
    prompting the user through `ui` where needed.  Raises
    StopIteration (the apport hook convention for aborting report
    collection) when the problem is traced to bad installation media
    or the user cancels.
    """
    # This identical message was previously duplicated inline twice.
    media_error = "The system log from your installation contains an error. The specific error commonly occurs when there is an issue with the media from which you were installing. This can happen when your media is dirty or damaged or when you've burned the media at a high speed. Please try cleaning the media and or burning new media at a lower speed. In the event that you continue to encounter these errors it may be an issue with your CD / DVD drive."

    add_installation_log(report, 'UbiquitySyslog', 'syslog')
    # NOTE(review): raises KeyError if no syslog was found -- this
    # preserves the original behavior.
    syslog = report['UbiquitySyslog']

    if 'Buffer I/O error on device' in syslog:
        # BUG FIX: raw strings avoid the deprecated '\w' escape, and
        # the search is now performed once instead of twice.
        drive_match = re.search(r'Attached .* CD-ROM (\w+)', syslog)
        if drive_match:
            cd_drive = drive_match.group(1)
            cd_error = re.search('Buffer I/O error on device %s' % cd_drive,
                                 syslog)
            if cd_error:
                ui.information(media_error)
                raise StopIteration
    if 'SQUASHFS error: Unable to read' in syslog:
        ui.information(media_error)
        raise StopIteration

    if report['ProblemType'] != 'Bug' and 'Traceback' not in report:
        collect_grub = False
        collect_trace = False
        if ('grub-installer: Installation finished. No error reported'
                not in syslog and 'grub-installer:' in syslog):
            collect_grub = True
        if 'Traceback' in syslog:
            collect_trace = True
        if collect_grub or collect_trace:
            duplicate_signature = prepare_duplicate_signature(syslog,
                collect_grub, collect_trace)
            if duplicate_signature:
                report['DuplicateSignature'] = duplicate_signature
                if collect_grub:
                    report['SourcePackage'] = 'grub-installer'

    # Tag the report with the ubiquity version found in the syslog.
    match = re.search(r'ubiquity.*Ubiquity (.*)\n', report['UbiquitySyslog'])
    if match:
        match = match.group(1)
    report.setdefault('Tags', '')
    if match:
        report['Tags'] += ' ubiquity-%s' % match.split()[0]

    add_installation_log(report, 'UbiquityPartman', 'partman')
    if os.path.exists('/var/log/installer/debug'):
        response = ui.yesno("The debug log file from your installation would help us a lot but includes the password you used for your user when installing Ubuntu. Do you want to include this log file?")
        if response is None:
            # User canceled the dialog: stop collecting.
            raise StopIteration
        if response:
            add_installation_log(report, 'UbiquityDebug', 'debug')
    add_installation_log(report, 'UbiquityDm', 'dm')
    add_installation_log(report, 'Casper', 'casper.log')
    add_installation_log(report, 'OemConfigLog', 'oem-config.log')
    if 'OemConfigLog' in report:
        report['Tags'] += ' oem-config'
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/apport/package-hooks/source_ubiquity.py
|
Python
|
gpl-3.0
| 4,922
|
[
"Brian"
] |
65b9d5cd0eb3019bc58f3b0ff70c7c4d35f8c401ee96b757d511137252bfa582
|
from compliance_checker.base import BaseCheck, TestCtx, Result
from compliance_checker import MemoizedDataset
from compliance_checker.cf.cf_1_7 import CF1_7Check
from netCDF4 import Dataset
from compliance_checker.base import BaseCheck, BaseNCCheck, Result, TestCtx
import requests
from lxml import etree
from shapely.geometry import Polygon
import numpy as np
import re
from compliance_checker.cf.util import reference_attr_variables, string_from_var_type
import itertools
import warnings
from shapely.geometry import (MultiPoint, LineString, MultiLineString, Polygon,
MultiPolygon)
from compliance_checker.cf.util import reference_attr_variables
"""
What's new in CF-1.8
--------------------
2.7. Groups
2.7.1. Scope
2.7.2. Application of attributes
6.1.2. Taxon Names and Identifiers
7.5. Geometries
"""
class CF1_8Check(CF1_7Check):
"""Implementation for CF v1.8. Inherits from CF1_7Check."""
# things that are specific to 1.8
_cc_spec_version = "1.8"
_cc_url = "http://cfconventions.org/Data/cf-conventions/cf-conventions-1.8/cf-conventions.html"
ROOT_GROUP_ONLY_ATTRS = ["Conventions", "external_variables"]
NON_ROOT_GROUP_OPT = ["title", "history"]
def __init__(self, options=None):
super(CF1_8Check, self).__init__(options)
self.section_titles.update({"2.7":
"§2.7 Groups",
"6.1.2":
"§6.1.2 Taxon Names and Identifiers",
"7.5": "§7.5 Geometries"})
    def check_groups(self, ds: MemoizedDataset):
        """
        2.7.2. Application of attributes

        The following attributes are optional for non-root groups. They are allowed in order to
        provide additional provenance and description of the subsidiary data. They do not override
        attributes from parent groups.

        - title
        - history

        If these attributes are present, they may be applied additively to the parent attributes of
        the same name. In the case of conflicts, the root group
        attribute takes precedence over per-group instances of these attributes.

        The following attributes MAY ONLY be used in the root group and SHALL NOT be duplicated or
        overridden in child groups:

        - Conventions
        - external_variables

        Furthermore, per-variable attributes MUST be attached to the variables to which they refer.
        They MAY NOT be attached to a group, even if all variables within that group use the same
        attribute and value.

        If attributes are present within groups without being attached to a variable, these
        attributes apply to the group where they are defined, and to that group's descendants, but
        not to ancestor or sibling groups. If a group attribute is defined in a parent group, and
        one of the child group redefines the same attribute, the definition within the child group
        applies for the child and all of its descendants.

        :param ds: an open dataset supporting the netCDF4 group API
        :returns list: list of Results, one per offending attribute
        """
        results = []

        ctx_hi = TestCtx(BaseCheck.HIGH, self.section_titles["2.7"])
        ctx_lo = TestCtx(BaseCheck.LOW, self.section_titles["2.7"])

        # Make sure `Conventions` & `external_variables` attributes are
        # only present in the root group.
        for gname in ds.groups:
            ginstance = ds.createGroup(
                gname
            )  # returns existing Group; doesn't create a new one
            for attr in ginstance.ncattrs():
                if attr in CF1_8Check.ROOT_GROUP_ONLY_ATTRS:
                    ctx_hi.messages.append(
                        f'§2.7.2 Attribute "{ attr }" MAY ONLY be used in the root group '
                        "and SHALL NOT be duplicated or overridden in child groups."
                    )
                    # NOTE(review): one Result is appended per
                    # offending attribute, re-using the shared ctx.
                    results.append(ctx_hi.to_result())
                elif attr in CF1_8Check.NON_ROOT_GROUP_OPT:
                    ctx_lo.messages.append(
                        f"§2.7.2 Note: attribute '{ attr }' found on non-root group '{ gname }'. "
                        "This is optional for non-root groups. It is allowed in order to provide additional "
                        "provenance and description of the subsidiary data. It does not override "
                        "attributes from parent groups."
                    )
                    results.append(ctx_lo.to_result())
        return results
def check_geometry(self, ds: Dataset):
"""Runs any necessary checks for geometry well-formedness
:param netCDF4.Dataset ds: An open netCDF dataset
:returns list: List of error messages
"""
vars_with_geometry = ds.get_variables_by_attributes(
geometry=lambda g: g is not None)
results = []
unique_geometry_var_names = {var.geometry for var in vars_with_geometry}
if unique_geometry_var_names:
geom_valid = TestCtx(BaseCheck.MEDIUM, self.section_titles["7.5"])
geom_valid.out_of += 1
for geometry_var_name in unique_geometry_var_names:
if geometry_var_name not in ds.variables:
geom_valid.messages.append("Cannot find geometry variable "
f"named {geometry_var_name}")
results.append(geom_valid.to_result())
continue
else:
geometry_var = ds.variables[geometry_var_name]
geometry_type = getattr(geometry_var, "geometry_type")
valid_geometry_types = {"point", "line", "polygon"}
try:
node_coord_var_names = geometry_var.node_coordinates
except AttributeError as e:
geom_valid.messsages.append('Could not find required attribute '
'"node_coordinates" in geometry '
f'variable "{geometry_var_name}"')
results.append(geom_valid.to_result())
if not isinstance(node_coord_var_names, str):
geom_valid.messages.append(
'Attribute "node_coordinates" in geometry '
f'variable "{geometry_var_name}" must be '
'a string')
results.append(geom_valid.to_result())
continue
split_coord_names = node_coord_var_names.strip().split(" ")
node_coord_vars, not_found_node_vars = [], []
for coord_var_name in split_coord_names:
try:
node_coord_vars.append(ds.variables[coord_var_name])
except KeyError:
not_found_node_vars.append(coord_var_name)
# If any variables weren't found, we can't continue
if not_found_node_vars:
geom_valid.messages.append(
"The following referenced node coordinate"
"variables for geometry variable"
f'"{geometry_var_name}" were not found: '
f'{not_found_node_vars}')
results.append(geom_valid.to_result())
continue
return error_msgs
node_count = reference_attr_variables(ds,
getattr(geometry_var, "node_count", None))
# multipart lines and polygons only
part_node_count = reference_attr_variables(ds,
getattr(geometry_var, "part_node_count", None))
# polygons with interior geometry only
interior_ring = reference_attr_variables(ds,
getattr(geometry_var, "interior_ring", None))
if geometry_type == "point":
geometry = PointGeometry(node_coord_vars, node_count)
elif geometry_type == "line":
geometry = LineGeometry(node_coord_vars, node_count,
part_node_count)
elif geometry_type == "polygon":
geometry = PolygonGeometry(node_coord_vars, node_count,
part_node_count,
interior_ring)
else:
geom_valid.messages.append(
f'For geometry variable "{geometry_var_name}'
'the attribute "geometry_type" must exist'
'and have one of the following values:'
'"point", "line", "polygon"')
results.append(geom_valid.to_result())
continue
# check geometry
messages = geometry.check_geometry()
if not messages:
geom_valid.score += 1
else:
geom_valid.messages.extend(messages)
results.append(geom_valid.to_result())
return results
def check_taxa(self, ds: Dataset):
"""
6.1.2. Taxon Names and Identifiers
A taxon is a named level within a biological classification, such as a class, genus and
species. QUANTITIES DEPENDENT ON TAXA HAVE GENERIC STANDARD NAMES CONTAINING THE PHRASE
"organisms_in_taxon", AND THE TAXA ARE IDENTIFIED BY AUXILIARY COORDINATE VARIABLES.
The taxon auxiliary coordinate variables are string-valued. The plain-language name of the
taxon MUST be contained in a variable with standard_name of 'biological_taxon_name'. A Life
Science Identifier (LSID) may be contained in a variable with standard_name of
biological_taxon_lsid. This is a URN with the syntax
"urn:lsid:<Authority>:<Namespace>:<ObjectID>[:<Version>]". This includes the reference
classification in the <Authority> element and these are restricted by the LSID governance.
It is strongly recommended in CF that the authority chosen is World Register of Marine
Species (WoRMS) for oceanographic data and Integrated Taxonomic Information System (ITIS)
for freshwater and terrestrial data. WoRMS LSIDs are built from the WoRMS AphiaID taxon
identifier such as "urn:lsid:marinespecies.org:taxname:104464" for AphiaID 104464. This may
be converted to a URL by adding prefixes such as http://www.lsid.info/. ITIS LSIDs are
built from the ITIS Taxonomic Serial Number (TSN), such as
"urn:lsid:itis.gov:itis_tsn:180543".
The biological_taxon_name auxiliary coordinate variable included for human readability is
MANDATORY. The biological_taxon_lsid auxliary coordinate variable included for software
agent readability is optional, but strongly recommended. If both are present then each
biological_taxon_name coordinate must exactly match the name resolved from the
biological_taxon_lsid coordinate. If LSIDs are available for some taxa in a dataset then
the biological_taxon_lsid auxiliary coordinate variable should be included and missing data
given for those taxa that do not have an identifier.
"""
ret_val = []
# taxa identification variables
taxa_name_variables = ds.get_variables_by_attributes(
standard_name="biological_taxon_name")
taxa_lsid_variables = ds.get_variables_by_attributes(
standard_name="biological_taxon_identifier")
def match_taxa_standard_names(standard_name_string):
"""
Match variables which are standard_names related to taxa, but
are not the taxon identifiers or LSIDs themselves.
"""
return (
standard_name_string is not None
and "taxon" in standard_name_string
and
# exclude the identifiers we just looked at
standard_name_string
not in {"biological_taxon_lsid", "biological_taxon_name"}
and standard_name_string in self._std_names
)
taxa_quantifier_variables = ds.get_variables_by_attributes(
standard_name=match_taxa_standard_names
)
# If there are no matches, there either are no taxa variables
# or the standard names are not appropriate, which will be picked up
# by the standard_name check
if not taxa_quantifier_variables:
return
for taxon_quantifier_variable in taxa_quantifier_variables:
valid_taxa = TestCtx(BaseCheck.HIGH, self.section_titles["6.1.2"])
if not isinstance(
getattr(taxon_quantifier_variable, "coordinates", None), str
):
valid_taxa.add_failure(
f'{taxon_quantifier_variable.name} must have a string valued "coordinates" attribute'
)
continue
coordinate_var_names = taxon_quantifier_variable.coordinates.split(" ")
invalid_coord_vars = set(coordinate_var_names) - ds.variables.keys()
if invalid_coord_vars:
valid_taxa.add_failure(
'The following values for "coordinates" attributes were not found in the dataset\'s variables '
f"{invalid_coord_vars}"
)
if len(coordinate_var_names) > 2:
valid_taxa.add_failure(
"coordinates attribute for taxon data must either reference one or two variable names"
)
continue
coordinate_vars = [
ds.variables[var_name] for var_name in coordinate_var_names
]
coord_var_standard_names = {
var: getattr(var, "standard_name", None) for var in coordinate_vars
}
# if we have no authority, we can't check validity of the name -- assume it's OK
standard_name_set = set(coord_var_standard_names.values())
if set(coord_var_standard_names.keys()) == {"biological_taxon_name"}:
# TODO: Check for at least binomial nomenclature?
continue
# check against WoRMS or ITIS if applicable
elif standard_name_set == {
"biological_taxon_name",
"biological_taxon_lsid",
}:
inverted_dict = {v: k for k, v in coord_var_standard_names.items()}
taxon_lsid_var = inverted_dict["biological_taxon_lsid"]
taxon_name_var = inverted_dict["biological_taxon_name"]
lsid_messages = self.handle_lsid(taxon_lsid_var, taxon_name_var)
valid_taxa.out_of += 1
if lsid_messages:
valid_taxa.messages.extend(lsid_messages)
else:
valid_taxa.score += 1
else:
valid_taxa.add_failure(
f"coordinates attribute for variable {taxon_quantifier_variable} must consist of "
'variables containing standard names of either just "biological_taxon_name", or "biological_taxon_name" and "biological_taxon_identifier"'
)
ret_val.append(valid_taxa.to_result())
return ret_val
def handle_lsid(self, taxon_lsid_variable, taxon_name_variable):
"""
Checks if LSID is well formed and present in the LSID database,
and then attempts to delegate to WoRMS or ITIS, the LSID is applicable.
If the LSID does not check the above authorities, it is not
currently checked for correctness.
"""
messages = []
match_str = (
r"(?:http://(?:www\.)?lsid.info/)?urn:lsid:"
r"(?P<authority>[^:]+):(?P<namespace>[^:]+):"
r"(?P<object_id>\w+)(?::(?P<version>\w+))?"
)
for taxon_lsid, taxon_name in zip(
taxon_lsid_variable[:], taxon_name_variable[:]
):
# TODO: handle case where LSID is not present. This can happen
# if the species is not present in the database desired.
taxon_name_str = string_from_var_type(taxon_name)
lsid_str = string_from_var_type(taxon_lsid)
# if nodata/empty string for LSID, skip validity check
if lsid_str == "":
continue
taxon_match = re.fullmatch(match_str, lsid_str)
if not taxon_match:
messages.append(
"Taxon id must match one of the following forms:\n"
"- urn:lsid:<authority>:<namespace>:<object_id>\n"
"- urn:lsid:<authority>:<namespace>:<object_id>:<version>\n"
"- www.lsid.info/urn:lsid.info:<authority>:<namespace>/<object_id>\n"
"- www.lsid.info/urn:lsid.info:<authority>:<namespace>/<object_id>:<version>\n"
"- lsid.info/urn:lsid.info:<authority>:<namespace>/<object_id>\n"
"- lsid.info/urn:lsid.info:<authority>:<namespace>/<object_id>:<version>\n"
"- http://lsid.info/urn:lsid.info:<authority>:<namespace>/<object_id>\n"
"- http://lsid.info/urn:lsid.info:<authority>:<namespace>/<object_id>:<version>\n"
"- http://www.lsid.info/urn:lsid.info:<authority>:<namespace>/<object_id>\n"
"- http://www.lsid.info/urn:lsid.info:<authority>:<namespace>/<object_id>:<version>"
)
continue
if lsid_str.startswith("urn"):
lsid_url = f"http://www.lsid.info/{lsid_str}"
else:
lsid_url = lsid_str
try:
response = requests.get(lsid_url, timeout=10)
response.raise_for_status()
except requests.exceptions.RequestException as e:
# 400 error code indicates something is malformed on client's
# end
if response.status_code == 400:
tree = etree.HTML(response.text)
problem_text = tree.find("./body/p").text
messages.append(
"http://lsid.info returned an error message "
f"for submitted LSID string '{lsid_str}': "
f"{problem_text}"
)
else:
messages.append(
"Error occurred attempting to check LSID "
f"'{lsid_str}': {str(e)}"
)
continue
# WoRMS -- marine bio data
if (
taxon_match["authority"] == "marinespecies.org"
and taxon_match["namespace"] == "taxname"
):
try:
response = requests.get(
f"http://www.marinespecies.org/rest/AphiaRecordByAphiaID/{taxon_match['object_id']}",
timeout=15,
)
response.raise_for_status()
except requests.exceptions.RequestException as e:
messages.append(
"Aphia ID {taxon_match['object_id'] returned "
"other error: {str(e)}"
)
# record not found in database
if response.status_code == 204:
messages.append(
"Aphia ID {taxon_match['object_id'] "
"not found in WoRMS database"
)
# good case, parse JSON
elif response.status_code == 200:
valid_name = response.json()["valid_name"]
if valid_name != taxon_name_str:
messages.append(
"Supplied taxon name and WoRMS valid name do not match. "
f"Supplied taxon name is '{taxon_name_str}', WoRMS valid name "
f"is '{valid_name}.'"
)
# Misc non-error code. Should not reach here.
else:
messages.append(
f"Aphia ID {taxon_match['object_id']}"
"returned an unhandled HTTP status "
f"code {response.status_code}"
)
continue
# ITIS -- freshwater bio data
elif (
taxon_match["authority"] == "itis.gov"
and taxon_match["namespace"] == "itis_tsn"
):
itis_url = f"https://www.itis.gov/ITISWebService/jsonservice/getFullRecordFromTSN?tsn={taxon_match['object_id']}"
try:
itis_response = requests.get(itis_url, timeout=15)
itis_response.raise_for_status()
except requests.exceptions.RequestException as e:
if itis_response.status_code == 404:
messages.append(
"itis.gov TSN " f"{taxon_match['object_id']} not found."
)
continue
else:
messages.append(
"itis.gov identifier returned other " f"error: {str(e)}"
)
continue
json_contents = itis_response.json()
combined_name = json_contents["scientificName"]["combinedName"]
if taxon_name_str != combined_name:
messages.append(
"Supplied taxon name and ITIS scientific name do not match. "
f"Supplied taxon name is '{taxon_name_str}', ITIS scientific name "
f"for TSN {taxon_match['object_id']} is '{combined_name}.'"
)
else:
warnings.warn(
"Compliance checker only supports checking valid "
"LSID URNs of the form "
"'urn:lsid:marinespecies.org:taxname:<AphiaID>' or "
"'urn:lsid:itis.gov:itis_tsn:<TSN>'. Assuming "
"pass condition"
)
return messages
class GeometryStorage(object):
    """Abstract base class for geometry containers.

    Holds the node coordinate variables and optional node_count,
    accumulates validation error messages, and stores the parsed
    geometry once sanity checks have passed.
    """

    def __init__(self, coord_vars, node_count):
        self.coord_vars = coord_vars
        self.node_count = node_count
        # validation error messages accumulate here
        self.errors = []
        # geometry is later parsed after sanity checks are run
        self.geometry = None

    def _split_mulitpart_geometry(self):
        """Split the stacked coordinates into per-part arrays, using
        the positive entries of part_node_count as extents.

        Relies on ``self.part_node_count`` being provided by a
        subclass.
        """
        part_extents = self.part_node_count[self.part_node_count > 0]
        stacked_coords = np.vstack(self.coord_vars).T
        return np.split(stacked_coords, part_extents.cumsum()[:-1])
class PointGeometry(GeometryStorage):
    """Class for validating Point/MultiPoint geometries"""

    def check_geometry(self):
        """Validate the point geometry's coordinate variables.

        :returns list: accumulated error messages (empty when the
            geometry is well formed)
        """
        # BUG FIX: removed a super().check_geometry() call --
        # GeometryStorage defines no such method, so every invocation
        # raised AttributeError.  Also removed an unused
        # expected_node_count computation.
        if all(len(cv.dimensions) != 0 for cv in self.coord_vars):
            # groupby idiom: exactly one group means all coordinate
            # variables share the same dimensions.
            same_dim_group = itertools.groupby(self.coord_vars,
                                               lambda x: x.dimensions)
            same_dim = (next(same_dim_group, True) and
                        not next(same_dim_group, False))
            if not same_dim:
                self.errors.append("For a point geometry, coordinate "
                                   "variables must be the same length as "
                                   "node_count defined, or must be "
                                   "length 1 if node_count is not set")
        return self.errors
class LineGeometry(GeometryStorage):
    """Class for validating Line/MultiLine geometries"""

    def __init__(self, coord_vars, node_count, part_node_count):
        """
        :param coord_vars: node coordinate variables
        :param node_count: per-geometry node counts, or None when the
            geometry length is implied by the coordinate variables
        :param part_node_count: per-part node counts for multipart
            geometries, or None
        """
        super().__init__(coord_vars, node_count)
        self.part_node_count = part_node_count
        # BUG FIX: guard against node_count being None --
        # check_geometry treats node_count as optional, but this
        # previously raised AttributeError before validation ran.
        if (self.node_count is not None and
                not np.issubdtype(self.node_count.dtype, np.integer)):
            raise TypeError("For line geometries, node_count must be an integer")

    def check_geometry(self):
        """Validate the line geometry; returns a list of error messages."""
        geom_errors = []
        # All coordinate variables must share the same dimensions
        # (groupby idiom: exactly one group).
        same_dim_group = itertools.groupby(self.coord_vars,
                                           lambda x: x.dimensions)
        same_dim = (next(same_dim_group, True) and
                    not next(same_dim_group, False))
        if not same_dim:
            raise IndexError("Coordinate variables must be the same length. "
                             "If node_count is specified, this value must "
                             "also sum to the length of the coordinate "
                             "variables.")
        # if a multipart
        if self.node_count is not None:
            same_length = len(self.coord_vars[0]) == self.node_count[:].sum()
            if not same_length:
                geom_errors.append("Coordinate variables must be the same "
                                   "length. If node_count is specified, this "
                                   "value must also sum to the length of the "
                                   "coordinate variables.")
        if self.part_node_count is not None:
            if not np.issubdtype(self.part_node_count.dtype, np.integer):
                geom_errors.append("when part_node_count is specified, it must "
                                   "be an array of integers")
            # BUG FIX: compare the part_node_count sum against the
            # node_count sum, as the message states; previously this
            # re-checked the coordinate length against node_count.
            # NOTE(review): assumes node_count is present whenever
            # part_node_count is -- confirm against CF §7.5.
            same_node_count = (self.part_node_count[:].sum() ==
                               self.node_count[:].sum())
            if not same_node_count:
                geom_errors.append("The sum of part_node_count must be equal "
                                   "to the value of node_count")
        return geom_errors
class PolygonGeometry(LineGeometry):
    """Class for validating Polygon/MultiPolygon geometries"""
    # TODO/clarify: Should polygons be simple, i.e. non-self intersecting?
    # Presumably

    def __init__(self, coord_vars, node_count, part_node_count,
                 interior_ring):
        # FIX: removed the redundant re-assignment of part_node_count;
        # the superclass __init__ already stores it.
        super().__init__(coord_vars, node_count, part_node_count)
        # Flags (one per part) marking interior rings (holes), or None
        self.interior_ring = interior_ring

    def check_polygon_orientation(self, transposed_coords, interior=False):
        """
        Checks that the polygon orientation is counter-clockwise if an
        exterior ring, otherwise clockwise if an interior ring. Orientation
        is indicated by the `interior` boolean variable with False for an
        exterior ring and True for an interior ring (hole), defaulting to False.
        This function operates piecewise on individual interior/exterior
        polygons as well as multipart polygons
        :param np.array transposed_coords: A 2-by-n array of x and y coordinates
        :param bool interior: A boolean defaulting to False which has False
            indicating a counter-clockwise or exterior polygon, and True
            indicating a clockwise or interior polygon.
        :rtype bool:
        :returns: True if the polygon follows the proper orientation,
                  False if it fails the orientation test.
        """
        try:
            polygon = Polygon(transposed_coords.tolist())
        except ValueError:
            raise ValueError("Polygon contains too few points to perform orientation test")
        ccw = polygon.exterior.is_ccw
        return not ccw if interior else ccw

    def check_geometry(self):
        """Run the LineGeometry checks, then verify ring orientation for
        each polygon part; return the accumulated error messages."""
        messages = super().check_geometry()
        # If any errors occurred within the preliminary checks, they preclude
        # running checks against the geometry here.
        if messages:
            return messages
        if self.part_node_count is not None:
            extents = np.concatenate([np.array([0]),
                                      self.part_node_count[:].cumsum()])
            if self.interior_ring is not None:
                ring_orientation = self.interior_ring[:].astype(bool)
            else:
                # FIX: was len(self.part_count) -- that attribute does not
                # exist and raised AttributeError; part_node_count is meant.
                ring_orientation = np.zeros(len(self.part_node_count),
                                            dtype=bool)
            node_indexer_len = len(self.part_node_count)
        else:
            extents = np.concatenate([np.array([0]),
                                      self.node_count[:].cumsum()])
            node_indexer_len = len(self.node_count)
            ring_orientation = np.zeros(node_indexer_len, dtype=bool)
        # TODO: is it necessary to check whether part_node_count "consumes"
        #       node_count in the polygon, i.e. first (3, 3, 3) will consume
        #       a node part of 9, follow by next 3 will consume a node part of
        #       3 after consuming
        for i in range(node_indexer_len):
            extent_slice = slice(extents[i], extents[i + 1])
            # n-by-2 array of the x/y coordinates of this part
            poly_sliced = np.vstack([cv[extent_slice] for cv in
                                     self.coord_vars]).T
            pass_orientation = self.check_polygon_orientation(
                poly_sliced, ring_orientation[i])
            if not pass_orientation:
                orient_fix = (("exterior", "counterclockwise")
                              if not ring_orientation[i] else
                              ("interior", "clockwise"))
                message = (f"An {orient_fix[0]} polygon referred to by "
                           f"coordinates ({poly_sliced}) must have coordinates "
                           f"in {orient_fix[1]} order")
                messages.append(message)
        return messages
def check_geometry(self, ds: Dataset):
    """Runs any necessary checks for geometry well-formedness

    :param netCDF4.Dataset ds: An open netCDF dataset
    :returns list: List of results
    """
    # Every data variable carrying a "geometry" attribute
    vars_with_geometry = ds.get_variables_by_attributes(
        geometry=lambda g: g is not None
    )
    results = []
    unique_geometry_var_names = {var.geometry for var in vars_with_geometry}
    if unique_geometry_var_names:
        geom_valid = TestCtx(BaseCheck.MEDIUM, self.section_titles["7.5"])
        # NOTE(review): out_of is incremented once but score may be
        # incremented once per geometry variable below -- confirm intended
        # scoring when several geometry variables are present.
        geom_valid.out_of += 1
        for geometry_var_name in unique_geometry_var_names:
            if geometry_var_name not in ds.variables:
                geom_valid.messages.append(
                    "Cannot find geometry variable " f"named {geometry_var_name}"
                )
                results.append(geom_valid.to_result())
                continue
            else:
                geometry_var = ds.variables[geometry_var_name]
            # FIX: default to None so a missing geometry_type attribute is
            # reported through the unknown-type branch below rather than
            # raising AttributeError.
            geometry_type = getattr(geometry_var, "geometry_type", None)
            try:
                node_coord_var_names = geometry_var.node_coordinates
            except AttributeError:
                geom_valid.messages.append(
                    "Could not find required attribute "
                    '"node_coordinates" in geometry '
                    f'variable "{geometry_var_name}"'
                )
                results.append(geom_valid.to_result())
                continue
            if not isinstance(node_coord_var_names, str):
                geom_valid.messages.append(
                    'Attribute "node_coordinates" in geometry '
                    f'variable "{geometry_var_name}" must be '
                    "a string"
                )
                results.append(geom_valid.to_result())
                continue
            split_coord_names = node_coord_var_names.strip().split(" ")
            node_coord_vars, not_found_node_vars = [], []
            for coord_var_name in split_coord_names:
                try:
                    node_coord_vars.append(ds.variables[coord_var_name])
                except KeyError:
                    not_found_node_vars.append(coord_var_name)
            # If any variables weren't found, we can't continue
            if not_found_node_vars:
                # FIX: added the missing spaces between the concatenated
                # string fragments of this message.
                geom_valid.messages.append(
                    "The following referenced node coordinate "
                    "variables for geometry variable "
                    f'"{geometry_var_name}" were not found: '
                    f"{not_found_node_vars}"
                )
                results.append(geom_valid.to_result())
                continue
            node_count = reference_attr_variables(
                ds, getattr(geometry_var, "node_count", None)
            )
            # multipart lines and polygons only
            part_node_count = reference_attr_variables(
                ds, getattr(geometry_var, "part_node_count", None)
            )
            # polygons with interior geometry only
            interior_ring = reference_attr_variables(
                ds, getattr(geometry_var, "interior_ring", None)
            )
            if geometry_type == "point":
                geometry = PointGeometry(node_coord_vars, node_count)
            elif geometry_type == "line":
                geometry = LineGeometry(node_coord_vars, node_count, part_node_count)
            elif geometry_type == "polygon":
                geometry = PolygonGeometry(
                    node_coord_vars, node_count, part_node_count, interior_ring
                )
            else:
                # FIX: added the missing closing quote and inter-fragment
                # spaces in this message.
                geom_valid.messages.append(
                    f'For geometry variable "{geometry_var_name}" '
                    'the attribute "geometry_type" must exist '
                    "and have one of the following values: "
                    '"point", "line", "polygon"'
                )
                results.append(geom_valid.to_result())
                continue
            # FIX: use the return value of check_geometry().  Line and
            # Polygon geometries return their error list instead of
            # mutating self.errors, so checking geometry.errors alone let
            # their failures pass silently.
            geometry_errors = geometry.check_geometry()
            if geometry_errors:
                geom_valid.messages.extend(geometry_errors)
            else:
                geom_valid.score += 1
            results.append(geom_valid.to_result())
    return results
class GeometryStorage(object):
    """Abstract base class for geometries"""

    def __init__(self, coord_vars, node_count):
        # Coordinate (node) variables making up the geometry
        self.coord_vars = coord_vars
        # Per-geometry node counts, or None when not supplied
        self.node_count = node_count
        # Accumulated validation error messages
        self.errors = []
        # geometry is later parsed after sanity checks are run
        self.geometry = None

    def check_geometry(self):
        """Record an error for any coordinate variable whose dtype is not
        floating point."""
        invalid_vars = []
        for coord_var in self.coord_vars:
            # FIX: np.float was removed from NumPy (1.24); check the
            # variable's dtype against np.floating instead of passing the
            # variable object itself to issubdtype.
            if not np.issubdtype(coord_var.dtype, np.floating):
                invalid_vars.append(coord_var.name)
        # can't continue if the geometry variables are not the correct type
        if invalid_vars:
            self.errors.append(
                "The following geometry variables "
                f"have non-numeric contents: {invalid_vars}"
            )

    def _split_mulitpart_geometry(self):
        # Split the stacked coordinate array at the part boundaries given by
        # part_node_count (zero-length parts are dropped).
        # NOTE(review): relies on self.part_node_count, which is only
        # defined on the multipart subclasses -- confirm callers.
        arr_extents_filt = self.part_node_count[self.part_node_count > 0]
        splits = np.split(np.vstack(self.coord_vars).T,
                          arr_extents_filt.cumsum()[:-1])
        return splits
class PointGeometry(GeometryStorage):
    """Class for validating Point/MultiPoint geometries"""

    def check_geometry(self):
        """Validate a point geometry's coordinate variables and return the
        accumulated list of error messages."""
        super().check_geometry()
        # Scalar (dimensionless) coordinates are trivially consistent; only
        # compare dimensions when every coordinate variable has some.
        if all(cv.dimensions for cv in self.coord_vars):
            distinct_dims = {cv.dimensions for cv in self.coord_vars}
            if len(distinct_dims) > 1:
                self.errors.append(
                    "For a point geometry, coordinate "
                    "variables must be the same length as "
                    "node_count defined, or must be "
                    "length 1 if node_count is not set"
                )
        return self.errors
class LineGeometry(GeometryStorage):
    """Class for validating Line/MultiLine geometries"""

    def __init__(self, coord_vars, node_count, part_node_count):
        super().__init__(coord_vars, node_count)
        # Number of nodes per part; only set for multipart lines
        self.part_node_count = part_node_count
        # Line geometries require node_count; it must be integer-typed
        if not np.issubdtype(self.node_count.dtype, np.integer):
            raise TypeError("For line geometries, node_count must be an integer")

    def check_geometry(self):
        """Validate coordinate variable dimensions and node counts.

        Returns a list of error messages; empty when the geometry is
        well-formed. Raises IndexError when the coordinate variables do
        not share the same dimensions.
        """
        geom_errors = []
        # All coordinate variables must share the same dimensions; groupby
        # yields exactly one group when they do
        same_dim_group = itertools.groupby(self.coord_vars, lambda x: x.dimensions)
        same_dim = next(same_dim_group, True) and not next(same_dim_group, False)
        if not same_dim:
            raise IndexError(
                "Coordinate variables must be the same length. "
                "If node_count is specified, this value must "
                "also sum to the length of the coordinate "
                "variables."
            )
        # if a multipart
        if self.node_count is not None:
            same_length = len(self.coord_vars[0]) == self.node_count[:].sum()
            if not same_length:
                geom_errors.append(
                    "Coordinate variables must be the same "
                    "length. If node_count is specified, this "
                    "value must also sum to the length of the "
                    "coordinate variables."
                )
            if self.part_node_count is not None:
                if not np.issubdtype(self.part_node_count.dtype, np.integer):
                    geom_errors.append(
                        "when part_node_count is specified, it must "
                        "be an array of integers"
                    )
                # FIX: compare the sum of part_node_count against the sum of
                # node_count; the previous code re-compared node_count against
                # the coordinate length, so a mismatched part_node_count was
                # never reported.
                same_node_count = (
                    self.part_node_count[:].sum() == self.node_count[:].sum()
                )
                if not same_node_count:
                    geom_errors.append(
                        "The sum of part_node_count must be equal "
                        "to the value of node_count"
                    )
        return geom_errors
class PolygonGeometry(LineGeometry):
    """Class for validating Polygon/MultiPolygon geometries"""
    # TODO/clarify: Should polygons be simple, i.e. non-self intersecting?
    # Presumably

    def __init__(self, coord_vars, node_count, part_node_count, interior_ring):
        # FIX: removed the redundant re-assignment of part_node_count;
        # the superclass __init__ already stores it.
        super().__init__(coord_vars, node_count, part_node_count)
        # Flags (one per part) marking interior rings (holes), or None
        self.interior_ring = interior_ring

    def check_polygon_orientation(self, transposed_coords, interior=False):
        """
        Checks that the polygon orientation is counter-clockwise if an
        exterior ring, otherwise clockwise if an interior ring. Orientation
        is indicated by the `interior` boolean variable with False for an
        exterior ring and True for an interior ring (hole), defaulting to False.
        This function operates piecewise on individual interior/exterior
        polygons as well as multipart polygons

        :param np.array transposed_coords: A 2-by-n array of x and y coordinates
        :param bool interior: A boolean defaulting to False which has False
            indicating a counter-clockwise or exterior polygon, and True
            indicating a clockwise or interior polygon.
        :rtype bool:
        :returns: True if the polygon follows the proper orientation,
                  False if it fails the orientation test.
        """
        try:
            polygon = Polygon(transposed_coords.tolist())
        except ValueError:
            raise ValueError(
                "Polygon contains too few points to perform orientation test"
            )
        ccw = polygon.exterior.is_ccw
        return not ccw if interior else ccw

    def check_geometry(self):
        """Run the LineGeometry checks, then verify ring orientation for
        each polygon part; return the accumulated error messages."""
        messages = super().check_geometry()
        # If any errors occurred within the preliminary checks, they preclude
        # running checks against the geometry here.
        if messages:
            return messages
        if self.part_node_count is not None:
            extents = np.concatenate([np.array([0]), self.part_node_count[:].cumsum()])
            if self.interior_ring is not None:
                ring_orientation = self.interior_ring[:].astype(bool)
            else:
                # FIX: was len(self.part_count) -- that attribute does not
                # exist and raised AttributeError; part_node_count is meant.
                ring_orientation = np.zeros(len(self.part_node_count), dtype=bool)
            node_indexer_len = len(self.part_node_count)
        else:
            extents = np.concatenate([np.array([0]), self.node_count[:].cumsum()])
            node_indexer_len = len(self.node_count)
            ring_orientation = np.zeros(node_indexer_len, dtype=bool)
        # TODO: is it necessary to check whether part_node_count "consumes"
        #       node_count in the polygon, i.e. first (3, 3, 3) will consume
        #       a node part of 9, follow by next 3 will consume a node part of
        #       3 after consuming
        for i in range(node_indexer_len):
            extent_slice = slice(extents[i], extents[i + 1])
            # n-by-2 array of the x/y coordinates of this part
            poly_sliced = np.vstack([cv[extent_slice] for cv in self.coord_vars]).T
            pass_orientation = self.check_polygon_orientation(
                poly_sliced, ring_orientation[i]
            )
            if not pass_orientation:
                orient_fix = (
                    ("exterior", "counterclockwise")
                    if not ring_orientation[i]
                    else ("interior", "clockwise")
                )
                message = (
                    f"An {orient_fix[0]} polygon referred to by "
                    f"coordinates ({poly_sliced}) must have coordinates "
                    f"in {orient_fix[1]} order"
                )
                messages.append(message)
        return messages
|
ioos/compliance-checker
|
compliance_checker/cf/cf_1_8.py
|
Python
|
apache-2.0
| 41,902
|
[
"NetCDF"
] |
f93c7b806f451845745d5048f1df20856af08fe723e89fca7c1d211049e777e4
|
#!/usr/bin/env python
#
# Copyright (C) 2010-2012 ABINIT Group (Yann Pouillon)
#
# This file is part of the ABINIT software package. For license information,
# please see the COPYING file in the top-level directory of the ABINIT source
# distribution.
#
# FIXME: detect duplicate definitions
from ConfigParser import ConfigParser,NoOptionError
from time import gmtime,strftime
import commands
import os
import re
import sys
class MyConfigParser(ConfigParser):
    """ConfigParser subclass that keeps option names case-sensitive.

    The stock optionxform lower-cases option names; here they are passed
    through unchanged (coerced to str only).
    """

    def optionxform(self, option):
        # Preserve the option name exactly as written in the config file.
        return str(option)
# ---------------------------------------------------------------------------- #
#
# Functions
#
# Environment variables, options and option values excluded from the
# template/config comparison.  Entries containing "*" are regular
# expressions matched from the start of the keyword; all others are
# compared for strict equality.
env_ignore = ["DEFS"]
opt_ignore = [
    "enable_config_file",
    "fcflags_opt_.*",
    "group_.*",
    "prefix",
    "with_config_file"]
val_ignore = [".*-fallback"]


def is_ignored(keyword):
    """Return True if *keyword* is an ignored environment variable or option."""
    for pattern in env_ignore + opt_ignore:
        if "*" in pattern:
            if re.match(pattern, keyword):
                return True
        elif pattern == keyword:
            return True
    return False


def is_ignored_val(keyword):
    """Return True if *keyword* is an ignored option value."""
    for pattern in val_ignore:
        if "*" in pattern:
            if re.match(pattern, keyword):
                return True
        elif pattern == keyword:
            return True
    return False
# ---------------------------------------------------------------------------- #
#
# Main program
#
# Check if we are in the top of the ABINIT source tree
if ( not os.path.exists("configure.ac") or
not os.path.exists("src/98_main/abinit.F90") ):
print "%s: You must be in the top of an ABINIT source tree." % my_name
print "%s: Aborting now." % my_name
sys.exit(1)
# Init
re_env = re.compile("^#[A-Z][0-9A-Z_]*=")
re_opt = re.compile("^#[a-z][0-9a-z_]*=")
# Extract environment variables from config file
cnf_env = MyConfigParser()
cnf_env.read("config/specs/environment.conf")
env_config = list()
for env in cnf_env.sections():
    # Only variables whose "reset" field is "no" are kept for comparison
    if ( cnf_env.get(env,"reset") == "no" ):
        if ( not is_ignored(env) ):
            env_config.append(env)
env_config.sort()

# Extract options from config file
cnf_opt = MyConfigParser()
cnf_opt.read("config/specs/options.conf")
opt_config = list()
opt_removed = list()
for opt in cnf_opt.sections():
    tmp_sta = cnf_opt.get(opt,"status")
    if ( tmp_sta == "removed" or tmp_sta == "dropped" ):
        opt_removed.append(opt)
    elif ( "renamed" in tmp_sta ):
        # "renamed <old_name>": the old name must no longer appear in the
        # template, while the current name is still compared
        opt_removed.append(tmp_sta.split()[1])
        if ( not is_ignored(opt) ):
            opt_config.append(opt)
    elif ( tmp_sta == "hidden" ):
        # Hidden options are added to the ignore list for later matching
        opt_ignore.append(opt)
    else:
        if ( not is_ignored(opt) ):
            opt_config.append(opt)
opt_config.sort()
opt_removed.sort()
# Extract information from template
env_template = list()
opt_template = list()
for line in file("doc/config/build-config.ac","r").readlines():
    # Lines matching "#NAME=" document an environment variable
    if ( re_env.match(line) ):
        # Strip the leading "#" and everything from "=" onwards
        tmp_env = re.sub("=.*","",line[1:-1])
        if ( not is_ignored(tmp_env) ):
            if ( not tmp_env in env_template ):
                env_template.append(tmp_env)
    # Lines matching "#name=" document an option
    if ( re_opt.match(line) ):
        tmp_opt = re.sub("=.*","",line[1:-1])
        if ( not is_ignored(tmp_opt) ):
            if ( not tmp_opt in opt_template ):
                opt_template.append(tmp_opt)
env_template.sort()
opt_template.sort()

# Check whether non-trivial option values are found in template
tpl_data = file("doc/config/build-config.ac","r").read()
opt_values = dict()
for opt in cnf_opt.sections():
    try:
        tmp_values = cnf_opt.get(opt,"values").split()
        # "yes"/"no" are trivial values and need not be documented
        if ( "no" in tmp_values ):
            tmp_values.remove("no")
        if ( "yes" in tmp_values ):
            tmp_values.remove("yes")
    except NoOptionError:
        # Option has no "values" field: nothing to check
        tmp_values = list()
    for val in tmp_values:
        # Skip substituted values ("@...@") and explicitly ignored ones
        if ( (val[0] != "@") and (not is_ignored_val(val)) ):
            # Documented values are searched for as "* <value>" in the template
            if ( not re.search("\\* %s" % (val),tpl_data,re.MULTILINE) ):
                if ( not opt in opt_values ):
                    opt_values[opt] = list()
                opt_values[opt].append(val)
# Python 2 idiom: keys() returns a list, which is then sorted in place
opt_valkeys = opt_values.keys()
opt_valkeys.sort()
# Compare environment and options: items present on one side only
denv_config = [env for env in env_config if not env in env_template]
denv_template = [env for env in env_template if not env in env_config]
dopt_config = [opt for opt in opt_config if not opt in opt_template]
dopt_values = [opt for opt in opt_valkeys if opt not in dopt_config]
dopt_template = [opt for opt in opt_template if not opt in opt_config + opt_removed]
dopt_removed = [opt for opt in opt_removed if opt in opt_template]

# Report any mismatch
# FIX: removed a stray "+" after the line continuation, which silently
# applied a unary plus to len(dopt_values) instead of being a typo-free sum.
nerr = len(denv_config) + len(denv_template) + len(dopt_config) + \
       len(dopt_values) + len(dopt_template) + len(dopt_removed)
if ( nerr > 0 ):
    # Print one line per mismatch on stderr; the leading letter encodes the
    # mismatch kind (see legend line below)
    sys.stderr.write("%s: reporting environment and option mismatches\n\n" % \
        (os.path.basename(sys.argv[0])))
    sys.stderr.write("X: D=Documentation / I=Instance / R=Removed / U=Undefined\n\n")
    sys.stderr.write("%s %-48s %-24s\n" % \
        ("X","Option","Outdated file"))
    sys.stderr.write("%s %s %s\n" % ("-","-" * 48,"-" * 24))
    # Environment variables defined in environment.conf but absent from the template
    for env in denv_config:
        sys.stderr.write("%s %-48s %-24s\n" % \
            ("I",env,"build-config.ac"))
    # Environment variables in the template but undefined in environment.conf
    for env in denv_template:
        sys.stderr.write("%s %-48s %-24s\n" % \
            ("U",env,"environment.conf"))
    # Options defined in options.conf but absent from the template
    for opt in dopt_config:
        sys.stderr.write("%s %-48s %-24s\n" % \
            ("I",opt,"build-config.ac"))
    # Option values not documented in the template
    for opt in dopt_values:
        for val in opt_values[opt]:
            sys.stderr.write("%s %-48s %-24s\n" % \
                ("D","%s='%s'" % (opt,val),"build-config.ac"))
    # Options in the template but undefined in options.conf
    for opt in dopt_template:
        sys.stderr.write("%s %-48s %-24s\n" % \
            ("U",opt,"options.conf"))
    # Removed options still present in the template
    for opt in dopt_removed:
        sys.stderr.write("%s %-48s %-24s\n" % \
            ("R",opt,"build-config.ac"))
    sys.stderr.write("\n")
    # Non-zero exit status lets callers detect the mismatches
    sys.exit(1)
else:
    sys.exit(0)
|
qsnake/abinit
|
util/maintainers/check-build-config.py
|
Python
|
gpl-3.0
| 5,556
|
[
"ABINIT"
] |
360b1ea2aae730e4319c25ecdfa621dcd7a8f08baea661d5c65846409923bd72
|
#!/usr/bin/python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Test 'yank prepare'.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import tempfile
from nose.plugins.attrib import attr
from docopt import docopt
from yank.commands.prepare import usage
from yank import version, utils
#=============================================================================================
# UNIT TESTS
#=============================================================================================
def notest_prepare_amber_implicit(verbose=False):
    """Exercise 'yank prepare binding amber' on the implicit-solvent example."""
    store_directory = tempfile.mkdtemp()
    # Could only figure out how to install things like yank.egg/examples/,
    # rather than yank.egg/yank/examples/
    examples_path = utils.get_data_filename("../examples/benzene-toluene-implicit/setup/")
    flags = ('--setupdir=%s --store=%s --iterations=1 --restraints=Harmonic '
             '--gbsa=OBC2 --temperature=300*kelvin'
             % (examples_path, store_directory))
    command = 'yank prepare binding amber ' + flags
    if verbose:
        command += ' --verbose'
    argv = command.split()
    # if included in the command string it is split in two
    argv.append('--ligand=resname TOL')
    args = docopt(usage, version=version.version, argv=argv[1:])
    from yank.commands import prepare
    prepare.dispatch(args)
@attr('slow') # Skip on Travis-CI
def notest_prepare_amber_explicit(verbose=False):
    """Exercise 'yank prepare binding amber' on the explicit-solvent example."""
    store_directory = tempfile.mkdtemp()
    # Could only figure out how to install things like yank.egg/examples/,
    # rather than yank.egg/yank/examples/
    examples_path = utils.get_data_filename("../examples/benzene-toluene-explicit/setup/")
    flags = ('--setupdir=%s --store=%s --iterations=1 '
             '--nbmethod=CutoffPeriodic --temperature=300*kelvin '
             '--pressure=1*atmospheres' % (examples_path, store_directory))
    command = 'yank prepare binding amber ' + flags
    if verbose:
        command += ' --verbose'
    argv = command.split()
    # if included in the command string it is split in two
    argv.append('--ligand=resname TOL')
    args = docopt(usage, version=version.version, argv=argv[1:])
    from yank.commands import prepare
    prepare.dispatch(args)
@attr('slow') # Skip on Travis-CI
def notest_prepare_gromacs_explicit(verbose=False):
    """Exercise 'yank prepare binding gromacs' on the p-xylene example."""
    store_directory = tempfile.mkdtemp()
    examples_path = utils.get_data_filename("../examples/p-xylene-gromacs-example/setup/")
    flags = ('--setupdir=%s --store=%s --iterations=1 '
             '--nbmethod=CutoffPeriodic --temperature=300*kelvin '
             '--pressure=1*atmospheres --cutoff=1*nanometer'
             % (examples_path, store_directory))
    command = 'yank prepare binding gromacs ' + flags
    if verbose:
        command += ' --verbose'
    argv = command.split()
    # if included in the command string it is split in two
    argv.append("--ligand=resname 'p-xylene'")
    args = docopt(usage, version=version.version, argv=argv[1:])
    from yank.commands import prepare
    prepare.dispatch(args)
#=============================================================================================
# MAIN
#=============================================================================================

if __name__ == '__main__':
    # NOTE(review): the commented-out calls reference test_* names while the
    # functions are defined as notest_* -- confirm before re-enabling them.
    #test_prepare_amber_implicit(verbose=True)
    #test_prepare_amber_explicit(verbose=True)
    notest_prepare_gromacs_explicit(verbose=True)
|
andrrizzi/yank
|
Yank/tests/test_prepare.py
|
Python
|
mit
| 3,523
|
[
"Amber",
"Gromacs"
] |
f7375bef3c85dd7f0ed271739319da9022e4da56154938a734a1eb2ccdc39eed
|
"""
.. module:: compute_season_stati_score_spatial_distribution.py
    :synopsis: This programme calculates the various statistical scores
               corresponding to each lat-long location (i.e. spatially).

               Example:
                   Let 'TS' (Threat Score) be a statistical score;
                   we calculate this spatially.

Written by: Dileepkumar R
            JRF- IIT DELHI
Date: 02.09.2011;

Updated By : Arulalan.T
Date : 17.09.2011
Date : 06.10.2011
"""
import os
import sys
import numpy
import cdms2
import ctgfunction
# setting the absolute path of the previous directory
# getting the this py module path by __file__ variable
# pass that __file__ to the os.path.dirname, returns the path of this module
__diagnosisDir__ = os.path.dirname(__file__)
previousDir = os.path.abspath(os.path.join(__diagnosisDir__, '..'))
# adding the previous path to python path
sys.path.append(previousDir)
from diag_setup.varsdict import variables
from diag_setup.globalconfig import models, threshold, processfilesPath, \
obsrainfalls
import diag_setup.netcdf_settings
from datetime import datetime
def genStatisticalScorePath(modelname, modelhour, rainfallPath,
rainfallXmlName=None):
"""
:func:`genStatisticalScorePath`: It should make the existing path of
process files statistical score. Also if that is correct path means, it
should calls the function `genStatisticalScoreSpatialDistribution` to
compute the statistical score in spatially distributed way.
Inputs : modelname and modelhour are the part of the directory hierarchy
structure.
"""
procStatiPath = os.path.join(processfilesPath, modelname, 'StatiScore')
statiYears = os.listdir(procStatiPath)
for year in statiYears:
statiPath = os.path.join(procStatiPath, year, 'Season')
statiSeasons = os.listdir(statiPath)
for season in statiSeasons:
statiSeasonPath = os.path.join(statiPath, season)
statiHr = os.path.join(statiSeasonPath, modelhour[0])
fcstRainfall = [nc for nc in os.listdir(statiHr)
if nc.endswith('fcst_rainfall.nc')]
if fcstRainfall:
# this season hours has '*fcst_rainfall.nc'. So calling fn
# if we passed lt, lon values, then we are shrinking the model
# lat,lon while doing statistical score spatially distribution
genStatisticalScoreSpatialDistribution(modelname, modelhour,
season, year, statiSeasonPath,
rainfallPath, rainfallXmlName),
#lat = (0, 40), lon = (60, 100))
else:
print "%s directory doesn't have the file endswith \
'fcst_rainfall.nc'. So skipping %s season" % (statiHr, season)
# end of if fcstRainfall:
# end of for season in statiSeasons:
# end of for year in statiYears:
# end of def genStatisticalScorePath(modelname, modelhour):
def genStatisticalScoreSpatialDistribution(modelname, modelhour, season,
year, statiSeasonPath, rainfallPath,
rainfallXmlName=None, lat=None, lon=None):
"""
:func: `genStatisticalScoreSpatialDistribution` : It should compute the
statistical scores like " Threat Score, Equitable Threat Score,
Accuracy(Hit Rate), Bias Score, Probability Of Detection, Odds Ratio,
False Alarm Rate, Probability Of False Detection, Kuipers Skill Score,
Log Odd Ratio, Heidke Skill Score, Odd Ratio Skill Score, &
Extreame Dependency Score" in spatially distributed way (i.e compute
scores in each and every lat & lon points) by accessing the
observation and forecast data.
Inputs : modelname, modelhour, season, year are helps to generate the
path. statiSeasonPath is the partial path of process statistical
score season path.
rainfallPath is the path of the observation rainfall.
rainfallXmlName is the name of the xml file name, it is an
optional one. By default it takes 'rainfall_regrided.xml'.
lat, lon takes tuple args. If we passed it, then the model lat,
lon should be shrinked according to the passed lat,lon.
Some times it may helpful to do statistical score in spatially
distributed in particular region among the global lat,lon.
Outputs : It should store the computed statistical scores in spatially
distributed way for all the modelhour(s) as nc files in the
appropriate directory hierarchy structure.
"""
Threshold = cdms2.createAxis(threshold)
Threshold.id = 'threshold'
# get the model and obs variable name from the global variables, which
# has set in the global 'vars.txt' file.
totalvars = variables.get(modelname)
obsVar = totalvars.get('rain').obs_var
fcstVar = totalvars.get('rain').model_var
# generate the observation rainfall xml file path
if rainfallXmlName:
# If user set rainfallXmlName in the global config.txt settings
ObsRainfall = rainfallPath + '/' + rainfallXmlName
else:
# If user didnt set rainfallXmlName in the global config.txt settings
# then it should takes default xml filename.
ObsRainfall = rainfallPath + '/' + 'rainfall_regrided.xml'
obsfile = cdms2.open(ObsRainfall)
# get lataxis, lonaxis of obs
Lat = obsfile[obsVar].getLatitude()
Lon = obsfile[obsVar].getLongitude()
# Calculating lat & lon resolution(we are assuming that our grid is
# uniform resolution)
lat_model_resolution = Lat[:][1] - Lat[:][0]
lon_model_resolution = Lon[:][1] - Lon[:][0]
if lat and lon:
# Assaining new lat & lon in 'newlat'& 'newlon'
newlat = []
newlon = []
# The logic is substacting the our input region lat & lon from model/
# lat lon array, then find the arg. of lest positive number in that list
for ind in [0, 1]:
latdiff = Lat-lat[ind]
myarg_la = numpy.argwhere(latdiff >= 0)[0]
print myarg_la
newlat.append(Lat[myarg_la])
londiff = Lon-lon[ind]
myarg_lo = numpy.argwhere(londiff >= 0)[0]
print myarg_lo
newlon.append(Lon[myarg_lo])
newlat=numpy.unique1d(numpy.array(newlat))
newlon=numpy.unique1d(numpy.array(newlon))
print "latitude shape before slice w.r.t arg lat", Lat.shape
print "longitude shpe before slice w.r.t arg lon", Lon.shape
# slicing the lat,lon with respect to argument lat,lon
Lat = Lat[:]
Lon = Lon[:]
Lat = Lat.compress(Lat >= newlat[0])
Lat = Lat.compress(Lat <= newlat[1])
Lon = Lon.compress(Lon >= newlon[0])
Lon = Lon.compress(Lon <= newlon[1])
Lat = cdms2.createAxis(Lat)
Lat.id = 'latitude'
Lat.designateLatitude()
Lon = cdms2.createAxis(Lon)
Lon.id = 'longitude'
Lon.designateLongitude()
print "latitude shape after slice w.r.t arg lat", Lat.shape
print "longitude shpe after slice w.r.t arg lon", Lon.shape
print "\naccoring to the new shape the statistical score spatially \
distribution takes place\n"
# end of if lat and lon:
print "The model latitude resolution is ", lat_model_resolution
print "The model longitude resolution is ", lon_model_resolution
print "Depence on the lat lon resolution this computational time will be\
increase to find out the statistical score in spatially \
distributed way"
latlen = len(Lat)
lonlen = len(Lon)
thlen = len(threshold)
print "Started to compute statistical spatially distributed at", str(datetime.now())
# get the obs data.
obs = obsfile(obsVar)
for i in range(len(modelhour)):
statihour = os.path.join(statiSeasonPath, modelhour[i])
fcstRainfall = [nc for nc in os.listdir(statihour)
if nc.endswith('fcst_rainfall.nc')]
if fcstRainfall:
fcstRainfall = statihour +'/' + fcstRainfall[0]
else:
print "%s directory doesn't have the file endswith \
'fcst_rainfall.nc'. So skipping %s hour " % (statihour, modelhour[i])
continue
# end of if fcstRainfall:
fcstfile = cdms2.open(fcstRainfall)
stati_ncfile = 'stati_spatial_distribution_score''_%shr_%s_%s_%s.nc' \
% (modelhour[i], season, year, modelname)
print "Calculating statistical score in spatially distributed way \
for %s hour of %s season %s %s model" % (modelhour[i],
season, year, modelname)
# creating dummy numpy array to store the scores
dummy = numpy.zeros((thlen, latlen, lonlen), dtype = numpy.float32)
TS_list = dummy.copy()
ETS_list = dummy.copy()
HR_list = dummy.copy()
BS_list = dummy.copy()
POD_list = dummy.copy()
FAR_list = dummy.copy()
POFD_list = dummy.copy()
KSS_list = dummy.copy()
HSS_list = dummy.copy()
ODR_list = dummy.copy()
LODR_list = dummy.copy()
ORSS_list = dummy.copy()
EDS_list = dummy.copy()
# make free memory
del dummy
# get the fcst data.
fcst = fcstfile(fcstVar)
fcst_lat = fcst.getLatitude()
fcst_lon = fcst.getLongitude()
fcst_time = fcst.getTime()
# convert fcst data, by replacing -ve into 0 values if
# present. We are replacing all negative value to 0, by
# giving the condition as 'fcst>0' for replacing
# negative value by '0'----Why?---->{We are givig all
# value as 0 if it is 'false', if we give condition as
# 'fcst<0' then all values which are 'false' replaced
# by '0' thats why we givig the condition as
# 'fcst>0'}
fcst = numpy.ma.where(fcst > 0, fcst, 0)
fcst = cdms2.createVariable(fcst)
fcst.setAxisList([fcst_time, fcst_lat, fcst_lon])
print "%s modelhour spatially statistical started at %s\n" % (modelhour[i], str(datetime.now()))
for j in range(latlen):
for k in range(lonlen):
# # get the obs data.
# obsData = obsfile(obsVar, latitude = Lat[j],
# longitude = Lon[k], squeeze = 1)
# obs = numpy.array(obsData)
# # make free memory
# del obsData
# # get the fcst data.
# fcstData = fcstfile(fcstVar, latitude = Lat[j],
# longitude = Lon[k], squeeze = 1)
# fcst = numpy.array(fcstData)
# # make free memory
# del fcstData
OBS = obs(latitude = Lat[j], longitude = Lon[k], squeeze = 1)
OBS = numpy.array(OBS)
FCST = fcst(latitude = Lat[j], longitude = Lon[k], squeeze = 1)
FCST = numpy.array(FCST)
for ths in range(thlen):
th = threshold[ths]
#----CONTIGENCY TABLE----#
ctgTable = ctgfunction.contingency_table_2x2(OBS, FCST, th)
#----BIAS SCORE----#
BS = ctgfunction.bias_score(ctg_table = ctgTable)
BS_list[ths, j, k] = BS
#-----ODDS RATIO------#
ODR = ctgfunction.odr(ctg_table = ctgTable)
ODR_list[ths, j, k] = ODR
#-----LOG ODD RATIO----#
LODR = ctgfunction.logodr(ctg_table = ctgTable)
LODR_list[ths, j, k] = LODR
#-----EXTREAME DEPENDENCY SCORE----#
EDS = ctgfunction.eds(ctg_table = ctgTable)
EDS_list[ths, j, k] = EDS
#----THREAT SCORE------#
threat_score = ctgfunction.ts(ctg_table = ctgTable)
TS_list[ths, j, k] = threat_score
#----EQUITABLE THREAT SCORE------#
ETS = ctgfunction.ets(ctg_table = ctgTable)
ETS_list[ths, j, k] = ETS
#----ACCURACY(HIT RATE)------#
Accuracy = ctgfunction.accuracy(ctg_table = ctgTable)
HR_list[ths, j, k] = Accuracy
#-----PROBABILITY OF DETECTION------#
POD = ctgfunction.pod(ctg_table = ctgTable)
POD_list[ths, j, k] = POD
#-----FALSE ALARM RATE--------#
FAR = ctgfunction.far(ctg_table = ctgTable)
FAR_list[ths, j, k] = FAR
#-----PROBABILITY OF FALSE DETECTION----#
POFD = ctgfunction.pofd(ctg_table = ctgTable)
POFD_list[ths, j, k] = POFD
#-----KUIPERS SKILL SCORE----#
KSS = ctgfunction.kss(ctg_table = ctgTable)
KSS_list[ths, j, k] = KSS
#-----HEIDKE SKILL SCORE-----#
HSS = ctgfunction.hss(ctg_table = ctgTable)
HSS_list[ths, j, k] = HSS
#-----ODD RATIO SKILL SCORE-----#
ORSS = ctgfunction.orss(ctg_table = ctgTable)
ORSS_list[ths, j, k] = ORSS
# end of for ths in xrange(thlen):
# make free memory
del OBS, FCST, th
# end of for k in range(lonlen):
# end of for j in range(latlen):
# make memory free
del fcst
print "%s modelhour spatially statistical ended at %s \n" % (modelhour[i], str(datetime.now()))
print "Storing the computed scores variables into nc file\n"
# open the nc file to write region wise statistical score
F_nc = cdms2.open(statihour +'/' + stati_ncfile, 'w')
# Threat Score Variable
TS = cdms2.createVariable(TS_list)
TS.id = 'ts'
TS.long_name = 'Threat Score'
TS.setAxisList([Threshold, Lat, Lon])
# write this variables into nc file
F_nc.write(TS)
# make free memory
del TS_list, TS
# Equitable Threat Score Variable
ETS = cdms2.createVariable(ETS_list)
ETS.id = 'ets'
ETS.long_name = 'Equitable Threat Score'
ETS.setAxisList([Threshold, Lat, Lon])
# write this variables into nc file
F_nc.write(ETS)
# make free memory
del ETS_list, ETS
# Hit Rate Variable
HR = cdms2.createVariable(HR_list)
HR.id = 'hr'
HR.long_name = 'Hit Rate'
HR.setAxisList([Threshold, Lat, Lon])
# write this variables into nc file
F_nc.write(HR)
# make free memory
del HR_list, HR
# Bias Score Variable
BS = cdms2.createVariable(BS_list)
BS.id = 'bs'
BS.long_name = 'Bias Score'
BS.setAxisList([Threshold, Lat, Lon])
# write this variables into nc file
F_nc.write(BS)
# make free memory
del BS_list, BS
# Probability of Detectio Variable
POD = cdms2.createVariable(POD_list)
POD.id = 'pod'
POD.long_name = 'Probability Of Detection'
POD.setAxisList([Threshold, Lat, Lon])
# write this variables into nc file
F_nc.write(POD)
# make free memory
del POD_list, POD
# False Alarm Rate Variable
FAR = cdms2.createVariable(FAR_list)
FAR.id = 'far'
FAR.long_name = 'False Alarm Rate'
FAR.setAxisList([Threshold, Lat, Lon])
# write this variables into nc file
F_nc.write(FAR)
# make free memory
del FAR_list, FAR
# Probability of False Detection Variable
POFD = cdms2.createVariable(POFD_list)
POFD.id = 'pofd'
POFD.long_name = 'Probability of False Detection'
POFD.setAxisList([Threshold, Lat, Lon])
# write this variables into nc file
F_nc.write(POFD)
# make free memory
del POFD_list, POFD
# Kuipers Skill Score Variable
KSS = cdms2.createVariable(KSS_list)
KSS.id = 'kss'
KSS.long_name = 'Kuipers Skill Score'
KSS.setAxisList([Threshold, Lat, Lon])
# write this variables into nc file
F_nc.write(KSS)
# make free memory
del KSS_list, KSS
# Heidke Skill Score Variable
HSS = cdms2.createVariable(HSS_list)
HSS.id = 'hss'
HSS.long_name = 'Heidke Skill Score'
HSS.setAxisList([Threshold, Lat, Lon])
# write this variables into nc file
F_nc.write(HSS)
# make free memory
del HSS_list, HSS
# Odds Ratio Variable
ODR = cdms2.createVariable(ODR_list)
ODR.id = 'odr'
ODR.long_name = 'Odds Ratio'
ODR.setAxisList([Threshold, Lat, Lon])
# write this variables into nc file
F_nc.write(ODR)
# make free memory
del ODR_list, ODR
# Log Odd Ratio Variable
LODR = cdms2.createVariable(LODR_list)
LODR.id = 'lodr'
LODR.long_name = 'Log Odd Ratio'
LODR.setAxisList([Threshold, Lat, Lon])
# write this variables into nc file
F_nc.write(LODR)
# make free memory
del LODR_list, LODR
# Odds Ratio Skill Score Variable
ORSS = cdms2.createVariable(ORSS_list)
ORSS.id = 'orss'
ORSS.long_name = 'Odds Ratio Skill Score'
ORSS.setAxisList([Threshold, Lat, Lon])
# write this variables into nc file
F_nc.write(ORSS)
# make free memory
del ORSS_list, ORSS
# Extreame Dependency Score Variable
EDS = cdms2.createVariable(EDS_list)
EDS.id = 'eds'
EDS.long_name = 'Extreame Dependency Score'
EDS.setAxisList([Threshold, Lat, Lon])
# write this variables into nc file
F_nc.write(EDS)
# make free memory
del EDS_list, EDS
# close nc file
F_nc.close()
print "Wrinting spatially distributed statistical score in %s/%s" % \
(statihour, stati_ncfile)
# end of for i in range(len(modelhour)):
# make memory free
del obs
# end of def genStatisticalScoreSpatialDistribution(...):
# Command-line entry point: pair each configured model with each configured
# observed-rainfall source and generate the spatially distributed
# statistical-score netCDF files for every matching pair.
# NOTE(review): `models`, `obsrainfalls`, `processfilesPath` and
# `genStatisticalScorePath` are module-level names defined earlier in this
# file / its imports (not visible in this chunk).
if __name__ == '__main__':
    # Report how many model / observation configurations were read.
    if len(models) == len(obsrainfalls) == 1:
        print "Obtained one model and one obsrainfall "
    elif len(models) == len(obsrainfalls):
        print "Obtained %d models and obsrainfalls" % len(models)
    else:
        print "Obtained %d models and %d obsrainfalls" % (len(models), len(obsrainfalls))
    for model in models:
        for obsrainfall in obsrainfalls:
            # Only process pairs whose record counts agree.
            if model.count == obsrainfall.count:
                if obsrainfall.regrid == 'yes':
                    # generate regridded obsrainfall directory w.r.t
                    # obsrainfall name in the
                    # processfilesPath, modelname, Regrid, ObsRain directory.
                    obsrainPath = os.path.join(processfilesPath,
                                               model.name, 'Regrid',
                                               'ObsRain', obsrainfall.name)
                elif obsrainfall.regrid == 'no':
                    # user passed 'no' option. It means the obsrainfall.path
                    # obsrainfall data is w.r.t to model fcst data.
                    obsrainPath = obsrainfall.path
                else:
                    # NOTE(review): any other `regrid` value leaves
                    # `obsrainPath` unbound, so the call below would raise
                    # NameError -- confirm this case cannot occur.
                    pass
                # calling the genStatisticalScoreDirs function to do process
                genStatisticalScorePath(model.name, model.hour,
                                        obsrainPath, obsrainfall.xml)
            else:
                pass
                # obsrainfall configuration and model data configuration are not equal in the text file
                # handle this case, in diff manner. The same loop should works.
                # But need to check all the cases.
    print "Done! Creation of Statistical scores spatially distribution netCdf Files"
# end of if __name__ == '__main__':
|
arulalant/mmDiagnosis
|
diagnosis1/diagnosis/compute_season_stati_score_spatial_distribution.py
|
Python
|
gpl-3.0
| 20,517
|
[
"NetCDF"
] |
2d90bc4a8bb21d6a09208e804069b5f88086df591d5581ade020cedb7ae17896
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grd_reader package'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
import six
from six import StringIO
from grit import exception
from grit import grd_reader
from grit import util
from grit.node import empty
from grit.node import message
class GrdReaderUnittest(unittest.TestCase):
  """Unit tests for grd_reader.Parse and first-id assignment.

  Each test feeds an in-memory .grd document (via StringIO) to
  grd_reader.Parse and inspects the resulting node tree.  The deprecated
  unittest aliases (failUnless, removed in Python 3.12) are replaced here
  with their modern equivalents, matching the assert* style already used
  elsewhere in this class.
  """

  def testParsingAndXmlOutput(self):
    # Round trip: parse then re-serialize; the default base_dir="." is
    # dropped on output, everything else must survive unchanged.
    grd_text = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit base_dir="." current_release="3" latest_public_release="2" source_lang_id="en-US">
<release seq="3">
<includes>
<include file="images/logo.gif" name="ID_LOGO" type="gif" />
</includes>
<messages>
<if expr="True">
<message desc="Printed to greet the currently logged in user" name="IDS_GREETING">
Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
</message>
</if>
</messages>
<structures>
<structure file="rc_files/dialogs.rc" name="IDD_NARROW_DIALOG" type="dialog">
<skeleton expr="lang == 'fr-FR'" file="bla.rc" variant_of_revision="3" />
</structure>
<structure file="rc_files/version.rc" name="VS_VERSION_INFO" type="version" />
</structures>
</release>
<translations>
<file lang="nl" path="nl_translations.xtb" />
</translations>
<outputs>
<output filename="resource.h" type="rc_header" />
<output filename="resource.rc" lang="en-US" type="rc_all" />
</outputs>
</grit>'''
    pseudo_file = StringIO(grd_text)
    tree = grd_reader.Parse(pseudo_file, '.')
    output = six.text_type(tree)
    expected_output = grd_text.replace(u' base_dir="."', u'')
    self.assertEqual(expected_output, output)
    self.assertTrue(tree.GetNodeById('IDS_GREETING'))

  def testStopAfter(self):
    # With stop_after='outputs', parsing must end once the <outputs>
    # element has been consumed.
    grd_text = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<outputs>
<output filename="resource.h" type="rc_header" />
<output filename="resource.rc" lang="en-US" type="rc_all" />
</outputs>
<release seq="3">
<includes>
<include type="gif" name="ID_LOGO" file="images/logo.gif"/>
</includes>
</release>
</grit>'''
    pseudo_file = StringIO(grd_text)
    tree = grd_reader.Parse(pseudo_file, '.', stop_after='outputs')
    # only an <outputs> child
    self.assertEqual(len(tree.children), 1)
    self.assertEqual(tree.children[0].name, 'outputs')

  def testLongLinesWithComments(self):
    # XML comments placed at line breaks should splice the message text
    # into one continuous line.
    grd_text = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
This is a very long line with no linebreaks yes yes it stretches on <!--
-->and on <!--
-->and on!
</message>
</messages>
</release>
</grit>'''
    pseudo_file = StringIO(grd_text)
    tree = grd_reader.Parse(pseudo_file, '.')
    greeting = tree.GetNodeById('IDS_GREETING')
    self.assertEqual(greeting.GetCliques()[0].GetMessage().GetRealContent(),
                     'This is a very long line with no linebreaks yes yes it '
                     'stretches on and on and on!')

  def doTestAssignFirstIds(self, first_ids_path):
    # Helper: parse a grd referencing *first_ids_path* and check that
    # AssignFirstIds replaces the default first_id.
    grd_text = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3"
base_dir="." first_ids_file="%s">
<release seq="3">
<messages>
<message name="IDS_TEST" desc="test">
test
</message>
</messages>
</release>
</grit>''' % first_ids_path
    pseudo_file = StringIO(grd_text)
    grit_root_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                 '..')
    fake_input_path = os.path.join(
        grit_root_dir, "grit/testdata/chrome/app/generated_resources.grd")
    root = grd_reader.Parse(pseudo_file, os.path.split(fake_input_path)[0])
    root.AssignFirstIds(fake_input_path, {})
    messages_node = root.children[0].children[0]
    self.assertIsInstance(messages_node, empty.MessagesNode)
    self.assertNotEqual(messages_node.attrs["first_id"],
                        empty.MessagesNode().DefaultAttributes()["first_id"])

  def testAssignFirstIds(self):
    self.doTestAssignFirstIds("../../tools/grit/resource_ids")

  def testAssignFirstIdsUseGritDir(self):
    self.doTestAssignFirstIds("GRIT_DIR/grit/testdata/tools/grit/resource_ids")

  def testAssignFirstIdsMultipleMessages(self):
    """If there are multiple messages sections, the resource_ids file
    needs to list multiple first_id values."""
    grd_text = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3"
base_dir="." first_ids_file="resource_ids">
<release seq="3">
<messages>
<message name="IDS_TEST" desc="test">
test
</message>
</messages>
<messages>
<message name="IDS_TEST2" desc="test">
test2
</message>
</messages>
</release>
</grit>'''
    pseudo_file = StringIO(grd_text)
    grit_root_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                 '..')
    fake_input_path = os.path.join(grit_root_dir, "grit/testdata/test.grd")
    root = grd_reader.Parse(pseudo_file, os.path.split(fake_input_path)[0])
    root.AssignFirstIds(fake_input_path, {})
    messages_node = root.children[0].children[0]
    self.assertIsInstance(messages_node, empty.MessagesNode)
    self.assertEqual('100', messages_node.attrs["first_id"])
    messages_node = root.children[0].children[1]
    self.assertIsInstance(messages_node, empty.MessagesNode)
    self.assertEqual('10000', messages_node.attrs["first_id"])

  def testUseNameForIdAndPpIfdef(self):
    grd_text = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<if expr="pp_ifdef('hello')">
<message name="IDS_HELLO" use_name_for_id="true">
Hello!
</message>
</if>
</messages>
</release>
</grit>'''
    pseudo_file = StringIO(grd_text)
    root = grd_reader.Parse(pseudo_file, '.', defines={'hello': '1'})
    # Check if the ID is set to the name. In the past, there was a bug
    # that caused the ID to be a generated number.
    hello = root.GetNodeById('IDS_HELLO')
    self.assertEqual(hello.GetCliques()[0].GetId(), 'IDS_HELLO')

  def testUseNameForIdWithIfElse(self):
    grd_text = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<if expr="pp_ifdef('hello')">
<then>
<message name="IDS_HELLO" use_name_for_id="true">
Hello!
</message>
</then>
<else>
<message name="IDS_HELLO" use_name_for_id="true">
Yellow!
</message>
</else>
</if>
</messages>
</release>
</grit>'''
    pseudo_file = StringIO(grd_text)
    root = grd_reader.Parse(pseudo_file, '.', defines={'hello': '1'})
    # Check if the ID is set to the name. In the past, there was a bug
    # that caused the ID to be a generated number.
    hello = root.GetNodeById('IDS_HELLO')
    self.assertEqual(hello.GetCliques()[0].GetId(), 'IDS_HELLO')

  def testPartInclusionAndCorrectSource(self):
    # <part> elements are inlined recursively; each message must remember
    # the file it actually came from.
    arbitrary_path_grd = u'''\
<grit-part>
<message name="IDS_TEST5" desc="test5">test5</message>
</grit-part>'''
    tmp_dir = util.TempDir({'arbitrary_path.grp': arbitrary_path_grd})
    arbitrary_path_grd_file = tmp_dir.GetPath('arbitrary_path.grp')
    top_grd = u'''\
<grit latest_public_release="2" current_release="3">
<release seq="3">
<messages>
<message name="IDS_TEST" desc="test">
test
</message>
<part file="sub.grp" />
<part file="%s" />
</messages>
</release>
</grit>''' % arbitrary_path_grd_file
    sub_grd = u'''\
<grit-part>
<message name="IDS_TEST2" desc="test2">test2</message>
<part file="subsub.grp" />
<message name="IDS_TEST3" desc="test3">test3</message>
</grit-part>'''
    subsub_grd = u'''\
<grit-part>
<message name="IDS_TEST4" desc="test4">test4</message>
</grit-part>'''
    expected_output = u'''\
<grit current_release="3" latest_public_release="2">
<release seq="3">
<messages>
<message desc="test" name="IDS_TEST">
test
</message>
<part file="sub.grp">
<message desc="test2" name="IDS_TEST2">
test2
</message>
<part file="subsub.grp">
<message desc="test4" name="IDS_TEST4">
test4
</message>
</part>
<message desc="test3" name="IDS_TEST3">
test3
</message>
</part>
<part file="%s">
<message desc="test5" name="IDS_TEST5">
test5
</message>
</part>
</messages>
</release>
</grit>''' % arbitrary_path_grd_file
    with util.TempDir({'sub.grp': sub_grd,
                       'subsub.grp': subsub_grd}) as tmp_sub_dir:
      output = grd_reader.Parse(StringIO(top_grd),
                                tmp_sub_dir.GetPath())
      correct_sources = {
        'IDS_TEST': None,
        'IDS_TEST2': tmp_sub_dir.GetPath('sub.grp'),
        'IDS_TEST3': tmp_sub_dir.GetPath('sub.grp'),
        'IDS_TEST4': tmp_sub_dir.GetPath('subsub.grp'),
        'IDS_TEST5': arbitrary_path_grd_file,
      }
      for node in output.ActiveDescendants():
        with node:
          if isinstance(node, message.MessageNode):
            self.assertEqual(correct_sources[node.attrs.get('name')],
                             node.source)
      self.assertEqual(expected_output.split(), output.FormatXml().split())
    tmp_dir.CleanUp()

  def testPartInclusionFailure(self):
    # Malformed or missing <part>/<grit-part> content must raise the
    # matching grit exception.
    template = u'''
<grit latest_public_release="2" current_release="3">
<outputs>
%s
</outputs>
</grit>'''
    part_failures = [
        (exception.UnexpectedContent, u'<part file="x">fnord</part>'),
        (exception.UnexpectedChild,
         u'<part file="x"><output filename="x" type="y" /></part>'),
        (exception.FileNotFound, u'<part file="yet_created_x" />'),
    ]
    for raises, data in part_failures:
      stream = StringIO(template % data)
      self.assertRaises(raises, grd_reader.Parse, stream, '.')
    gritpart_failures = [
        (exception.UnexpectedAttribute, u'<grit-part file="xyz"></grit-part>'),
        (exception.MissingElement, u'<output filename="x" type="y" />'),
    ]
    for raises, data in gritpart_failures:
      top_grd = StringIO(template % u'<part file="bad.grp" />')
      with util.TempDir({'bad.grp': data}) as temp_dir:
        self.assertRaises(raises, grd_reader.Parse, top_grd,
                          temp_dir.GetPath())

  def testEarlyEnoughPlatformSpecification(self):
    # This is a regression test for issue
    # https://code.google.com/p/grit-i18n/issues/detail?id=23
    grd_text = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="1" current_release="1">
<release seq="1">
<messages>
<if expr="not pp_ifdef('use_titlecase')">
<message name="IDS_XYZ">foo</message>
</if>
<!-- The assumption is that use_titlecase is never true for
this platform. When the platform isn't set to 'android'
early enough, we get a duplicate message name. -->
<if expr="os == '%s'">
<message name="IDS_XYZ">boo</message>
</if>
</messages>
</release>
</grit>''' % sys.platform
    with util.TempDir({}) as temp_dir:
      grd_reader.Parse(StringIO(grd_text), temp_dir.GetPath(),
                       target_platform='android')
if __name__ == '__main__':
  # Allow running this test module directly from the command line.
  unittest.main()
|
endlessm/chromium-browser
|
tools/grit/grit/grd_reader_unittest.py
|
Python
|
bsd-3-clause
| 12,542
|
[
"xTB"
] |
5171a15018d4c2103aac7fecb4350c59a231bb77fc82b0b05d0bf90dfa17764d
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Project: wb97xdDsC-optim
# FileName: make_input
# Creation: Jul 14, 2015
#
"""Building Gamess input file for single point.
Building Gamess input file for single point used in the wb97x-dDsC optimization.
The input will be created reading an xyz file where the comment line contains
the charge and multiplicity.
"""
import utils as uts
import re
import logging as lg
import time
# Try determining the version from git; fall back gracefully when git is
# unavailable or the checkout has no reachable tag.
try:
    import subprocess
    # check_output returns bytes -- decode and strip the trailing newline so
    # __version__ is always a clean str, matching the fallback below.
    git_v = subprocess.check_output(['git', 'describe'],
                                    stderr=subprocess.DEVNULL).decode().strip()
except (subprocess.CalledProcessError, OSError):
    # CalledProcessError: `git describe` failed (e.g. no tag / not a repo).
    # OSError: the git executable itself is missing.  Without the OSError
    # catch, importing this module crashed on hosts without git.
    git_v = 'Not Yet Tagged!'
__author__ = 'Riccardo Petraglia'
__credits__ = ['Riccardo Petraglia']
__updated__ = "2015-07-15"
__license__ = 'GPLv2'
__version__ = git_v
__maintainer__ = 'Riccardo Petraglia'
__email__ = 'riccardo.petraglia@gmail.com'
__status__ = 'development'
class Input(object):
    """Gamess single-point input deck built from an xyz geometry file.

    The xyz file's comment (second) line must hold two whitespace-separated
    fields: total charge and spin multiplicity.  The Gamess keyword groups
    are kept in ``self.gamess`` and serialized by :meth:`write`.
    """

    def __init__(self, filep):
        # Fail fast if the geometry file does not exist.
        uts.file_exists(filep)
        # self.config = dict(basis_set='6-31G')
        self.atoms = []            # element symbols, in file order
        self.x = []                # cartesian coordinates, kept as strings
        self.y = []
        self.z = []
        self.charge = ''           # filled by _read_xyz
        self.multiplicity = ''     # filled by _read_xyz
        self.title = 'Should be setted!'
        # self._template()
        # self._basis_set_conversion(self.config['basis_set'])
        self._read_xyz(filep)
        self._template()

    # def _basis_set_conversion(self, name):
    #     pople_reg = re.compile(r'([36])\-([23]1\d?)G(\*?\*?)')
    #     matching_pople = pople_reg.match(name)
    #     if matching_pople:
    #         self.gamess['BASIS']['GBASIS'] = 'N' + str(matching_pople.group(2))
    #         self.gamess['BASIS']['NGAUSS'] = str(matching_pople.group(1))
    #         if matching_pople.group(3):
    #             lg_msg = 'Polarized pople basis function not defined'
    #             lg.critical(lg_msg)
    #             raise(NotImplementedError(lg_msg))
    #     else:
    #         lg_msg = 'Basis function not implemented in the script'
    #         lg.critical(lg_msg)
    #         raise(NotImplementedError(lg_msg))
    #

    def _read_xyz(self, filep):
        """Parse *filep* (xyz format) into atoms, coordinates, charge, mult.

        Raises TypeError when a non-blank line does not have exactly four
        fields (symbol x y z).  A blank line terminates parsing.
        """
        with open(filep, 'r') as xyzf:
            xyzf.readline()  # atom-count line, unused
            self.charge, self.multiplicity = xyzf.readline().split()
            for line in xyzf:
                if len(line.split()) == 4:
                    line = line.strip()
                    atom, xt, yt, zt = line.split()
                    self.atoms.append(atom)
                    self.x.append(xt)
                    self.y.append(yt)
                    self.z.append(zt)
                elif (len(line.split()) > 0):
                    lg_msg = '{} is not an xyz file'.format(filep)
                    lg.critical(lg_msg)
                    raise(TypeError(lg_msg))
                else:
                    break

    def mult(self):
        # Accessor for the spin multiplicity read from the xyz file (str).
        return self.multiplicity

    def _building_data(self):
        # Build the $DATA group: title line, C1 symmetry, then one line per
        # atom with its atomic number and cartesian coordinates.
        self.gamess['DATA'] = [' ' + self.title, ' C1']
        for i, atom in enumerate(self.atoms):
            txt = ' {:3s} {:6.2f} {:12.6f} {:12.6f} {:12.6f}'.format(atom,
                float(atnum(atom)), float(self.x[i]), float(self.y[i]),
                float(self.z[i]))
            self.gamess['DATA'].append(txt)

    def write(self, filep):
        """Serialize all keyword groups plus the basis set into *filep*."""
        self._building_data()
        self._set_keyword_based_on_structures()
        txt = []
        for kw in self.gamess:
            if kw == 'DATA':
                # $DATA holds pre-formatted lines, not key=value pairs.
                txt.append(' $' + kw)
                txt.append('\n'.join(self.gamess['DATA']))
                txt.append(' $END')
                continue
            txt_line = (' $' + kw + ' ')
            for k, v in self.gamess[kw].items():
                txt_line += str(k) + '=' + str(v) + ' '
                if len(txt_line) > 60:
                    # Keep card lines short; flush and continue on a new line.
                    txt.append(txt_line)
                    txt_line = ''
            txt_line += '$END'
            txt.append(txt_line)
        # Append the externally stored basis set.  Files matching 'S-022'
        # use a dedicated basis file.
        # NOTE(review): hard-coded /dev/shm/afabrizi/* paths are machine
        # specific -- confirm they exist on the target host.
        m = re.search('S-022', str(filep))
        if m is not None:
            with open('/dev/shm/afabrizi/basisS22', 'r') as basis:
                line = (" " + "\n".join(map(str, [line.strip() for line in basis])))
                line2 = ' '.join(line.splitlines(True))
                txt.append(line2)
        else:
            with open('/dev/shm/afabrizi/basis', 'r') as basis:
                line = (" " + "\n".join(map(str, [line.strip() for line in basis])))
                line2 = ' '.join(line.splitlines(True))
                txt.append(line2)
        with open(filep, 'w') as outfp:
            outfp.write('\n'.join(txt))
        # Presumably gives the filesystem time to settle before the caller
        # launches Gamess -- TODO confirm this sleep is still needed.
        time.sleep(.5)

    def _set_keyword_based_on_structures(self):
        # Charge and multiplicity come straight from the xyz comment line.
        self.gamess['CONTRL']['ICHARG'] = self.charge
        self.gamess['CONTRL']['MULT'] = self.multiplicity
        if int(self.multiplicity) < 2:
            # Closed shell.
            self.gamess['CONTRL']['SCFTYP'] = 'RHF'
        else:
            # Open shell: lone Al/C/Si/S/Mg atoms are run ROHF, everything
            # else UHF.
            if self.atoms[0].upper() == 'AL' or self.atoms[0].upper() == 'C' or self.atoms[0].upper() == 'SI' or self.atoms[0].upper() == 'S' or self.atoms[0].upper() == 'MG':
                if len(self.atoms) < 2:
                    self.gamess['CONTRL']['SCFTYP'] = 'ROHF'
                else:
                    self.gamess['CONTRL']['SCFTYP'] = 'UHF'
            # elif len(self.atoms) >= 2 :
            #     self.gamess['CONTRL']['SCFTYP'] = 'UHF'
            else:
                self.gamess['CONTRL']['SCFTYP'] = 'UHF'

    def _template(self):
        # Default Gamess deck for a wB97X + dDsC single-point energy run.
        # The BASIS group looks like a diffuse, polarized 6-31 Pople basis
        # -- confirm against the Gamess $BASIS documentation.
        # strAt_=','.join(self.atoms)
        self.gamess = {#'BASIS': {"BASNAM(1)": strAt_},
                       'BASIS': dict(GBASIS='N31',
                                     NGAUSS='6',
                                     NDFUNC='1',
                                     DIFFSP='.TRUE.',
                                     POLAR='POPN31'),
                       'CONTRL': dict(EXETYP='RUN',
                                      SCFTYP='UHF',
                                      RUNTYP='ENERGY',
                                      DFTTYP='wB97X',
                                      MAXIT='200',
                                      ISPHER='1',
                                      ICHARG='',
                                      MULT=''),
                       'DATA': [],
                       'DFT': dict(DDSC='.t.',
                                   SWOFF='1.0E-3',
                                   #NRAD='99',
                                   #NLEB='590'),
                                   SG1='.TRUE.'),
                       'SYSTEM': dict(MWORDS='8'),
                       'SCF': dict(DIRSCF='.t.')}
def atnum(atom_label):
    """Return the atomic number for element symbol *atom_label*.

    Parameters
    ----------
    atom_label : str
        Element symbol exactly as written in the xyz file (e.g. 'O', 'Mg').

    Returns
    -------
    int
        The atomic number.

    Raises
    ------
    NotImplementedError
        If the symbol is not in the lookup table (logged as critical first).
    """
    at_num = dict(O=8,
                  H=1,
                  C=6,
                  S=16,
                  N=7,
                  He=2,
                  Mg=12,
                  Al=13,
                  Cl=17,
                  F=9,
                  B=5,
                  Be=4,
                  Si=14,
                  Li=3,
                  Na=11,
                  P=15)
    # `not in` instead of `not (x in y)`; also fixed the "electons" typo in
    # the error message and the parenthesized raise-as-call style.
    if atom_label not in at_num:
        lg_msg = 'Number of electrons not defined for {}.'.format(atom_label)
        lg.critical(lg_msg)
        raise NotImplementedError(lg_msg)
    return at_num[atom_label]
if __name__ == '__main__':
    # Lightweight smoke tests run when the module is executed directly.
    # Note: test_building_data() calls test_read_xyz() itself, so the read
    # test effectively runs twice via the `tests` loop below.
    def test_read_xyz():
        # Write a tiny water-molecule xyz file, parse it, and report.
        print('**** Testing read_xyz ****')
        xyzc = """3
0 1
O 0. 0. 0.
H 0. 0. 1.
H 0. .7 .4
"""
        with open('test.xyz', 'w') as testxyz:
            testxyz.write(xyzc)
        newinput = Input('test.xyz')
        print(newinput.atoms)
        if newinput.atoms == ['O', 'H', 'H']:
            print(' ** Test Passed ** ')
        return newinput

    def test_building_data():
        # Exercise the full write path (requires the external basis files
        # referenced in Input.write to exist).
        newinput = test_read_xyz()
        newinput.write('asd.inp')

    tests = [test_read_xyz, test_building_data]
    for test in tests:
        test()
|
grhawk/wb97xdDsC-optim
|
src/master/make_input.py
|
Python
|
gpl-2.0
| 7,794
|
[
"GAMESS"
] |
29fd6e2de22fb13c50058b9a38ad29ed6c5520cad126f75ee8ff99e88a3dbb5b
|
# Original Author: Travis Oliphant 2002
# Bug-fixes in 2006 by Tim Leslie
from __future__ import division, print_function, absolute_import
import numpy
from numpy import asarray, tan, exp, ones, squeeze, sign, \
all, log, sqrt, pi, shape, array, minimum, where, random
from .optimize import Result, _check_unknown_options
from scipy.lib.six.moves import xrange
__all__ = ['anneal']
_double_min = numpy.finfo(float).min
_double_max = numpy.finfo(float).max
class base_schedule(object):
    """Common state and bookkeeping for the annealing temperature schedules.

    Subclasses (fast_sa, cauchy_sa, boltzmann_sa) override update_guess and
    update_temp; this base class tracks acceptance statistics and provides
    the Metropolis acceptance test and starting-temperature estimation.
    """

    def __init__(self):
        self.dwell = 20          # inner-loop iterations per temperature
        self.learn_rate = 0.5
        self.lower = -10         # default box bounds on x
        self.upper = 10
        self.Ninit = 50          # random samples used to estimate T0
        self.accepted = 0
        self.tests = 0
        self.feval = 0           # function-evaluation counter
        self.k = 0               # outer-loop (cooling) iteration counter
        self.T = None            # current temperature

    def init(self, **options):
        # Absorb arbitrary keyword options as attributes, clamp the bounds
        # to finite values, and reset all counters.
        self.__dict__.update(options)
        self.lower = asarray(self.lower)
        self.lower = where(self.lower == numpy.NINF, -_double_max, self.lower)
        self.upper = asarray(self.upper)
        self.upper = where(self.upper == numpy.PINF, _double_max, self.upper)
        self.k = 0
        self.accepted = 0
        self.feval = 0
        self.tests = 0

    def getstart_temp(self, best_state):
        """ Find a matching starting temperature and starting parameters vector
        i.e. find x0 such that func(x0) = T0.

        Parameters
        ----------
        best_state : _state
            A _state object to store the function value and x0 found.

        Returns
        -------
        x0 : array
            The starting parameters vector.
        """
        # Samples Ninit uniform random points in [lower, upper], records the
        # lowest-cost one in best_state, and sets self.T0 to 1.5x the
        # observed cost range (fmax - fmin).
        assert(not self.dims is None)
        lrange = self.lower
        urange = self.upper
        fmax = _double_min
        fmin = _double_max
        for _ in range(self.Ninit):
            x0 = random.uniform(size=self.dims)*(urange-lrange) + lrange
            fval = self.func(x0, *self.args)
            self.feval += 1
            if fval > fmax:
                fmax = fval
            if fval < fmin:
                fmin = fval
                best_state.cost = fval
                best_state.x = array(x0)
        self.T0 = (fmax-fmin)*1.5
        return best_state.x

    def accept_test(self, dE):
        # Metropolis criterion: always accept downhill moves (dE < 0);
        # accept uphill moves with probability exp(-dE / (boltzmann * T)).
        # Returns 1 for accept, 0 for reject.
        T = self.T
        self.tests += 1
        if dE < 0:
            self.accepted += 1
            return 1
        p = exp(-dE*1.0/self.boltzmann/T)
        if (p > random.uniform(0.0, 1.0)):
            self.accepted += 1
            return 1
        return 0

    def update_guess(self, x0):
        # Overridden by subclasses to propose a new point from x0.
        pass

    def update_temp(self, x0):
        # Overridden by subclasses to cool the temperature.
        # NOTE(review): subclasses define update_temp(self) with no x0
        # argument -- the signatures disagree; confirm callers pass none.
        pass
# A schedule due to Lester Ingber
class fast_sa(base_schedule):
    """'Fast' annealing schedule (due to Lester Ingber)."""

    def init(self, **options):
        self.__dict__.update(options)
        # Default the schedule shape parameters when not supplied.
        if self.m is None:
            self.m = 1.0
        if self.n is None:
            self.n = 1.0
        # Cooling constant: c = m * exp(-n * quench).
        self.c = self.m * exp(-self.n * self.quench)

    def update_guess(self, x0):
        # Heavy-tailed step scaled by the box size:
        # u ~ Uniform(0,1)^d; y = sgn(u-0.5) * T * ((1 + 1/T)**|2u-1| - 1).
        x0 = asarray(x0)
        u = squeeze(random.uniform(0.0, 1.0, size=self.dims))
        T = self.T
        y = sign(u-0.5)*T*((1+1.0/T)**abs(2*u-1)-1.0)
        xc = y*(self.upper - self.lower)
        xnew = x0 + xc
        return xnew

    def update_temp(self):
        # Exponential cooling: T = T0 * exp(-c * k**quench).
        self.T = self.T0*exp(-self.c * self.k**(self.quench))
        self.k += 1
        return
class cauchy_sa(base_schedule):
    """Cauchy annealing schedule: Cauchy-distributed steps, T0/(1+k) cooling."""

    def update_guess(self, x0):
        # tan of a uniform angle in (-pi/2, pi/2) yields Cauchy-distributed
        # step lengths, scaled by learn_rate and the current temperature.
        x0 = asarray(x0)
        numbers = squeeze(random.uniform(-pi/2, pi/2, size=self.dims))
        xc = self.learn_rate * self.T * tan(numbers)
        xnew = x0 + xc
        return xnew

    def update_temp(self):
        # Hyperbolic cooling: T = T0 / (1 + k).
        self.T = self.T0/(1+self.k)
        self.k += 1
        return
class boltzmann_sa(base_schedule):
    """Boltzmann annealing schedule: Gaussian steps, T0/log(1+k) cooling."""

    def update_guess(self, x0):
        # Gaussian step whose std is capped at a third of the scaled box
        # size so proposals stay within a reasonable range.
        std = minimum(sqrt(self.T) * ones(self.dims),
                      (self.upper - self.lower) / 3.0 / self.learn_rate)
        x0 = asarray(x0)
        xc = squeeze(random.normal(0, 1.0, size=self.dims))
        xnew = x0 + xc*std*self.learn_rate
        return xnew

    def update_temp(self):
        # Logarithmic cooling: T = T0 / log(k + 1).
        self.k += 1
        self.T = self.T0 / log(self.k+1.0)
        return
class _state(object):
def __init__(self):
self.x = None
self.cost = None
# TODO:
# allow for general annealing temperature profile
# in that case use update given by alpha and omega and
# variation of all previous updates and temperature?
# Simulated annealing
def anneal(func, x0, args=(), schedule='fast', full_output=0,
           T0=None, Tf=1e-12, maxeval=None, maxaccept=None, maxiter=400,
           boltzmann=1.0, learn_rate=0.5, feps=1e-6, quench=1.0, m=1.0, n=1.0,
           lower=-100, upper=100, dwell=50, disp=True):
    """
    Minimize a function using simulated annealing.

    Simulated annealing is a random algorithm that uses no derivative
    information from the function being optimized; it evaluates the
    objective at randomly proposed points, keeps those passing a
    probabilistic acceptance test, and progressively tightens ("cools")
    that test.  Available annealing schedules are 'fast', 'cauchy' and
    'boltzmann'.

    Parameters
    ----------
    func : callable
        Objective function ``f(x, *args)`` taking a 1-D array and any
        additional fixed parameters, returning a scalar.
    x0 : 1-D array
        Initial guess.
    args : tuple, optional
        Extra fixed parameters passed through to `func`.
    schedule : str, optional
        One of 'fast', 'cauchy' or 'boltzmann'.  See Notes.
    full_output : bool, optional
        If true, return every value listed under Returns; otherwise only
        `xmin` and `status`.
    T0 : float, optional
        Initial temperature.  If None, it is estimated as 1.2 times the
        largest cost deviation over random points in the box given by
        `lower` and `upper`.
    Tf : float, optional
        Final goal temperature; iteration ceases once T falls below it.
    maxeval : int, optional
        Cease iterations once the number of function evaluations exceeds
        this.
    maxaccept : int, optional
        Cease iterations once the number of accepted points exceeds this.
    maxiter : int, optional
        Cease iterations once the number of cooling iterations exceeds
        this.
    boltzmann : float, optional
        Boltzmann constant in the probabilistic acceptance criteria
        (increase for less stringent criteria at each temperature).
    learn_rate : float, optional
        Scale constant for tuning the probabilistic acceptance criteria.
    feps : float, optional
        Cease iterations if the relative error of the function value over
        the last four coolings falls below this.
    quench, m, n : floats, optional
        Parameters that alter the 'fast' schedule.  See Notes.
    lower, upper : floats or 1-D arrays, optional
        Bounds on `x`; floats apply to every component.
    dwell : int, optional
        Number of inner-loop iterations at each temperature.
    disp : bool, optional
        Print a descriptive convergence message if True.

    Returns
    -------
    xmin : ndarray
        Point where the lowest function value was found.
    Jmin : float
        Objective value at `xmin`.  (Only with `full_output`.)
    T : float
        Temperature at termination.  (Only with `full_output`.)
    feval : int
        Number of function evaluations used.  (Only with `full_output`.)
    iters : int
        Number of cooling iterations used.  (Only with `full_output`.)
    accept : int
        Number of tests accepted.  (Only with `full_output`.)
    status : int
        Termination reason:

        - 0 : Points no longer changing.
        - 1 : Cooled to final temperature.
        - 2 : Maximum function evaluations reached.
        - 3 : Maximum cooling iterations reached.
        - 4 : Maximum accepted query locations reached.
        - 5 : Final point not the minimum amongst encountered points.

    See Also
    --------
    basinhopping : another (more performant) global optimizer
    brute : brute-force global optimizer

    Notes
    -----
    Temperatures are updated only in the outer loop; `dwell` new points
    are proposed (and probabilistically accepted) at each temperature.
    Writing ``d`` for the dimension of `x0` and ``k`` for the outer
    iteration number, the schedules behave as follows::

        'fast'      : u ~ Uniform(0, 1, size=d)
                      y = sgn(u - 0.5) * T * ((1 + 1/T)**abs(2*u - 1) - 1.0)
                      x_new = x_old + y * (upper - lower)
                      T_new = T0 * exp(-c * k**quench),  c = n * exp(-n * quench)

        'cauchy'    : u ~ Uniform(-pi/2, pi/2, size=d)
                      x_new = x_old + learn_rate * T * tan(u)
                      T_new = T0 / (1 + k)

        'boltzmann' : std = minimum(sqrt(T) * ones(d),
                                    (upper - lower) / (3 * learn_rate))
                      y ~ Normal(0, std, size=d)
                      x_new = x_old + learn_rate * y
                      T_new = T0 / log(1 + k)

    The randomness comes from sampling in numpy; call ``numpy.random.seed``
    with a fixed seed immediately before `anneal` to reproduce a run.  Some
    experimentation with the schedules and their parameters is usually
    needed for good performance.  The same algorithm is available through
    ``optimize.minimize(..., method='Anneal')``, which returns a `Result`
    object instead of a tuple.
    """
    # Delegate the actual work to the minimize-style implementation, mapping
    # this function's historical parameter names onto its keywords.
    res = _minimize_anneal(func, x0, args,
                           schedule=schedule, T0=T0, Tf=Tf,
                           maxfev=maxeval, maxaccept=maxaccept,
                           maxiter=maxiter, boltzmann=boltzmann,
                           learn_rate=learn_rate, ftol=feps, quench=quench,
                           m=m, n=n, lower=lower, upper=upper,
                           dwell=dwell, disp=disp)
    if not full_output:
        return res['x'], res['status']
    return (res['x'], res['fun'], res['T'], res['nfev'], res['nit'],
            res['accept'], res['status'])
def _minimize_anneal(func, x0, args=(),
                     schedule='fast', T0=None, Tf=1e-12, maxfev=None,
                     maxaccept=None, maxiter=400, boltzmann=1.0,
                     learn_rate=0.5, ftol=1e-6, quench=1.0, m=1.0, n=1.0,
                     lower=-100, upper=100, dwell=50, disp=False,
                     **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    simulated annealing algorithm.

    This function is called by the `minimize` function with
    ``method='anneal'``. It is not supposed to be called directly.

    Options for the simulated annealing algorithm are:

    disp : bool
        Set to True to print convergence messages.
    schedule : str
        Annealing schedule to use. One of: 'fast', 'cauchy' or
        'boltzmann'.
    T0 : float
        Initial Temperature (estimated as 1.2 times the largest
        cost-function deviation over random points in the range).
    Tf : float
        Final goal temperature.
    maxfev : int
        Maximum number of function evaluations to make.
    maxaccept : int
        Maximum changes to accept.
    maxiter : int
        Maximum number of iterations (cooling steps) to perform.
    boltzmann : float
        Boltzmann constant in acceptance test (increase for less
        stringent test at each temperature).
    learn_rate : float
        Scale constant for adjusting guesses.
    ftol : float
        Relative error in ``fun(x)`` acceptable for convergence.
    quench, m, n : float
        Parameters to alter fast_sa schedule.
    lower, upper : float or ndarray
        Lower and upper bounds on `x`.
    dwell : int
        The number of times to search the space at each temperature.

    Returns a ``Result`` object whose ``status`` field encodes the
    stopping condition (see the ``message`` mapping at the end of this
    function).
    """
    _check_unknown_options(unknown_options)
    # Map the `minimize`-style option names onto the legacy local names
    # used throughout the body below.
    maxeval = maxfev
    feps = ftol
    x0 = asarray(x0)
    lower = asarray(lower)
    upper = asarray(upper)
    # NOTE(review): dispatches to one of the module-level schedule classes
    # (fast_sa, cauchy_sa, boltzmann_sa) by string concatenation + eval.
    # eval on a caller-supplied string is a known smell; a dict lookup of
    # the schedule classes would be safer -- confirm before changing.
    schedule = eval(schedule+'_sa()')
    # initialize the schedule
    schedule.init(dims=shape(x0), func=func, args=args, boltzmann=boltzmann,
                  T0=T0, learn_rate=learn_rate, lower=lower, upper=upper,
                  m=m, n=n, quench=quench, dwell=dwell)
    # Three mutable state holders: the candidate being evaluated, the last
    # accepted point, and the best point seen so far.
    current_state, last_state, best_state = _state(), _state(), _state()
    if T0 is None:
        # No initial temperature given: estimate one from random points,
        # which also seeds best_state as a side effect.
        x0 = schedule.getstart_temp(best_state)
    else:
        best_state.x = None
        best_state.cost = numpy.Inf
    last_state.x = asarray(x0).copy()
    fval = func(x0, *args)
    schedule.feval += 1
    last_state.cost = fval
    if last_state.cost < best_state.cost:
        best_state.cost = fval
        best_state.x = asarray(x0).copy()
    schedule.T = schedule.T0
    # Seed the convergence queue with sentinel values far apart so the
    # "effectively cooled" test cannot trigger on the first iterations.
    fqueue = [100, 300, 500, 700]
    iters = 0
    while 1:
        # NOTE(review): `xrange` makes this Python-2-only; the loop
        # variable `n` also shadows the schedule parameter `n`, which is
        # harmless here only because `n` was already consumed by
        # schedule.init() above.
        for n in xrange(dwell):
            current_state.x = schedule.update_guess(last_state.x)
            current_state.cost = func(current_state.x, *args)
            schedule.feval += 1
            dE = current_state.cost - last_state.cost
            if schedule.accept_test(dE):
                # Metropolis-style acceptance: adopt the candidate and
                # track the global best.
                last_state.x = current_state.x.copy()
                last_state.cost = current_state.cost
                if last_state.cost < best_state.cost:
                    best_state.x = last_state.x.copy()
                    best_state.cost = last_state.cost
        schedule.update_temp()
        iters += 1
        # Stopping conditions
        # 0) last saved values of f from each cooling step
        #     are all very similar (effectively cooled)
        # 1) Tf is set and we are below it
        # 2) maxeval is set and we are past it
        # 3) maxiter is set and we are past it
        # 4) maxaccept is set and we are past it
        fqueue.append(squeeze(last_state.cost))
        fqueue.pop(0)
        af = asarray(fqueue)*1.0
        if all(abs((af-af[0])/af[0]) < feps):
            retval = 0
            # Cooled, but the resting point is not the best point seen:
            # report status 5 instead of 0.
            if abs(af[-1]-best_state.cost) > feps*10:
                retval = 5
                if disp:
                    print("Warning: Cooled to %f at %s but this is not"
                          % (squeeze(last_state.cost),
                             str(squeeze(last_state.x)))
                          + " the smallest point found.")
            break
        if (Tf is not None) and (schedule.T < Tf):
            retval = 1
            break
        if (maxeval is not None) and (schedule.feval > maxeval):
            retval = 2
            break
        if (iters > maxiter):
            if disp:
                print("Warning: Maximum number of iterations exceeded.")
            retval = 3
            break
        if (maxaccept is not None) and (schedule.accepted > maxaccept):
            retval = 4
            break
    result = Result(x=best_state.x, fun=best_state.cost,
                    T=schedule.T, nfev=schedule.feval, nit=iters,
                    accept=schedule.accepted, status=retval,
                    success=(retval <= 1),
                    message={0: 'Points no longer changing',
                             1: 'Cooled to final temperature',
                             2: 'Maximum function evaluations',
                             3: 'Maximum cooling iterations reached',
                             4: 'Maximum accepted query locations reached',
                             5: 'Final point not the minimum amongst '
                                'encountered points'}[retval])
    return result
if __name__ == "__main__":
    from numpy import cos
    # Demo: exercise each annealing schedule on a 1-D and a 2-D test
    # problem, printing the full anneal() output tuple for each run.
    # minimum expected at ~-0.195
    func = lambda x: cos(14.5 * x - 0.3) + (x + 0.2) * x
    for sched in ('cauchy', 'fast', 'boltzmann'):
        print(anneal(func, 1.0, full_output=1, upper=3.0, lower=-3.0,
                     feps=1e-4, maxiter=2000, schedule=sched))
    # minimum expected at ~[-0.195, -0.1]
    func = lambda x: (cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] +
                      (x[0] + 0.2) * x[0])
    for sched in ('cauchy', 'fast', 'boltzmann'):
        print(anneal(func, [1.0, 1.0], full_output=1,
                     upper=[3.0, 3.0], lower=[-3.0, -3.0],
                     feps=1e-4, maxiter=2000, schedule=sched))
|
kmspriyatham/symath
|
scipy/scipy/optimize/anneal.py
|
Python
|
apache-2.0
| 23,219
|
[
"Gaussian"
] |
9dc531565173e4b48c305ed88d8d9c116a69c07b293f472890581d5938b62eca
|
"""Amber Electric Coordinator."""
from __future__ import annotations
from datetime import timedelta
from typing import Any
from amberelectric import ApiException
from amberelectric.api import amber_api
from amberelectric.model.actual_interval import ActualInterval
from amberelectric.model.channel import ChannelType
from amberelectric.model.current_interval import CurrentInterval
from amberelectric.model.forecast_interval import ForecastInterval
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import LOGGER
def is_current(interval: ActualInterval | CurrentInterval | ForecastInterval) -> bool:
    """Check whether the given interval is a CurrentInterval instance."""
    return isinstance(interval, CurrentInterval)
def is_forecast(interval: ActualInterval | CurrentInterval | ForecastInterval) -> bool:
    """Check whether the given interval is a ForecastInterval instance."""
    return isinstance(interval, ForecastInterval)
def is_general(interval: ActualInterval | CurrentInterval | ForecastInterval) -> bool:
    """Check whether the given interval belongs to the general channel."""
    channel = interval.channel_type
    return channel == ChannelType.GENERAL
def is_controlled_load(
    interval: ActualInterval | CurrentInterval | ForecastInterval,
) -> bool:
    """Check whether the given interval belongs to the controlled load channel."""
    channel = interval.channel_type
    return channel == ChannelType.CONTROLLED_LOAD
def is_feed_in(interval: ActualInterval | CurrentInterval | ForecastInterval) -> bool:
    """Check whether the given interval belongs to the feed in channel."""
    channel = interval.channel_type
    return channel == ChannelType.FEED_IN
class AmberUpdateCoordinator(DataUpdateCoordinator):
    """AmberUpdateCoordinator - In charge of downloading the data for a site, which all the sensors read."""
    def __init__(
        self, hass: HomeAssistant, api: amber_api.AmberApi, site_id: str
    ) -> None:
        """Initialise the data service."""
        super().__init__(
            hass,
            LOGGER,
            name="amberelectric",
            update_interval=timedelta(minutes=1),
        )
        self._api = api
        self.site_id = site_id
    def update_price_data(self) -> dict[str, dict[str, Any]]:
        """Update callback."""
        # Fetch the current price plus the next 48 forecast intervals.
        try:
            intervals = self._api.get_current_price(self.site_id, next=48)
        except ApiException as api_exception:
            raise UpdateFailed("Missing price data, skipping update") from api_exception
        result: dict[str, dict[str, Any]] = {
            "current": {},
            "forecasts": {},
            "grid": {},
        }
        # Split the response into current and forecast intervals, then
        # slice each by channel. A general channel is mandatory.
        current = list(filter(is_current, intervals))
        forecasts = list(filter(is_forecast, intervals))
        general = list(filter(is_general, current))
        if not general:
            raise UpdateFailed("No general channel configured")
        result["current"]["general"] = general[0]
        result["forecasts"]["general"] = list(filter(is_general, forecasts))
        result["grid"]["renewables"] = round(general[0].renewables)
        result["grid"]["price_spike"] = general[0].spike_status.value
        # Controlled load and feed-in channels are optional per site.
        controlled_load = list(filter(is_controlled_load, current))
        if controlled_load:
            result["current"]["controlled_load"] = controlled_load[0]
            result["forecasts"]["controlled_load"] = list(
                filter(is_controlled_load, forecasts)
            )
        feed_in = list(filter(is_feed_in, current))
        if feed_in:
            result["current"]["feed_in"] = feed_in[0]
            result["forecasts"]["feed_in"] = list(filter(is_feed_in, forecasts))
        LOGGER.debug("Fetched new Amber data: %s", intervals)
        return result
    async def _async_update_data(self) -> dict[str, Any]:
        """Async update wrapper."""
        return await self.hass.async_add_executor_job(self.update_price_data)
|
lukas-hetzenecker/home-assistant
|
homeassistant/components/amberelectric/coordinator.py
|
Python
|
apache-2.0
| 4,238
|
[
"Amber"
] |
445b56332a3de6147be8da74a1e3cc99b666a554de373653b25f8139aab4b13a
|
# Orca
#
# Copyright 2004-2009 Sun Microsystems Inc.
# Copyright 2010-2013 The Orca Team
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Labels for Orca's GUIs. These have been put in their own module so that we
can present them in the correct language when users change the language on the
fly without having to reload a bunch of modules."""
# Legacy version-control keyword metadata (expanded by CVS/SVN on checkout).
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
# NOTE(review): implicit string concatenation leaves no separator between
# the two copyright notices -- confirm whether a space/newline was intended.
__copyright__ = "Copyright (c) 2004-2009 Sun Microsystems Inc." \
                "Copyright (c) 2010-2013 The Orca Team"
__license__ = "LGPL"
from .orca_i18n import _, C_
# Translators: This string appears on a button in a dialog. "Activating" the
# selected item will perform the action that one would expect to occur if the
# object were clicked on with the mouse. If the object is a link, activating
# it will bring you to a new page. If the object is a button, activating it
# will press the button. If the object is a combobox, activating it will expand
# it to show all of its contents. And so on.
ACTIVATE = _("_Activate")
# Translators: A single braille cell on a refreshable braille display consists
# of 8 dots. Dot 7 is the dot in the bottom left corner. If the user selects
# this option, Dot 7 will be used to 'underline' text of interest, e.g. when
# "marking"/indicating that a given word is bold.
BRAILLE_DOT_7 = _("Dot _7")
# Translators: A single braille cell on a refreshable braille display consists
# of 8 dots. Dot 8 is the dot in the bottom right corner. If the user selects
# this option, Dot 8 will be used to 'underline' text of interest, e.g. when
# "marking"/indicating that a given word is bold.
BRAILLE_DOT_8 = _("Dot _8")
# Translators: A single braille cell on a refreshable braille display consists
# of 8 dots. Dots 7-8 are the dots at the bottom. If the user selects this
# option, Dots 7-8 will be used to 'underline' text of interest, e.g. when
# "marking"/indicating that a given word is bold.
BRAILLE_DOT_7_8 = _("Dots 7 an_d 8")
# Translators: This is the label for a button in a dialog.
BTN_CANCEL = _("_Cancel")
# Translators: This is the label for a button in a dialog.
BTN_JUMP_TO = _("_Jump to")
# Translators: This is the label for a button in a dialog.
BTN_OK = _("_OK")
# Translators: Orca has had to implement its own caret navigation model to work
# around issues in Gecko/Firefox. In some versions of Firefox, we must perform
# a focus grab on each object being navigated in order for things to work as
# expected; in other versions of Firefox, we must avoid doing so in order for
# things to work as expected. We cannot identify with certainty which situation
# the user is in, so we must provide this as an option within Orca.
CARET_NAVIGATION_GRAB_FOCUS = _("_Grab focus on objects when navigating")
# Translators: When the user arrows up and down in HTML content, and Orca is
# controlling the caret, the user might want Orca to always position the
# cursor at the beginning of the line (as opposed to the position directly
# above/below the current cursor position). Different users have different
# preferences. This string is the label for a checkbox which allows users
# to set the line-positioning behavior they want.
CARET_NAVIGATION_START_OF_LINE = \
_("_Position cursor at start of line when navigating vertically")
# Translators: If this checkbox is checked, then Orca will tell you when one of
# your buddies is typing a message.
CHAT_ANNOUNCE_BUDDY_TYPING = _("Announce when your _buddies are typing")
# Translators: If this checkbox is checked, then Orca will provide the user with
# chat room specific message histories rather than just a single history which
# contains the latest messages from all the chat rooms that they are in.
CHAT_SEPARATE_MESSAGE_HISTORIES = _("Provide chat room specific _message histories")
# Translators: This is the label of a panel holding options for how messages in
# this application's chat rooms should be spoken. The options are: Speak messages
# from all channels (i.e. even if the chat application doesn't have focus); speak
# messages from a channel only if it is the active channel; speak messages from
# any channel, but only if the chat application has focus.
CHAT_SPEAK_MESSAGES_FROM = _("Speak messages from")
# Translators: This is the label of a radio button. If it is selected, Orca will
# speak all new chat messages as they appear irrespective of whether or not the
# chat application currently has focus. This is the default behaviour.
CHAT_SPEAK_MESSAGES_ALL = _("All cha_nnels")
# Translators: This is the label of a radio button. If it is selected, Orca will
# speak all new chat messages as they appear if and only if the chat application
# has focus. The string substitution is for the application name (e.g. Pidgin).
CHAT_SPEAK_MESSAGES_ALL_IF_FOCUSED = _("All channels when an_y %s window is active")
# Translators: This is the label of a radio button. If it is selected, Orca will
# only speak new chat messages for the currently active channel, irrespective of
# whether the chat application has focus.
CHAT_SPEAK_MESSAGES_ACTIVE = _("A channel only if its _window is active")
# Translators: If this checkbox is checked, then Orca will speak the name of the
# chat room prior to presenting an incoming message.
CHAT_SPEAK_ROOM_NAME = _("_Speak Chat Room name")
# Translators: Orca's keybindings support double and triple "clicks" or key
# presses, similar to using a mouse. This string appears in Orca's preferences
# dialog after a keybinding which requires a double click.
CLICK_COUNT_DOUBLE = _("double click")
# Translators: Orca's keybindings support double and triple "clicks" or key
# presses, similar to using a mouse. This string appears in Orca's preferences
# dialog after a keybinding which requires a triple click.
CLICK_COUNT_TRIPLE = _("triple click")
# Translators: This is a label which will appear in the list of available speech
# engines as a special item. It refers to the default engine configured within
# the speech subsystem. Apart from this item, the user will have a chance to
# select a particular speech engine by its real name (Festival, IBMTTS, etc.)
DEFAULT_SYNTHESIZER = _("Default Synthesizer")
# Translators: This is a label for a column header in Orca's pronunciation
# dictionary. The pronunciation dictionary allows the user to correct words
# which the speech synthesizer mispronounces (e.g. a person's name, a technical
# word) or doesn't pronounce as the user desires (e.g. an acronym) by providing
# an alternative string. The "Actual String" here refers to the word to be
# corrected as it would actually appear in text being read. Example: "LOL".
DICTIONARY_ACTUAL_STRING = _("Actual String")
# Translators: This is a label for a column header in Orca's pronunciation
# dictionary. The pronunciation dictionary allows the user to correct words
# which the speech synthesizer mispronounces (e.g. a person's name, a technical
# word) or doesn't pronounce as the user desires (e.g. an acronym) by providing
# an alternative string. The "Replacement String" here refers to how the user
# would like the "Actual String" to be pronounced by the speech synthesizer.
# Example: "L O L" or "Laughing Out Loud" (for Actual String "LOL").
DICTIONARY_REPLACEMENT_STRING = _("Replacement String")
# Translators: Orca has an "echo" feature to present text as it is being written
# by the user. While Orca's "key echo" options present the actual keyboard keys
# being pressed, "character echo" presents the character/string of length 1 that
# is inserted as a result of the keypress.
ECHO_CHARACTER = _("Enable echo by cha_racter")
# Translators: Orca has an "echo" feature to present text as it is being written
# by the user. This string refers to a "key echo" option. When this option is
# enabled, dead keys will be announced when pressed.
ECHO_DIACRITICAL = _("Enable non-spacing _diacritical keys")
# Translators: Orca has a "find" feature which allows the user to search the
# active application for on screen text and widgets. This label is associated
# with the setting to begin the search from the current location rather than
# from the top of the screen.
FIND_START_AT_CURRENT_LOCATION = _("C_urrent location")
# Translators: This is the label for a spinbutton. This option allows the user
# to specify the number of matched characters that must be present before Orca
# speaks the line that contains the results from an application's Find toolbar.
FIND_MINIMUM_MATCH_LENGTH = _("Minimum length of matched text:")
# Translators: This is the label of a panel containing options for what Orca
# presents when the user is in the Find toolbar of an application, e.g. Firefox.
FIND_OPTIONS = _("Find Options")
# Translators: This is the label for a checkbox. This option controls whether
# the line that contains the match from an application's Find toolbar should
# always be spoken, or only spoken if it is a different line than the line
# which contained the last match.
FIND_ONLY_SPEAK_CHANGED_LINES = _("Onl_y speak changed lines during find")
# Translators: This is the label for a checkbox. This option controls whether or
# not Orca will automatically speak the line that contains the match while the
# user is performing a search from the Find toolbar of an application, e.g.
# Firefox.
FIND_SPEAK_RESULTS = _("Speak results during _find")
# Translators: Function is a table column header where the cells in the column
# are a sentence that briefly describes what action Orca will take if and when
# the user invokes that keyboard command.
KB_HEADER_FUNCTION = _("Function")
# Translators: Key Binding is a table column header where the cells in the
# column represent keyboard combinations the user can press to invoke Orca
# commands.
KB_HEADER_KEY_BINDING = _("Key Binding")
# Translators: This string is a label for the group of Orca commands which
# can be used in any setting, task, or application. They are not specific
# to, for instance, web browsing.
KB_GROUP_DEFAULT = C_("keybindings", "Default")
# Translators: An external braille device has buttons on it that permit the
# user to create input gestures from the braille device. The braille bindings
# are what determine the actions Orca will take when the user presses these
# buttons.
KB_GROUP_BRAILLE = _("Braille Bindings")
# Translators: This string is a label for the group of Orca commands which
# do not currently have an associated key binding.
KB_GROUP_UNBOUND = _("Unbound")
# Translators: Modified is a table column header in Orca's preferences dialog.
# This column contains a checkbox which indicates whether a key binding
# for an Orca command has been changed by the user to something other than its
# default value.
KB_MODIFIED = C_("keybindings", "Modified")
# Translators: This label refers to the keyboard layout (desktop or laptop).
KEYBOARD_LAYOUT_DESKTOP = _("_Desktop")
# Translators: Orca's preferences can be configured on a per-application basis,
# allowing users to customize Orca's behavior, keybindings, etc. to work one
# way in LibreOffice and another way in a chat application. This string is the
# title of Orca's application-specific preferences dialog for an application.
# The string substituted in is the accessible name of the application (e.g.
# "Gedit", "Firefox", etc.
PREFERENCES_APPLICATION_TITLE = _("Screen Reader Preferences for %s")
# Translators: This is a table column header. This column consists of a single
# checkbox. If the checkbox is checked, Orca will indicate the associated item
# or attribute by "marking" it in braille. "Marking" is not the same as writing
# out the word; instead marking refers to adding some other indicator, e.g.
# "underlining" with braille dots 7-8 a word that is bold.
PRESENTATION_MARK_IN_BRAILLE = _("Mark in braille")
# Translators: "Present Unless" is a column header of the text attributes panel
# of the Orca preferences dialog. On this panel, the user can select a set of
# text attributes that they would like spoken and/or indicated in braille.
# Because the list of attributes could get quite lengthy, we provide the option
# to always speak/braille a text attribute *unless* its value is equal to the
# value given by the user in this column of the list. For example, given the
# text attribute "underline" and a present unless value of "none", the user is
# stating that he/she would like to have underlined text announced for all cases
# (single, double, low, etc.) except when the value of underline is none (i.e.
# when it's not underlined). "Present" here is being used as a verb.
PRESENTATION_PRESENT_UNLESS = _("Present Unless")
# Translators: This is a table column header. The "Speak" column consists of a
# single checkbox. If the checkbox is checked, Orca will speak the associated
# item or attribute (e.g. saying "Bold" as part of the information presented
# when the user gives the Orca command to obtain the format and font details of
# the current text).
PRESENTATION_SPEAK = _("Speak")
# Translators: This is the title of a message dialog informing the user that
# he/she attempted to save a new user profile under a name which already exists.
# A "user profile" is a collection of settings which apply to a given task, such
# as a "Spanish" profile which would use Spanish text-to-speech and Spanish
# braille and selected when reading Spanish content.
PROFILE_CONFLICT_TITLE = _("Save Profile As Conflict")
# Translators: This is the label of a message dialog informing the user that
# he/she attempted to save a new user profile under a name which already exists.
# A "user profile" is a collection of settings which apply to a given task, such
# as a "Spanish" profile which would use Spanish text-to-speech and Spanish
# braille and selected when reading Spanish content.
PROFILE_CONFLICT_LABEL = _("User Profile Conflict!")
# Translators: This is the message in a dialog informing the user that he/she
# attempted to save a new user profile under a name which already exists.
# A "user profile" is a collection of settings which apply to a given task, such
# as a "Spanish" profile which would use Spanish text-to-speech and Spanish
# braille and selected when reading Spanish content.
PROFILE_CONFLICT_MESSAGE = _("Profile %s already exists.\n" \
"Continue updating the existing profile with " \
"these new changes?")
# Translators: This text is displayed in a message dialog when a user indicates
# he/she wants to switch to a new user profile which will cause him/her to lose
# settings which have been altered but not yet saved. A "user profile" is a
# collection of settings which apply to a given task such as a "Spanish" profile
# which would use Spanish text-to-speech and Spanish braille and selected when
# reading Spanish content.
PROFILE_LOAD_LABEL = _("Load user profile")
# Translators: This text is displayed in a message dialog when a user indicates
# he/she wants to switch to a new user profile which will cause him/her to lose
# settings which have been altered but not yet saved. A "user profile" is a
# collection of settings which apply to a given task such as a "Spanish" profile
# which would use Spanish text-to-speech and Spanish braille and selected when
# reading Spanish content.
PROFILE_LOAD_MESSAGE = \
_("You are about to change the active profile. If you\n" \
"have just made changes in your preferences, they will\n" \
"be dropped at profile load.\n\n" \
"Continue loading profile discarding previous changes?")
# Translators: Profiles in Orca make it possible for users to quickly switch
# amongst a group of pre-defined settings (e.g. an 'English' profile for reading
# text written in English using an English-language speech synthesizer and
# braille rules, and a similar 'Spanish' profile for reading Spanish text. The
# following string is the title of a dialog in which users can save a newly-
# defined profile.
PROFILE_SAVE_AS_TITLE = _("Save Profile As")
# Translators: Profiles in Orca make it possible for users to quickly switch
# amongst a group of pre-defined settings (e.g. an 'English' profile for reading
# text written in English using an English-language speech synthesizer and
# braille rules, and a similar 'Spanish' profile for reading Spanish text. The
# following string is the label for a text entry in which the user enters the
# name of a new settings profile being saved via the 'Save Profile As' dialog.
PROFILE_NAME_LABEL = _("_Profile Name:")
# Translators: Orca has a setting which determines which progress bar updates
# should be announced. Choosing "All" means that Orca will present progress bar
# updates regardless of what application and window they happen to be in.
PROGRESS_BAR_ALL = C_("ProgressBar", "All")
# Translators: Orca has a setting which determines which progress bar updates
# should be announced. Choosing "Application" means that Orca will present
# progress bar updates as long as the progress bar is in the active application
# (but not necessarily in the current window).
PROGRESS_BAR_APPLICATION = C_("ProgressBar", "Application")
# Translators: Orca has a setting which determines which progress bar updates
# should be announced. Choosing "Window" means that Orca will present progress
# bar updates as long as the progress bar is in the active window.
PROGRESS_BAR_WINDOW = C_("ProgressBar", "Window")
# Translators: If this setting is chosen, no punctuation symbols will be spoken
# as a user reads a document.
PUNCTUATION_STYLE_NONE = C_("punctuation level", "_None")
# Translators: If this setting is chosen, common punctuation symbols (like
# comma, period, question mark) will not be spoken as a user reads a document,
# but less common symbols (such as #, @, $) will.
PUNCTUATION_STYLE_SOME = _("So_me")
# Translators: If this setting is chosen, the majority of punctuation symbols
# will be spoken as a user reads a document.
PUNCTUATION_STYLE_MOST = _("M_ost")
# Translators: If this setting is chosen and the user is reading over an entire
# document, Orca will pause at the end of each line.
SAY_ALL_STYLE_LINE = _("Line")
# Translators: If this setting is chosen and the user is reading over an entire
# document, Orca will pause at the end of each sentence.
SAY_ALL_STYLE_SENTENCE = _("Sentence")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a blockquote.
SN_HEADER_BLOCKQUOTE = C_("structural navigation", "Blockquote")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a button.
SN_HEADER_BUTTON = C_("structural navigation", "Button")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the caption of a table.
SN_HEADER_CAPTION = C_("structural navigation", "Caption")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the label of a check box.
SN_HEADER_CHECK_BOX = C_("structural navigation", "Check Box")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the selected item in a combo box.
SN_HEADER_COMBO_BOX = C_("structural navigation", "Combo Box")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the description of an element.
SN_HEADER_DESCRIPTION = C_("structural navigation", "Description")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a heading.
SN_HEADER_HEADING = C_("structural navigation", "Heading")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the label of a form field.
SN_HEADER_LABEL = C_("structural navigation", "Label")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a landmark. ARIA role landmarks are the W3C defined HTML
# tag attribute 'role' used to identify important part of webpage like banners,
# main context, search etc.
SN_HEADER_LANDMARK = C_("structural navigation", "Landmark")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of a column which
# contains the level of a heading. Level will be a "1" for <h1>, a "2" for <h2>,
# and so on.
SN_HEADER_LEVEL = C_("structural navigation", "Level")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a link.
SN_HEADER_LINK = C_("structural navigation", "Link")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a list.
SN_HEADER_LIST = C_("structural navigation", "List")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a list item.
SN_HEADER_LIST_ITEM = C_("structural navigation", "List Item")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of an object.
SN_HEADER_OBJECT = C_("structural navigation", "Object")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a paragraph.
SN_HEADER_PARAGRAPH = C_("structural navigation", "Paragraph")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the label of a radio button.
SN_HEADER_RADIO_BUTTON = C_("structural navigation", "Radio Button")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the role of a widget. Examples include "heading", "paragraph",
# "table", "combo box", etc.
SN_HEADER_ROLE = C_("structural navigation", "Role")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the selected item of a form field.
# NOTE(review): the identifier is misspelled ("SELETED" should be "SELECTED"),
# but renaming it would break any external code that references this constant.
SN_HEADER_SELETED_ITEM = C_("structural navigation", "Selected Item")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the state of a widget. Examples include "checked"/"not checked",
# "selected"/"not selected", "visited/not visited", etc.
SN_HEADER_STATE = C_("structural navigation", "State")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of an entry.
SN_HEADER_TEXT = C_("structural navigation", "Text")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the URI of a link.
SN_HEADER_URI = C_("structural navigation", "URI")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the value of a form field.
SN_HEADER_VALUE = C_("structural navigation", "Value")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_BLOCKQUOTE = C_("structural navigation", "Blockquotes")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_BUTTON = C_("structural navigation", "Buttons")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_CHECK_BOX = C_("structural navigation", "Check Boxes")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_COMBO_BOX = C_("structural navigation", "Combo Boxes")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_ENTRY = C_("structural navigation", "Entries")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_FORM_FIELD = C_("structural navigation", "Form Fields")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_HEADING = C_("structural navigation", "Headings")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
# Level will be a "1" for <h1>, a "2" for <h2>, and so on.
SN_TITLE_HEADING_AT_LEVEL = C_("structural navigation", "Headings at Level %d")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
# ARIA role landmarks are the W3C defined HTML tag attribute 'role' used to
# identify important part of webpage like banners, main context, search etc.
SN_TITLE_LANDMARK = C_("structural navigation", "Landmarks")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
# A 'large object' is a logical chunk of text, such as a paragraph, a list,
# a table, etc.
SN_TITLE_LARGE_OBJECT = C_("structural navigation", "Large Objects")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_LINK = C_("structural navigation", "Links")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_LIST = C_("structural navigation", "Lists")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_LIST_ITEM = C_("structural navigation", "List Items")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_PARAGRAPH = C_("structural navigation", "Paragraphs")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_RADIO_BUTTON = C_("structural navigation", "Radio Buttons")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_TABLE = C_("structural navigation", "Tables")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_UNVISITED_LINK = C_("structural navigation", "Unvisited Links")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_VISITED_LINK = C_("structural navigation", "Visited Links")
# Translators: This is the title of a panel holding options for how to navigate
# HTML content (e.g., Orca caret navigation, positioning of caret, structural
# navigation, etc.).
PAGE_NAVIGATION = _("Page Navigation")
# Translators: When the user loads a new web page, they can optionally have Orca
# automatically start reading the page from beginning to end. This is the label
# of a checkbox in which users can indicate their preference.
READ_PAGE_UPON_LOAD = \
_("Automatically start speaking a page when it is first _loaded")
# Translators: Different speech systems and speech engines work differently when
# it comes to handling pauses (e.g. sentence boundaries). This property allows
# the user to specify whether speech should be sent to the speech synthesis
# system immediately when a pause directive is encountered or if it should be
# queued up and sent to the speech synthesis system once the entire set of
# utterances has been calculated.
SPEECH_BREAK_INTO_CHUNKS = _("Break speech into ch_unks between pauses")
# Translators: This string will appear in the list of available voices for the
# current speech engine. "%s" will be replaced by the name of the current speech
# engine, such as "Festival default voice" or "IBMTTS default voice". It refers
# to the default voice configured for given speech engine within the speech
# subsystem. Apart from this item, the list will contain the names of all
# available "real" voices provided by the speech engine.
SPEECH_DEFAULT_VOICE = _("%s default voice")
# Translators: This refers to the voice used by Orca when presenting the content
# of the screen and other messages.
SPEECH_VOICE_TYPE_DEFAULT = C_("VoiceType", "Default")
# Translators: This refers to the voice used by Orca when presenting one or more
# characters which is part of a hyperlink.
SPEECH_VOICE_TYPE_HYPERLINK = C_("VoiceType", "Hyperlink")
# Translators: This refers to the voice used by Orca when presenting information
# which is not displayed on the screen as text, but is still being communicated
# by the system in some visual fashion. For instance, Orca says "misspelled" to
# indicate the presence of the red squiggly line found under a spelling error;
# Orca might say "3 of 6" when a user Tabs into a list of six items and the
# third item is selected. And so on.
SPEECH_VOICE_TYPE_SYSTEM = C_("VoiceType", "System")
# Translators: This refers to the voice used by Orca when presenting one or more
# characters which is written in uppercase.
SPEECH_VOICE_TYPE_UPPERCASE = C_("VoiceType", "Uppercase")
# Translators: This label refers to the name of a particular speech synthesis
# system. (http://devel.freebsoft.org/speechd)
SPEECH_DISPATCHER = _("Speech Dispatcher")
# Translators: This is a label for a group of options related to Orca's behavior
# when presenting an application's spell check dialog.
SPELL_CHECK = C_("OptionGroup", "Spell Check")
# Translators: This is a label for a checkbox associated with an Orca setting.
# When this option is enabled, Orca will spell out the current error in addition
# to speaking it. For example, if the misspelled word is "foo," enabling this
# setting would cause Orca to speak "f o o" after speaking "foo".
SPELL_CHECK_SPELL_ERROR = _("Spell _error")
# Translators: This is a label for a checkbox associated with an Orca setting.
# When this option is enabled, Orca will spell out the current suggestion in
# addition to speaking it. For example, if the misspelled word is "foo," and
# the first suggestion is "for" enabling this setting would cause Orca to speak
# "f o r" after speaking "for".
SPELL_CHECK_SPELL_SUGGESTION = _("Spell _suggestion")
# Translators: This is a label for a checkbox associated with an Orca setting.
# When this option is enabled, Orca will present the context (surrounding text,
# typically the sentence or line) in which the mistake occurred.
SPELL_CHECK_PRESENT_CONTEXT = _("Present _context of error")
# Translators: This is a label for an option to tell Orca whether or not it
# should speak the coordinates of the current spread sheet cell. Coordinates are
# the row and column position within the spread sheet (i.e. A1, B1, C2 ...)
SPREADSHEET_SPEAK_CELL_COORDINATES = _("Speak spread sheet cell coordinates")
# Translators: This is a label for an option for whether or not to speak the
# header of a table cell in document content.
TABLE_ANNOUNCE_CELL_HEADER = _("Announce cell _header")
# Translators: This is the title of a panel containing options for specifying
# how to navigate tables in document content.
TABLE_NAVIGATION = _("Table Navigation")
# Translators: This is a label for an option to tell Orca to skip over empty/
# blank cells when navigating tables in document content.
TABLE_SKIP_BLANK_CELLS = _("Skip _blank cells")
# Translators: When users are navigating a table, they sometimes want the entire
# row of a table read; other times they want just the current cell presented to
# them. This label is associated with the default presentation to be used.
TABLE_SPEAK_CELL = _("Speak _cell")
# Translators: This is a label for an option to tell Orca whether or not it
# should speak table cell coordinates in document content.
TABLE_SPEAK_CELL_COORDINATES = _("Speak _cell coordinates")
# Translators: This is a label for an option to tell Orca whether or not it
# should speak the span size of a table cell (e.g., how many rows and columns
# a particular table cell spans in a table).
TABLE_SPEAK_CELL_SPANS = _("Speak _multiple cell spans")
# Translators: This is a table column header. "Attribute" here refers to text
# attributes such as bold, underline, family-name, etc.
TEXT_ATTRIBUTE_NAME = _("Attribute Name")
# Translators: Gecko native caret navigation is where Firefox itself controls
# how the arrow keys move the caret around HTML content. It's often broken, so
# Orca needs to provide its own support. As such, Orca offers the user the
# ability to switch between the Firefox mode and the Orca mode. This is the
# label of a checkbox in which users can indicate their default preference.
USE_CARET_NAVIGATION = _("Control caret navigation")
# Translators: Orca provides keystrokes to navigate HTML content in a structural
# manner: go to previous/next header, list item, table, etc. This is the label
# of a checkbox in which users can indicate their default preference.
USE_STRUCTURAL_NAVIGATION = _("Enable _structural navigation")
# Translators: This refers to the amount of information Orca provides about a
# particular object that receives focus.
VERBOSITY_LEVEL_BRIEF = _("Brie_f")
|
h4ck3rm1k3/orca-sonar
|
src/orca/guilabels.py
|
Python
|
lgpl-2.1
| 38,952
|
[
"ORCA"
] |
9dd2a3836c92f0e111b9f019e666cc25879ae2d2c0bc2b97afddf02cb6f4449a
|
#!/usr/bin/env python
"""Detector that can be used to experiment with different correlation peak
interpolation methods.
Example usage:
python -m "thrifty.experimental.detect_xcorr_interpol" \
--method autocorr rx.card -o rx.data
"""
from __future__ import print_function
import argparse
from thrifty.detect import Detector, detector_cli
from thrifty.soa_estimator import SoaEstimator
from thrifty.experimental import xcorr_interpolators
class IterativeSoaEstimator(SoaEstimator):
    """SoA estimator that refines the correlation peak iteratively.

    A Gaussian interpolation of the correlation magnitude supplies the
    initial guess, which the maximiser then refines against the
    time-domain signal's cross-correlation.
    """

    def __init__(self, **args):
        super(IterativeSoaEstimator, self).__init__(**args)
        # The maximiser needs the template to evaluate the cross-correlation.
        self._iterative = xcorr_interpolators.make_maximise(args['template'])
        # FFT of the most recent block; captured by soa_estimate().
        self._last_fft = None
        self.interpolate = self.iterative_interpolate

    def soa_estimate(self, fft):
        # Remember the block so iterative_interpolate() can access its IFFT.
        self._last_fft = fft
        return super(IterativeSoaEstimator, self).soa_estimate(fft)

    def iterative_interpolate(self, corr_mag, peak_idx):
        # Refine the Gaussian first guess with the iterative maximiser.
        time_signal = self._last_fft.ifft
        initial_guess = xcorr_interpolators.gaussian(corr_mag, peak_idx)
        return self._iterative(time_signal, peak_idx, initial_guess)
class InterpolationDetector(Detector):
    """Detector whose correlation peak interpolation method is selectable
    by name (see the module docstring for usage)."""

    def __init__(self, settings, blocks, rxid=-1, method='gaussian'):
        super(InterpolationDetector, self).__init__(settings, blocks, rxid)
        if method == 'maximise':
            # The iterative maximiser needs its own SoA estimator.
            self.soa_estimate = IterativeSoaEstimator(
                template=settings.template,
                thresh_coeffs=settings.corr_thresh,
                block_len=settings.block_len,
                history_len=settings.history_len)
        else:
            # Dispatch table of interpolator factories; the thunks keep
            # make_autocorr_fit() lazy, exactly like the original branch.
            factories = {
                'none': lambda: xcorr_interpolators.none,
                'parabolic': lambda: xcorr_interpolators.parabolic,
                'cosine': lambda: xcorr_interpolators.cosine,
                'gaussian': lambda: xcorr_interpolators.gaussian,
                'autocorr': lambda: xcorr_interpolators.make_autocorr_fit(
                    settings.template),
            }
            if method not in factories:
                raise KeyError('Unknown interpolation method')
            self.soa_estimate.interpolate = factories[method]()
if __name__ == '__main__':
    # Build the standard detector CLI, extended with a --method switch that
    # selects the correlation peak interpolation method.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Valid method names are defined by the xcorr_interpolators module.
    method_names = xcorr_interpolators.INTERPOLATORS.keys()
    parser.add_argument('--method',
                        type=str,
                        default='gaussian',
                        help="Correlation interpolation method. "
                             "Valid methods are: " + ' '.join(method_names))
    # detector_cli forwards the extra 'method' argument to the detector class.
    detector_cli(InterpolationDetector, parser, ['method'])
|
swkrueger/Thrifty
|
thrifty/experimental/detect_xcorr_interpol.py
|
Python
|
gpl-3.0
| 2,795
|
[
"Gaussian"
] |
37eb1c8269c2725b8c77b425db6125a6a344a5ed9a68e9f15f70c5e6e0ac76cb
|
# FreeCAD TemplatePyMod module
# (c) 2010 Werner Mayer LGPL
"""
The module can be executed with:
FreeCAD -P <path_to_file> Automation.py
FreeCADCmd -P <path_to_file> Automation.py
"""
import FreeCAD, Part
def makeSnapshotWithGui():
    """Render a test cone through the FreeCAD GUI and save it as 'crystal.png'."""
    from PySide import QtGui
    import FreeCADGui

    def getMainWindow():
        # Scan the top-level widgets for FreeCAD's main window.
        for widget in QtGui.qApp.topLevelWidgets():
            if widget.metaObject().className() == "Gui::MainWindow":
                return widget
        raise RuntimeError("No main window found")

    mainWindow = getMainWindow()
    mainWindow.hide()

    # Create a test geometry and add it to a fresh document.
    cone = Part.makeCone(10, 8, 10)
    doc = FreeCAD.newDocument()
    Part.show(cone)

    # Disable animation so the camera jumps straight to its final position.
    view = FreeCADGui.getDocument(doc.Name).activeView()
    view.setAnimationEnabled(False)
    view.viewAxometric()
    view.fitAll()
    view.saveImage('crystal.png', 800, 600, 'Current')
    FreeCAD.closeDocument(doc.Name)

    # Close the application.
    QtGui.qApp.quit()
def makeSnapshotWithoutGui():
    """Render a test cone off-screen with pivy/Coin and save it to disk.

    Writes 'crystal.ps' (PostScript export is always available) and
    additionally 'crystal.png' when the simage package is installed.
    """
    from pivy import coin
    # Create a test geometry and create an IV representation as string.
    box = Part.makeCone(10, 8, 10)
    iv = box.writeInventor()
    # Load it into a buffer...
    inp = coin.SoInput()
    inp.setBuffer(iv)
    # ...and create a scenegraph from it.
    data = coin.SoDB.readAll(inp)
    base = coin.SoBaseColor()
    base.rgb.setValue(0.6, 0.7, 1.0)
    data.insertChild(base, 0)
    # Add light and camera so that the rendered geometry is visible.
    root = coin.SoSeparator()
    light = coin.SoDirectionalLight()
    cam = coin.SoOrthographicCamera()
    root.addChild(cam)
    root.addChild(light)
    root.addChild(data)
    # Do the rendering now, from an axonometric viewpoint.
    axo = coin.SbRotation(-0.353553, -0.146447, -0.353553, -0.853553)
    viewport = coin.SbViewportRegion(400, 400)
    cam.orientation.setValue(axo)
    cam.viewAll(root, viewport)
    off = coin.SoOffscreenRenderer(viewport)
    root.ref()
    off.render(root)
    root.unref()
    # Export the image; PostScript is always available.
    off.writeToPostScript("crystal.ps")
    # Other formats are only available if the simage package is installed.
    if off.isWriteSupported("PNG"):
        # Parenthesized print behaves identically under Python 2 for a single
        # argument and also parses under Python 3 (the original py2-only
        # "print" statement is a SyntaxError there).
        print("Save as PNG")
        off.writeToFile("crystal.png", "PNG")
# Entry point: use the GUI renderer when FreeCAD was started with a GUI,
# otherwise fall back to off-screen rendering.
if FreeCAD.GuiUp:
    makeSnapshotWithGui()
else:
    makeSnapshotWithoutGui()
|
marcoitur/FreeCAD
|
src/Mod/TemplatePyMod/Automation.py
|
Python
|
lgpl-2.1
| 2,290
|
[
"CRYSTAL"
] |
1b6b407df413362e943190957e3cec819fb685b73da1785f7d74d5b27fed7376
|
#### PATTERN | GRAPH ###############################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
import os
from math import sqrt, pow
from math import sin, cos, atan2, degrees, radians, pi
from random import random
from heapq import heappush, heappop
from warnings import warn
from codecs import open
from shutil import rmtree
try:
    # Directory containing this module; used to locate bundled files.
    MODULE = os.path.dirname(os.path.realpath(__file__))
except NameError:
    # __file__ is undefined (e.g. embedded/interactive interpreter); a bare
    # except here would also have hidden unrelated errors.
    MODULE = ""
# float("inf") doesn't work on windows.
INFINITE = 1e20
#--- LIST FUNCTIONS --------------------------------------------------------------------------------
def unique(iterable):
    """ Returns a list copy in which each item occurs only once (in-order).
    """
    seen = set()
    result = []
    for item in iterable:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
#--- DRAWING FUNCTIONS -----------------------------------------------------------------------------
# This module is standalone (i.e., it is not a graph rendering package).
# If you want to call Graph.draw() then line(), ellipse() and Text.draw() must be implemented.
def line(x1, y1, x2, y2, stroke=(0,0,0,1), strokewidth=1):
    """ Draws a line from (x1, y1) to (x2, y2) using the given stroke color and stroke width.
    """
    # Intentional stub: this module does no rendering itself. Supply an
    # implementation of line() to enable Graph.draw().
    pass
def ellipse(x, y, width, height, fill=(0,0,0,1), stroke=None, strokewidth=1):
    """ Draws an ellipse at (x, y) with given fill and stroke color and stroke width.
    """
    # Intentional stub: this module does no rendering itself. Supply an
    # implementation of ellipse() to enable Graph.draw().
    pass
class Text(object):

    def __init__(self, string, **kwargs):
        """ The node's text label.
            Optional properties include width, fill, font, fontsize, fontweight.
        """
        self.string = string
        # Arbitrary keyword properties become instance attributes.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def copy(self):
        # Clone with the same properties; "string" is passed positionally.
        properties = dict(self.__dict__)
        properties.pop("string")
        return Text(self.string, **properties)

    def draw(self):
        # Intentional stub: implemented by the rendering package that uses
        # this module (this module does no drawing itself).
        pass
class Vector(object):
    def __init__(self, x=0, y=0):
        """ A simple 2D vector; used for the force acting on a Node during layout.
        """
        self.x = x
        self.y = y
def coordinates(x, y, distance, angle):
    """ Returns the point at the given distance and angle (in degrees) from (x, y).
    """
    a = radians(angle)
    return (x + distance * cos(a), y + distance * sin(a))
#--- DEEPCOPY --------------------------------------------------------------------------------------
def deepcopy(o):
    """ Returns a deep (recursive) copy of the given object.
    """
    # NOTE: Python 2 only (basestring, long, iteritems, comma raise syntax).
    if o is None:
        return o
    # Objects exposing copy() delegate to it — assumed to return a deep copy
    # (true for Text above; TODO confirm for other copy()-bearing objects).
    if hasattr(o, "copy"):
        return o.copy()
    # Immutable scalars are shared, not copied.
    if isinstance(o, (basestring, bool, int, float, long, complex)):
        return o
    # Containers are rebuilt with recursively copied contents.
    if isinstance(o, (list, tuple, set)):
        return o.__class__(deepcopy(v) for v in o)
    if isinstance(o, dict):
        return dict((deepcopy(k), deepcopy(v)) for k,v in o.iteritems())
    raise Exception, "don't know how to copy %s" % o.__class__.__name__
#### NODE ##########################################################################################
#--- NODE ------------------------------------------------------------------------------------------
class Node(object):

    def __init__(self, id="", radius=5, **kwargs):
        """ A node with a unique id in the graph.
            Node.id is drawn as a text label, unless optional parameter text=False.
            Optional parameters include: fill, stroke, strokewidth, text, font, fontsize, fontweight.
        """
        self.graph = None
        self.links = Links()
        self.id = id
        self._x = 0.0 # Calculated by Graph.layout.update().
        self._y = 0.0 # Calculated by Graph.layout.update().
        self.force = Vector(0.0, 0.0)
        self.radius = radius
        self.fixed = kwargs.pop("fixed", False)
        self.fill = kwargs.pop("fill", None)
        self.stroke = kwargs.pop("stroke", (0,0,0,1))
        self.strokewidth = kwargs.pop("strokewidth", 1)
        # The "text" kwarg doubles as a switch (text=False disables the label)
        # and, when popped below, as the label's fill color.
        self.text = kwargs.get("text", True) and \
            Text(isinstance(id, unicode) and id or str(id).decode("utf-8", "ignore"),
                    width = 85,
                     fill = kwargs.pop("text", (0,0,0,1)),
                 fontsize = kwargs.pop("fontsize", 11), **kwargs) or None
        self._weight = None # Calculated by Graph.eigenvector_centrality().
        self._centrality = None # Calculated by Graph.betweenness_centrality().

    @property
    def _distance(self):
        # Graph.distance controls the (x,y) spacing between nodes.
        return self.graph and float(self.graph.distance) or 1.0

    def _get_x(self):
        return self._x * self._distance
    def _get_y(self):
        return self._y * self._distance
    def _set_x(self, v):
        self._x = v / self._distance
    def _set_y(self, v):
        self._y = v / self._distance

    x = property(_get_x, _set_x)
    y = property(_get_y, _set_y)

    @property
    def edges(self):
        """ Yields a list of edges from/to the node.
        """
        return self.graph is not None \
           and [e for e in self.graph.edges if self.id in (e.node1.id, e.node2.id)] \
            or []

    def edge(self, node, reverse=False):
        """ Yields the Edge from this node to the given node, or None.
        """
        # Fix: this method was decorated with @property, but a property getter
        # is invoked with self only, so ANY access to Node.edge raised
        # TypeError ("node" argument missing). As a plain method,
        # node.edge(other) works as documented.
        if not isinstance(node, Node):
            node = self.graph and self.graph.get(node) or node
        if reverse:
            return node.links.edge(self)
        return self.links.edge(node)

    @property
    def weight(self):
        """ Yields eigenvector centrality as a number between 0.0-1.0.
        """
        if self.graph and self._weight is None:
            self.graph.eigenvector_centrality()
        return self._weight

    @property
    def centrality(self):
        """ Yields betweenness centrality as a number between 0.0-1.0.
        """
        if self.graph and self._centrality is None:
            self.graph.betweenness_centrality()
        return self._centrality

    eigenvector = eigenvector_centrality = weight
    betweenness = betweenness_centrality = centrality

    @property
    def degree(self):
        """ Yields degree centrality as a number between 0.0-1.0.
        """
        return self.graph and (1.0 * len(self.links) / len(self.graph)) or 0.0

    def flatten(self, depth=1, traversable=lambda node, edge: True, _visited=None):
        """ Recursively lists the node and nodes linked to it.
            Depth 0 returns a list with the node.
            Depth 1 returns a list with the node and all the directly linked nodes.
            Depth 2 includes the linked nodes' links, and so on.
        """
        _visited = _visited or {}
        _visited[self.id] = (self, depth)
        if depth >= 1:
            for n in self.links:
                # Revisit a node only if a greater remaining depth reaches it.
                if n.id not in _visited or _visited[n.id][1] < depth-1:
                    if traversable(self, self.links.edges[n.id]):
                        n.flatten(depth-1, traversable, _visited)
        return [n for n,d in _visited.values()] # Fast, but not order-preserving.

    def draw(self, weighted=False):
        """ Draws the node as a circle with the given radius, fill, stroke and strokewidth.
            Draws the node centrality as a shadow effect when weighted=True.
            Draws the node text label.
            Override this method in a subclass for custom drawing.
        """
        # Draw the node weight as a shadow (based on node betweenness centrality).
        if weighted is not False and self.centrality > (weighted==True and -1 or weighted):
            w = self.centrality * 35
            ellipse(
                self.x,
                self.y,
                self.radius*2 + w,
                self.radius*2 + w, fill=(0,0,0,0.2), stroke=None)
        # Draw the node.
        ellipse(
            self.x,
            self.y,
            self.radius*2,
            self.radius*2, fill=self.fill, stroke=self.stroke, strokewidth=self.strokewidth)
        # Draw the node text label.
        # NOTE(review): Text.draw() above is declared without (x, y) parameters;
        # presumably the rendering package overrides it with a 2-arg draw — confirm.
        if self.text:
            self.text.draw(
                self.x + self.radius,
                self.y + self.radius)

    def contains(self, x, y):
        """ Returns True if the given coordinates (x, y) are inside the node radius.
        """
        return abs(self.x - x) < self.radius*2 and \
               abs(self.y - y) < self.radius*2

    def __repr__(self):
        return "%s(id=%s)" % (self.__class__.__name__, repr(self.id))

    def __eq__(self, node):
        return isinstance(node, Node) and self.id == node.id

    def __ne__(self, node):
        return not self.__eq__(node)
#--- NODE LINKS ------------------------------------------------------------------------------------
class Links(list):

    def __init__(self):
        """ An ordered, duplicate-free list of linked nodes,
            with the connecting edge stored per node id.
            Links.edge() returns the edge for a given node (or node id).
        """
        self.edges = dict()

    def append(self, node, edge=None):
        # The node itself is listed only once; the edge mapping is refreshed
        # on every call.
        if node.id not in self.edges:
            list.append(self, node)
        self.edges[node.id] = edge

    def remove(self, node):
        list.remove(self, node)
        self.edges.pop(node.id, None)

    def edge(self, node):
        key = isinstance(node, Node) and node.id or node
        return self.edges.get(key)
#### EDGE ##########################################################################################
class Edge(object):

    def __init__(self, node1, node2, weight=0.0, length=1.0, type=None, stroke=(0,0,0,1), strokewidth=1):
        """ A connection between two nodes.
            Its weight indicates the importance (not the cost) of the connection.
            Its type is useful in a semantic network (e.g. "is-a", "is-part-of", ...)
        """
        self.node1 = node1
        self.node2 = node2
        self._weight = weight
        self.length = length
        self.type = type
        self.stroke = stroke
        self.strokewidth = strokewidth

    def _get_weight(self):
        return self._weight

    def _set_weight(self, v):
        self._weight = v
        # Invalidate each attached graph's cached adjacency map,
        # since edge weights feed into it.
        for node in (self.node1, self.node2):
            if node.graph is not None:
                node.graph._adjacency = None

    weight = property(_get_weight, _set_weight)

    def draw(self, weighted=False, directed=False):
        """ Draws the edge as a line with the given stroke and strokewidth (increased with Edge.weight).
            Override this method in a subclass for custom drawing.
        """
        thickness = self.strokewidth + (weighted and self.weight or 0)
        line(self.node1.x, self.node1.y,
             self.node2.x, self.node2.y,
             stroke=self.stroke, strokewidth=thickness)
        if directed:
            self.draw_arrow(stroke=self.stroke, strokewidth=thickness)

    def draw_arrow(self, **kwargs):
        """ Draws the direction of the edge as an arrow on the rim of the receiving node.
        """
        ax, ay = self.node1.x, self.node1.y
        bx, by = self.node2.x, self.node2.y
        # Angle of the edge, from node1's and node2's positions.
        angle = degrees(atan2(by-ay, bx-ax))
        # The arrow tip sits on node2's rim instead of its center.
        rim = self.node2.radius
        d = sqrt(pow(bx-ax, 2) + pow(by-ay, 2))
        tx, ty = coordinates(ax, ay, d-rim-1, angle)
        # The two remaining corners sit slightly back, at +/- 20 degrees.
        size = max(kwargs.get("strokewidth", 1) * 3, 6)
        cx1, cy1 = coordinates(tx, ty, -size, angle-20)
        cx2, cy2 = coordinates(tx, ty, -size, angle+20)
        line(tx, ty, cx1, cy1, **kwargs)
        line(tx, ty, cx2, cy2, **kwargs)
        line(cx1, cy1, cx2, cy2, **kwargs)

    def __repr__(self):
        return "%s(id1=%s, id2=%s)" % (self.__class__.__name__, repr(self.node1.id), repr(self.node2.id))
#### GRAPH #########################################################################################
#--- GRAPH NODE DICTIONARY -------------------------------------------------------------------------
class nodedict(dict):

    def __init__(self, graph, *args, **kwargs):
        """ A dict whose values can be looked up by Node as well as by node id.
            Returned by Graph.shortest_paths() and Graph.eigenvector_centrality().
        """
        dict.__init__(self, *args, **kwargs)
        self.graph = graph

    def __contains__(self, node):
        # A node id resolves to its Node before the membership test.
        return dict.__contains__(self, self.graph.get(node, node))

    def __getitem__(self, node):
        key = node if isinstance(node, Node) else self.graph[node]
        return dict.__getitem__(self, key)

    def get(self, node, default=None):
        return dict.get(self, self.graph.get(node, node), default)
#--- GRAPH -----------------------------------------------------------------------------------------
# Graph layouts:
SPRING = "spring"
# Graph node centrality measures:
EIGENVECTOR = "eigenvector"
BETWEENNESS = "betweenness"
DEGREE = "degree"
# Graph node sort order:
WEIGHT, CENTRALITY = "weight", "centrality"
ALL = "all"
class Graph(dict):
    """ A network of Node objects connected by Edge objects.
        The graph itself is a dict of node id => Node, for fast lookup.
    """
    def __init__(self, layout=SPRING, distance=10.0):
        """ A network of nodes connected by edges that can be drawn with a given layout.
        """
        self.nodes = [] # List of Node objects.
        self.edges = [] # List of Edge objects.
        self.root = None
        self._adjacency = None # Cached adjacency() dict.
        self.layout = layout == SPRING and GraphSpringLayout(self) or GraphLayout(self)
        self.distance = distance
    def __getitem__(self, id):
        # Same as dict lookup by node id, but with a more informative error message.
        try:
            return dict.__getitem__(self, id)
        except KeyError:
            raise KeyError, "no node with id '%s' in graph" % id
    def append(self, base, *args, **kwargs):
        """ Appends a Node or Edge to the graph: Graph.append(Node, id="rabbit").
        """
        kwargs["base"] = base
        if issubclass(base, Node):
            return self.add_node(*args, **kwargs)
        if issubclass(base, Edge):
            return self.add_edge(*args, **kwargs)
    def add_node(self, id, *args, **kwargs):
        """ Appends a new Node to the graph.
            An optional base parameter can be used to pass a subclass of Node.
        """
        n = kwargs.pop("base", Node)
        # Reuse the given Node, or an existing node with the same id, if any.
        n = isinstance(id, Node) and id or self.get(id) or n(id, *args, **kwargs)
        if n.id not in self:
            self.nodes.append(n)
            self[n.id] = n; n.graph = self
        self.root = kwargs.get("root", False) and n or self.root
        # Clear adjacency cache.
        self._adjacency = None
        return n
    def add_edge(self, id1, id2, *args, **kwargs):
        """ Appends a new Edge to the graph.
            An optional base parameter can be used to pass a subclass of Edge:
            Graph.add_edge("cold", "winter", base=IsPropertyOf)
        """
        # Create nodes that are not yet part of the graph.
        n1 = self.add_node(id1)
        n2 = self.add_node(id2)
        # Creates an Edge instance.
        # If an edge (in the same direction) already exists, yields that edge instead.
        e1 = n1.links.edge(n2)
        if e1 and e1.node1 == n1 and e1.node2 == n2:
            return e1
        e2 = kwargs.pop("base", Edge)
        e2 = e2(n1, n2, *args, **kwargs)
        self.edges.append(e2)
        # Synchronizes Node.links:
        # A.links.edge(B) yields edge A->B
        # B.links.edge(A) yields edge B->A
        n1.links.append(n2, edge=e2)
        n2.links.append(n1, edge=e1 or e2)
        # Clear adjacency cache.
        self._adjacency = None
        return e2
    def remove(self, x):
        """ Removes the given Node (and all its edges) or Edge from the graph.
            Note: removing Edge a->b does not remove Edge b->a.
        """
        if isinstance(x, Node) and x.id in self:
            self.pop(x.id)
            self.nodes.remove(x); x.graph = None
            # Remove all edges involving the given node.
            for e in list(self.edges):
                if x in (e.node1, e.node2):
                    if x in e.node1.links: e.node1.links.remove(x)
                    if x in e.node2.links: e.node2.links.remove(x)
                    self.edges.remove(e)
        if isinstance(x, Edge):
            self.edges.remove(x)
        # Clear adjacency cache.
        self._adjacency = None
    def node(self, id):
        """ Returns the node in the graph with the given id.
        """
        if isinstance(id, Node) and id.graph == self:
            return id
        return self.get(id, None)
    def edge(self, id1, id2):
        """ Returns the edge between the nodes with given id1 and id2.
        """
        if isinstance(id1, Node) and id1.graph == self:
            id1 = id1.id
        if isinstance(id2, Node) and id2.graph == self:
            id2 = id2.id
        return id1 in self and id2 in self and self[id1].links.edge(id2) or None
    def paths(self, node1, node2, length=4, path=[]):
        """ Returns a list of paths (shorter than or equal to given length) connecting the two nodes.
        """
        if not isinstance(node1, Node):
            node1 = self[node1]
        if not isinstance(node2, Node):
            node2 = self[node2]
        return [[self[id] for id in p] for p in paths(self, node1.id, node2.id, length, path)]
    def shortest_path(self, node1, node2, heuristic=None, directed=False):
        """ Returns a list of nodes connecting the two nodes.
        """
        if not isinstance(node1, Node):
            node1 = self[node1]
        if not isinstance(node2, Node):
            node2 = self[node2]
        try:
            p = dijkstra_shortest_path(self, node1.id, node2.id, heuristic, directed)
            p = [self[id] for id in p]
            return p
        except IndexError:
            # dijkstra_shortest_path() raises IndexError when no path exists.
            return None
    def shortest_paths(self, node, heuristic=None, directed=False):
        """ Returns a dictionary of nodes, each linked to a list of nodes (shortest path).
        """
        if not isinstance(node, Node):
            node = self[node]
        p = nodedict(self)
        for id, path in dijkstra_shortest_paths(self, node.id, heuristic, directed).iteritems():
            p[self[id]] = path and [self[id] for id in path] or None
        return p
    def eigenvector_centrality(self, normalized=True, reversed=True, rating={}, iterations=100, tolerance=0.0001):
        """ Calculates eigenvector centrality and returns a node => weight dictionary.
            Node.weight is updated in the process.
            Node.weight is higher for nodes with a lot of (indirect) incoming traffic.
        """
        ec = eigenvector_centrality(self, normalized, reversed, rating, iterations, tolerance)
        ec = nodedict(self, ((self[id], w) for id, w in ec.iteritems()))
        for n, w in ec.iteritems():
            n._weight = w
        return ec
    def betweenness_centrality(self, normalized=True, directed=False):
        """ Calculates betweenness centrality and returns a node => weight dictionary.
            Node.centrality is updated in the process.
            Node.centrality is higher for nodes with a lot of passing traffic.
        """
        bc = brandes_betweenness_centrality(self, normalized, directed)
        bc = nodedict(self, ((self[id], w) for id, w in bc.iteritems()))
        for n, w in bc.iteritems():
            n._centrality = w
        return bc
    def sorted(self, order=WEIGHT, threshold=0.0):
        """ Returns a list of nodes sorted by WEIGHT or CENTRALITY.
            Nodes with a lot of traffic will be at the start of the list.
        """
        # order is a Node attribute name, e.g. "weight" or "centrality".
        o = lambda node: getattr(node, order)
        nodes = ((o(n), n) for n in self.nodes if o(n) >= threshold)
        nodes = reversed(sorted(nodes))
        return [n for w, n in nodes]
    def prune(self, depth=0):
        """ Removes all nodes with less or equal links than depth.
        """
        for n in (n for n in self.nodes if len(n.links) <= depth):
            self.remove(n)
    def fringe(self, depth=0, traversable=lambda node, edge: True):
        """ For depth=0, returns the list of leaf nodes (nodes with only one connection).
            For depth=1, returns the list of leaf nodes and their connected nodes, and so on.
        """
        u = []; [u.extend(n.flatten(depth, traversable)) for n in self.nodes if len(n.links) == 1]
        return unique(u)
    @property
    def density(self):
        """ Yields the number of edges vs. the maximum number of possible edges.
            For example, <0.35 => sparse, >0.65 => dense, 1.0 => complete.
        """
        # Note: raises ZeroDivisionError for graphs with fewer than two nodes.
        return 2.0*len(self.edges) / (len(self.nodes) * (len(self.nodes)-1))
    @property
    def is_complete(self):
        return self.density == 1.0
    @property
    def is_dense(self):
        return self.density > 0.65
    @property
    def is_sparse(self):
        return self.density < 0.35
    def split(self):
        """ Returns the list of unconnected subgraphs.
        """
        return partition(self)
    def update(self, iterations=10, **kwargs):
        """ Graph.layout.update() is called the given number of iterations.
        """
        for i in range(iterations):
            self.layout.update(**kwargs)
    def draw(self, weighted=False, directed=False):
        """ Draws all nodes and edges.
        """
        for e in self.edges:
            e.draw(weighted, directed)
        for n in reversed(self.nodes): # New nodes (with Node._weight=None) first.
            n.draw(weighted)
    def node_at(self, x, y):
        """ Returns the node at (x,y) or None.
        """
        for n in self.nodes:
            if n.contains(x, y): return n
    def _add_node_copy(self, n, **kwargs):
        # Magical fairy dust to copy subclasses of Node.
        # We assume that the subclass constructor takes an optional "text" parameter
        # (Text objects in NodeBox for OpenGL's implementation are expensive).
        try:
            new = self.add_node(n.id, root=kwargs.get("root",False), text=False)
        except TypeError:
            new = self.add_node(n.id, root=kwargs.get("root",False))
        new.__class__ = n.__class__
        # Copy node state, except references to the old graph and layout state.
        new.__dict__.update((k, deepcopy(v)) for k,v in n.__dict__.iteritems()
            if k not in ("graph", "links", "_x", "_y", "force", "_weight", "_centrality"))
    def _add_edge_copy(self, e, **kwargs):
        # Both endpoints must exist in this graph, otherwise the edge is skipped.
        if kwargs.get("node1", e.node1).id not in self \
        or kwargs.get("node2", e.node2).id not in self:
            return
        new = self.add_edge(
            kwargs.get("node1", self[e.node1.id]),
            kwargs.get("node2", self[e.node2.id]))
        new.__class__ = e.__class__
        new.__dict__.update((k, deepcopy(v)) for k,v in e.__dict__.iteritems()
            if k not in ("node1", "node2"))
    def copy(self, nodes=ALL):
        """ Returns a copy of the graph with the given list of nodes (and connecting edges).
            The layout will be reset.
        """
        g = Graph(layout=None, distance=self.distance)
        g.layout = self.layout.copy(graph=g)
        # nodes may be Node objects or node ids; ALL selects every node.
        for n in (nodes==ALL and self.nodes or (isinstance(n, Node) and n or self[n] for n in nodes)):
            g._add_node_copy(n, root=self.root==n)
        for e in self.edges:
            g._add_edge_copy(e)
        return g
    def export(self, *args, **kwargs):
        export(self, *args, **kwargs)
    def write(self, *args, **kwargs):
        write(self, *args, **kwargs)
    def serialize(self, *args, **kwargs):
        return render(self, *args, **kwargs)
#--- GRAPH LAYOUT ----------------------------------------------------------------------------------
# Graph drawing or graph layout, as a branch of graph theory,
# applies topology and geometry to derive two-dimensional representations of graphs.
class GraphLayout(object):
    def __init__(self, graph):
        """ Calculates node positions iteratively when GraphLayout.update() is called.
        """
        self.graph = graph
        self.iterations = 0
    def update(self):
        # Each call advances the layout by one step; subclasses add the actual math.
        self.iterations += 1
    def reset(self):
        # Restart the layout: clear node positions, forces and the iteration count.
        self.iterations = 0
        for n in self.graph.nodes:
            n._x = 0.0
            n._y = 0.0
            n.force = Vector(0.0, 0.0)
    @property
    def bounds(self):
        """ Returns a (x, y, width, height)-tuple of the approximate layout dimensions.
        """
        x0, y0 = +INFINITE, +INFINITE
        x1, y1 = -INFINITE, -INFINITE
        for n in self.graph.nodes:
            if (n.x < x0): x0 = n.x
            if (n.y < y0): y0 = n.y
            if (n.x > x1): x1 = n.x
            if (n.y > y1): y1 = n.y
        return (x0, y0, x1-x0, y1-y0)
    def copy(self, graph):
        """ Returns a new layout for the given graph, with the iteration count reset.
        """
        # Bug fix: __init__() takes a single graph argument; the previous
        # GraphLayout(self, graph) call raised a TypeError on every use.
        return GraphLayout(graph)
#--- GRAPH LAYOUT: FORCE-BASED ---------------------------------------------------------------------
class GraphSpringLayout(GraphLayout):
    def __init__(self, graph):
        """ A force-based layout in which edges are regarded as springs.
            The forces are applied to the nodes, pulling them closer or pushing them apart.
        """
        # Based on: http://snipplr.com/view/1950/graph-javascript-framework-version-001/
        GraphLayout.__init__(self, graph)
        self.k         = 4.0  # Force constant.
        self.force     = 0.01 # Force multiplier.
        self.repulsion = 50   # Maximum repulsive force radius.
    def _distance(self, node1, node2):
        # Yields a tuple with distances (dx, dy, d, d**2).
        # Ensures that the distance is never zero (which deadlocks the animation).
        dx = node2._x - node1._x
        dy = node2._y - node1._y
        d2 = dx * dx + dy * dy
        if d2 < 0.01:
            # Nodes (nearly) coincide: jitter them apart with a small random offset.
            dx = random() * 0.1 + 0.1
            dy = random() * 0.1 + 0.1
            d2 = dx * dx + dy * dy
        return dx, dy, sqrt(d2), d2
    def _repulse(self, node1, node2):
        # Updates Node.force with the repulsive force.
        dx, dy, d, d2 = self._distance(node1, node2)
        if d < self.repulsion:
            # f = k**2 / d**2: repulsion falls off with the squared distance.
            f = self.k ** 2 / d2
            node2.force.x += f * dx
            node2.force.y += f * dy
            node1.force.x -= f * dx
            node1.force.y -= f * dy
    def _attract(self, node1, node2, weight=0, length=1.0):
        # Updates Node.force with the attractive edge force.
        dx, dy, d, d2 = self._distance(node1, node2)
        d = min(d, self.repulsion)
        # Attraction grows as the distance exceeds the rest length k;
        # heavier edges (higher weight) pull proportionally harder.
        f = (d2 - self.k ** 2) / self.k * length
        f *= weight * 0.5 + 1
        f /= d
        node2.force.x -= f * dx
        node2.force.y -= f * dy
        node1.force.x += f * dx
        node1.force.y += f * dy
    def update(self, weight=10.0, limit=0.5):
        """ Updates the position of nodes in the graph.
            The weight parameter determines the impact of edge weight.
            The limit parameter determines the maximum movement each update().
        """
        GraphLayout.update(self)
        # Forces on all nodes due to node-node repulsions.
        # Each unordered node pair is visited exactly once.
        for i, n1 in enumerate(self.graph.nodes):
            for j, n2 in enumerate(self.graph.nodes[i+1:]):
                self._repulse(n1, n2)
        # Forces on nodes due to edge attractions.
        for e in self.graph.edges:
            self._attract(e.node1, e.node2, weight * e.weight, 1.0 / (e.length or 0.01))
        # Move nodes by given force.
        # Displacement is clamped to +/-limit; forces are reset for the next step.
        for n in self.graph.nodes:
            if not n.fixed:
                n._x += max(-limit, min(self.force * n.force.x, limit))
                n._y += max(-limit, min(self.force * n.force.y, limit))
            n.force.x = 0
            n.force.y = 0
    def copy(self, graph):
        # Copies the layout settings (but not the iteration state) to a new layout.
        g = GraphSpringLayout(graph)
        g.k, g.force, g.repulsion = self.k, self.force, self.repulsion
        return g
#### GRAPH ANALYSIS ################################################################################
#--- GRAPH SEARCH ----------------------------------------------------------------------------------
def depth_first_search(node, visit=lambda node: False, traversable=lambda node, edge: True, _visited=None):
    """ Visits all the nodes connected to the given root node, depth-first.
        The visit function is called on each node.
        Recursion will stop if it returns True, and subsequently dfs() will return True.
        The traversable function takes the current node and edge,
        and returns True if we are allowed to follow this connection to the next node.
        For example, the traversable for directed edges is follows:
        lambda node, edge: node == edge.node1
    """
    stop = visit(node)
    _visited = _visited or {}
    _visited[node.id] = True
    for neighbor in node.links:
        if stop:
            return True
        edge = node.links.edge(neighbor)
        # Only an explicit False from traversable() blocks the connection.
        if traversable(node, edge) is False:
            continue
        if neighbor.id not in _visited:
            stop = depth_first_search(neighbor, visit, traversable, _visited)
    return stop

dfs = depth_first_search
def breadth_first_search(node, visit=lambda node: False, traversable=lambda node, edge: True):
    """ Visits all the nodes connected to the given root node, breadth-first.
        Returns True as soon as the visit function does; False otherwise.
    """
    visited = {}
    queue = [node]
    while queue:
        current = queue.pop(0)
        if current.id not in visited:
            if visit(current):
                return True
            # Only an explicit False from traversable() blocks the connection.
            for neighbor in current.links:
                if traversable(current, current.links.edge(neighbor)) is not False:
                    queue.append(neighbor)
            visited[current.id] = True
    return False

bfs = breadth_first_search
def paths(graph, id1, id2, length=4, path=[], _root=True):
    """ Returns a list of paths from node with id1 to node with id2.
        Only paths shorter than or equal to the given length are included.
        Uses a brute-force DFS approach (performance drops exponentially for longer paths).
    """
    if len(path) >= length or id1 not in graph:
        return []
    if id1 == id2:
        return [path + [id1]]
    branch = path + [id1]
    visited = set(branch) # Fast membership test (5% speedup).
    found = []
    for node in graph[id1].links:
        if node.id not in visited:
            found.extend(paths(graph, node.id, id2, length, branch, False))
    if _root:
        # Only the outermost call sorts the result, shortest paths first.
        return sorted(found, key=len)
    return found
def edges(path):
    """ Returns an iterator of Edge objects for the given list of nodes.
        It yields None where two successive nodes are not connected.
    """
    # For example, the distance (i.e., edge weight sum) of a path:
    # sum(e.weight for e in edges(path))
    if len(path) > 1:
        return (n.links.edge(path[i+1]) for i, n in enumerate(path[:-1]))
    return iter(())
#--- GRAPH ADJACENCY -------------------------------------------------------------------------------
def adjacency(graph, directed=False, reversed=False, stochastic=False, heuristic=None):
    """ Returns a dictionary indexed by node id1's,
        in which each value is a dictionary of connected node id2's linking to the edge weight.
        If directed=True, edges go from id1 to id2, but not the other way.
        If stochastic=True, all the weights for the neighbors of a given node sum to 1.
        A heuristic function can be given that takes two node id's and returns
        an additional cost for movement between the two nodes.
    """
    # Caching a heuristic from a method won't work.
    # Bound method objects are transient,
    # i.e., id(object.method) returns a new value each time.
    cached = graph._adjacency
    if cached is not None \
    and cached[1:] == (directed, reversed, stochastic, heuristic and heuristic.func_code):
        return cached[0]
    adj = {}
    for n in graph.nodes:
        adj[n.id] = {}
    for e in graph.edges:
        if reversed:
            id1, id2 = e.node2.id, e.node1.id
        else:
            id1, id2 = e.node1.id, e.node2.id
        # Heavier edges cost less to traverse.
        w = 1.0 - 0.5 * e.weight
        if heuristic:
            w += heuristic(id1, id2)
        adj[id1][id2] = w
        if not directed:
            adj[id2][id1] = w
    if stochastic:
        for id1 in adj:
            total = sum(adj[id1].values())
            for id2 in adj[id1]:
                adj[id1][id2] /= total
    # Cache the adjacency map: this makes dijkstra_shortest_path() 2x faster in repeated use.
    graph._adjacency = (adj, directed, reversed, stochastic, heuristic and heuristic.func_code)
    return adj
def dijkstra_shortest_path(graph, id1, id2, heuristic=None, directed=False):
    """ Dijkstra algorithm for finding the shortest path between two nodes.
        Returns a list of node id's, starting with id1 and ending with id2.
        Raises an IndexError between nodes on unconnected graphs.
    """
    # Based on: Connelly Barnes, http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466
    def flatten(list):
        # Flattens a linked list of the form [0,[1,[2,[]]]]
        while len(list) > 0:
            yield list[0]; list=list[1]
    G = adjacency(graph, directed=directed, heuristic=heuristic)
    q = [(0, id1, ())] # Heap of (cost, path_head, path_rest).
    visited = set() # Visited nodes.
    while True:
        # heappop() raises IndexError when the heap is exhausted,
        # i.e., when id2 cannot be reached from id1.
        (cost1, n1, path) = heappop(q)
        if n1 not in visited:
            visited.add(n1)
            if n1 == id2:
                # The linked-list path was built head-first, so reverse it.
                return list(flatten(path))[::-1] + [n1]
            path = (n1, path)
            for (n2, cost2) in G[n1].iteritems():
                if n2 not in visited:
                    heappush(q, (cost1 + cost2, n2, path))
def dijkstra_shortest_paths(graph, id, heuristic=None, directed=False):
    """ Dijkstra algorithm for finding the shortest paths from the given node to all other nodes.
        Returns a dictionary of node id's, each linking to a list of node id's (i.e., the path).
    """
    # Based on: Dijkstra's algorithm for shortest paths modified from Eppstein.
    # Based on: NetworkX 1.4.1: Aric Hagberg, Dan Schult and Pieter Swart.
    # This is 5x faster than:
    # for n in g: dijkstra_shortest_path(g, id, n.id)
    W = adjacency(graph, directed=directed, heuristic=heuristic)
    Q = [] # Use Q as a heap with (distance, node id)-tuples.
    D = {} # Dictionary of final distances.
    P = {} # Dictionary of paths.
    P[id] = [id]
    seen = {id: 0} # Best provisional distance for each node reached so far.
    heappush(Q, (0, id))
    while Q:
        (dist, v) = heappop(Q)
        if v in D: continue # Already finalized.
        D[v] = dist
        for w in W[v].iterkeys():
            vw_dist = D[v] + W[v][w]
            if w not in D and (w not in seen or vw_dist < seen[w]):
                seen[w] = vw_dist
                heappush(Q, (vw_dist, w))
                P[w] = P[v] + [w]
    # Nodes unreachable from the given node map to None.
    for n in graph:
        if n not in P: P[n]=None
    return P
def floyd_warshall_all_pairs_distance(graph, heuristic=None, directed=False):
    """ Floyd-Warshall's algorithm for finding the path length for all pairs for nodes.
        Returns a dictionary of node id's,
        each linking to a dictionary of node id's linking to path length.
        The returned dict also carries a .predecessors attribute,
        usable with predecessor_path() to reconstruct the actual paths.
    """
    from collections import defaultdict # Requires Python 2.5+.
    g = graph.keys()
    d = defaultdict(lambda: defaultdict(lambda: 1e30)) # float('inf')
    p = defaultdict(dict) # Predecessors.
    for e in graph.edges:
        u = e.node1.id
        v = e.node2.id
        # Heavier edges cost less to traverse (cfr. adjacency()).
        w = 1.0 - 0.5 * e.weight
        w = heuristic and heuristic(u, v) + w or w
        d[u][v] = min(w, d[u][v])
        d[u][u] = 0
        p[u][v] = u
        if not directed:
            d[v][u] = min(w, d[v][u])
            p[v][u] = v
    # Try to improve every pair (u, v) by routing through each intermediate w.
    for w in g:
        dw = d[w]
        for u in g:
            du, duw = d[u], d[u][w]
            for v in g:
                # Performance optimization, assumes d[w][v] > 0.
                #if du[v] > duw + dw[v]:
                if du[v] > duw and du[v] > duw + dw[v]:
                    d[u][v] = duw + dw[v]
                    p[u][v] = p[w][v]
    class pdict(dict):
        def __init__(self, predecessors, *args, **kwargs):
            dict.__init__(self, *args, **kwargs)
            self.predecessors = predecessors
    # Drop unreachable pairs (distance still at the 1e30 sentinel).
    return pdict(p, ((u, dict((v, w) for v,w in d[u].iteritems() if w < 1e30)) for u in d))
def predecessor_path(tree, u, v):
    """ Returns the path between node u and node v as a list of node id's.
        The given tree is the return value of floyd_warshall_all_pairs_distance().predecessors.
    """
    def _walk(a, b):
        # Recursively expand the path through the predecessor of b.
        w = tree[a][b]
        if w == a:
            return []
        return _walk(a, w) + [w] + _walk(w, b)
    return [u] + _walk(u, v) + [v]
#--- GRAPH CENTRALITY ------------------------------------------------------------------------------
def brandes_betweenness_centrality(graph, normalized=True, directed=False):
    """ Betweenness centrality for nodes in the graph.
        Betweenness centrality is a measure of the number of shortests paths that pass through a node.
        Nodes in high-density areas will get a good score.
    """
    # Ulrik Brandes, A Faster Algorithm for Betweenness Centrality,
    # Journal of Mathematical Sociology 25(2):163-177, 2001,
    # http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
    # Based on: Dijkstra's algorithm for shortest paths modified from Eppstein.
    # Based on: NetworkX 1.0.1: Aric Hagberg, Dan Schult and Pieter Swart.
    # http://python-networkx.sourcearchive.com/documentation/1.0.1/centrality_8py-source.html
    W = adjacency(graph, directed=directed)
    b = dict.fromkeys(graph, 0.0)
    # One weighted single-source shortest-path pass per node.
    for id in graph:
        Q = [] # Use Q as a heap with (distance, node id)-tuples.
        D = {} # Dictionary of final distances.
        P = {} # Dictionary of paths.
        for n in graph: P[n]=[]
        seen = {id: 0}
        heappush(Q, (0, id, id))
        S = [] # Nodes in order of non-decreasing distance from id.
        E = dict.fromkeys(graph, 0) # sigma
        E[id] = 1.0
        while Q:
            (dist, pred, v) = heappop(Q)
            if v in D:
                continue
            D[v] = dist
            S.append(v)
            E[v] += E[pred]
            for w in W[v]:
                vw_dist = D[v] + W[v][w]
                if w not in D and (w not in seen or vw_dist < seen[w]):
                    seen[w] = vw_dist
                    heappush(Q, (vw_dist, v, w))
                    P[w] = [v]
                    E[w] = 0.0
                elif vw_dist == seen[w]: # Handle equal paths.
                    P[w].append(v)
                    E[w] += E[v]
        # Accumulate dependencies back from the farthest node (Brandes' back-propagation).
        d = dict.fromkeys(graph, 0.0)
        for w in reversed(S):
            for v in P[w]:
                d[v] += (1.0 + d[w]) * E[v] / E[w]
            if w != id:
                b[w] += d[w]
    # Normalize between 0.0 and 1.0.
    m = normalized and max(b.values()) or 1
    b = dict((id, w/m) for id, w in b.iteritems())
    return b
def eigenvector_centrality(graph, normalized=True, reversed=True, rating={}, iterations=100, tolerance=0.0001):
    """ Eigenvector centrality for nodes in the graph (cfr. Google's PageRank).
        Eigenvector centrality is a measure of the importance of a node in a directed network.
        It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.
        Nodes with no incoming connections have a score of zero.
        If you want to measure outgoing connections, reversed should be False.
    """
    # Based on: NetworkX, Aric Hagberg (hagberg@lanl.gov)
    # http://python-networkx.sourcearchive.com/documentation/1.0.1/centrality_8py-source.html
    # Note: much faster than betweenness centrality (which grows exponentially).
    def normalize(vector):
        # Scale the vector in place so its values sum to 1.
        w = 1.0 / (sum(vector.values()) or 1)
        for node in vector:
            vector[node] *= w
        return vector
    G = adjacency(graph, directed=True, reversed=reversed)
    v = normalize(dict([(n, random()) for n in graph])) # Node ID => weight vector.
    # Eigenvector calculation using the power iteration method: y = Ax.
    # It has no guarantee of convergence.
    for i in range(iterations):
        v0 = v
        v = dict.fromkeys(v0.iterkeys(), 0)
        for n1 in v:
            for n2 in G[n1]:
                v[n1] += 0.01 + v0[n2] * G[n1][n2] * rating.get(n1, 1)
        normalize(v)
        # e measures the total change between two iterations.
        e = sum([abs(v[n]-v0[n]) for n in v]) # Check for convergence.
        if e < len(G) * tolerance:
            # Normalize between 0.0 and 1.0.
            m = normalized and max(v.values()) or 1
            v = dict((id, w/m) for id, w in v.iteritems())
            return v
    warn("node weight is 0 because eigenvector_centrality() did not converge.", Warning)
    return dict((n, 0) for n in G)
#--- GRAPH PARTITIONING ----------------------------------------------------------------------------
# a | b => all elements from a and all the elements from b.
# a & b => elements that appear in a as well as in b.
# a - b => elements that appear in a but not in b.
def union(a, b):
    # All elements from a plus all the elements from b (a | b).
    return list(set(a).union(b))
def intersection(a, b):
    # Elements that appear in a as well as in b (a & b).
    return list(set(a).intersection(b))
def difference(a, b):
    # Elements that appear in a but not in b (a - b).
    return list(set(a).difference(b))
def partition(graph):
    """ Returns a list of unconnected subgraphs, largest subgraph first.
    """
    # Creates clusters of nodes and directly connected nodes.
    # Iteratively merges two clusters if they overlap.
    g = []
    for n in graph.nodes:
        g.append(dict.fromkeys((n.id for n in n.flatten()), True))
    for i in reversed(range(len(g))):
        for j in reversed(range(i+1, len(g))):
            if g[i] and g[j] and len(intersection(g[i], g[j])) > 0:
                g[i] = union(g[i], g[j])
                g[j] = []
    g = [graph.copy(nodes=[graph[id] for id in n]) for n in g if n]
    # Largest subgraph first. sort(key=len, reverse=True) replaces the
    # cmp-style sort(lambda a, b: len(b) - len(a)): it is faster and the
    # cmp parameter no longer exists in Python 3.
    g.sort(key=len, reverse=True)
    return g
def is_clique(graph):
    """ A clique is a set of nodes in which each node is connected to all other nodes.
    """
    # A complete graph (density 1.0) is equivalent to checking
    # that every pair of nodes has a connecting edge.
    return graph.density == 1.0
def clique(graph, id):
    """ Returns the largest possible clique for the node with given id.
    """
    if isinstance(id, Node):
        id = id.id
    members = [id]
    for n in graph.nodes:
        # n joins the clique if it is connected to every current member.
        # The generator yields members that are n itself or not connected to n;
        # if it is empty (StopIteration), all members are connected to n.
        unconnected = (x for x in members if n.id == x or graph.edge(n.id, x) is None)
        try:
            unconnected.next()
        except StopIteration:
            members.append(n.id)
    return members
def cliques(graph, threshold=3):
    """ Returns all cliques in the graph with at least the given number of nodes.
    """
    found = []
    for n in graph.nodes:
        c = clique(graph, n.id)
        if len(c) >= threshold:
            # Sort so duplicate cliques (found from different nodes) compare equal.
            c.sort()
            if c not in found:
                found.append(c)
    return found
#### GRAPH UTILITY FUNCTIONS #######################################################################
# Utility functions for safely linking and unlinking of nodes,
# with respect for the surrounding nodes.
def unlink(graph, node1, node2=None):
    """ Removes the edges between node1 and node2.
        If only node1 is given, removes all edges to and from it.
        This does not remove node1 from the graph.
    """
    if not isinstance(node1, Node):
        node1 = graph[node1]
    if not isinstance(node2, Node) and node2 is not None:
        node2 = graph[node2]
    # Iterate over a copy, since edges are removed during iteration.
    for e in list(graph.edges):
        if node1 in (e.node1, e.node2) and node2 in (e.node1, e.node2, None):
            graph.edges.remove(e)
    try:
        node1.links.remove(node2)
        node2.links.remove(node1)
    except (AttributeError, ValueError):
        # AttributeError: node2 is None (all edges of node1 were removed).
        # ValueError: the two nodes were not directly linked.
        # (Narrowed from a bare except, which also hid unrelated errors.)
        pass
def redirect(graph, node1, node2):
    """ Connects all of node1's edges to node2 and unlinks node1.
    """
    if not isinstance(node1, Node):
        node1 = graph[node1]
    if not isinstance(node2, Node):
        node2 = graph[node2]
    for e in graph.edges:
        if node1 not in (e.node1, e.node2):
            continue
        # Copy node1's outgoing and incoming edges onto node2.
        if e.node1 == node1 and e.node2 != node2:
            graph._add_edge_copy(e, node1=node2, node2=e.node2)
        if e.node2 == node1 and e.node1 != node2:
            graph._add_edge_copy(e, node1=e.node1, node2=node2)
    unlink(graph, node1)
def cut(graph, node):
    """ Unlinks the given node, but keeps edges intact by connecting the surrounding nodes.
        If A, B, C, D are nodes and A->B, B->C, B->D, if we then cut B: A->C, A->D.
    """
    if not isinstance(node, Node):
        node = graph[node]
    for e in graph.edges:
        if node not in (e.node1, e.node2):
            continue
        # Reroute each of the node's edges through every neighbor.
        for neighbor in node.links:
            if e.node1 == node and e.node2 != neighbor:
                graph._add_edge_copy(e, node1=neighbor, node2=e.node2)
            if e.node2 == node and e.node1 != neighbor:
                graph._add_edge_copy(e, node1=e.node1, node2=neighbor)
    unlink(graph, node)
def insert(graph, node, a, b):
    """ Inserts the given node between node a and node b.
        If A, B, C are nodes and A->B, if we then insert C: A->C, C->B.
    """
    if not isinstance(node, Node):
        node = graph[node]
    if not isinstance(a, Node):
        a = graph[a]
    if not isinstance(b, Node):
        b = graph[b]
    for e in graph.edges:
        # Split edges between a and b (in either direction) in two,
        # routing each half through the inserted node.
        if e.node1 == a and e.node2 == b:
            graph._add_edge_copy(e, node1=a, node2=node)
            graph._add_edge_copy(e, node1=node, node2=b)
        if e.node1 == b and e.node2 == a:
            graph._add_edge_copy(e, node1=b, node2=node)
            graph._add_edge_copy(e, node1=node, node2=a)
    unlink(graph, a, b)
#### GRAPH EXPORT ##################################################################################
class GraphRenderer(object):
    """ Base class for exporters that serialize a Graph to another format.
    """
    def __init__(self, graph):
        self.graph = graph
    def serialize(self, *args, **kwargs):
        # Subclasses return the rendered graph (no-op here).
        pass
    def export(self, path, *args, **kwargs):
        # Subclasses write the rendered graph to the given path (no-op here).
        pass
#--- GRAPH EXPORT: HTML5 <CANVAS> ELEMENT ---------------------------------------------------------
# Exports graphs to interactive web pages using graph.js.
def minify(js):
    """ Returns a compressed Javascript string with comments and whitespace removed.
    """
    import re
    # Substitutions are applied in order: strip comments first,
    # then collapse whitespace and remove it around punctuation.
    # (An unused tuple of pre-escaped character classes was removed here.)
    replacements = (
        (re.compile(r"\/\*.*?\*\/", re.S), ""), # multi-line comments /**/
        (re.compile(r"\/\/.*"), ""), # single-line comments //
        (re.compile(r";\n"), "; "), # statements (correctly) terminated with ;
        (re.compile(r"[ \t]+"), " "), # spacing and indentation
        (re.compile(r"[ \t]([\(\[\{\,\;\=\-\+\*\/])"), "\\1"), # space before punctuation
        (re.compile(r"([\)\]\}\,\;\=\-\+\*\/])[ \t]"), "\\1"), # space after punctuation
        (re.compile(r"\s+\n"), "\n"), # trailing whitespace
        (re.compile(r"\n+"), "\n")) # empty lines
    for pattern, replacement in replacements:
        js = pattern.sub(replacement, js)
    return js.strip()
# Stylesheet mode: DEFAULT links style.css, INLINE embeds the css (cfr. HTMLCanvasRenderer.stylesheet).
DEFAULT, INLINE = "default", "inline"
# Parts of the rendered page that can be requested individually.
HTML, CANVAS, STYLE, CSS, SCRIPT, DATA = \
    "html", "canvas", "style", "css", "script", "data"
class HTMLCanvasRenderer(GraphRenderer):
    def __init__(self, graph, **kwargs):
        """ Renders the given graph as an interactive HTML5 <canvas> page,
            using the canvas.js + graph.js Javascript libraries.
            Rendering options (e.g., width=700, height=500, fps=30) can be
            passed as keyword arguments and become attributes of the renderer.
        """
        self.graph = graph
        # The HTML page template; filled in by the serializer with
        # title, stylesheet, javascript path (2x), div id, width, height and script.
        self._source = \
            "<!doctype html>\n" \
            "<html>\n" \
            "<head>\n" \
            "\t<title>%s</title>\n" \
            "\t<meta charset=\"utf-8\">\n" \
            "\t%s\n" \
            "\t<script type=\"text/javascript\" src=\"%scanvas.js\"></script>\n" \
            "\t<script type=\"text/javascript\" src=\"%sgraph.js\"></script>\n" \
            "</head>\n" \
            "<body>\n" \
            "\t<div id=\"%s\" style=\"width:%spx; height:%spx;\">\n" \
            "\t\t<script type=\"text/canvas\">\n" \
            "\t\t%s\n" \
            "\t\t</script>\n" \
            "\t</div>\n" \
            "</body>\n" \
            "</html>"
        # HTML
        self.title = "Graph" # <title>Graph</title>
        self.javascript = None # Path to canvas.js + graph.js.
        self.stylesheet = INLINE # Either None, INLINE, DEFAULT (style.css) or a custom path.
        self.id = "graph" # <div id="graph">
        self.ctx = "canvas.element"
        self.width = 700 # Canvas width in pixels.
        self.height = 500 # Canvas height in pixels.
        # JS Graph
        self.frames = 500 # Number of frames of animation.
        self.fps = 30 # Frames per second.
        self.ipf = 2 # Iterations per frame.
        self.weighted = False # Indicate betweenness centrality as a shadow?
        self.directed = False # Indicate edge direction with an arrow?
        self.prune = None # None or int, calls Graph.prune() in Javascript.
        self.pack = True # Shortens leaf edges, adds eigenvector weight to node radius.
        # JS GraphLayout
        self.distance = graph.distance # Node spacing.
        self.k = graph.layout.k # Force constant.
        self.force = graph.layout.force # Force dampener.
        self.repulsion = graph.layout.repulsion # Repulsive force radius.
        # Data
        self.weight = [DEGREE, WEIGHT, CENTRALITY]
        self.href = {} # Dictionary of Node.id => URL.
        self.css = {} # Dictionary of Node.id => CSS classname.
        # Default options.
        # If a Node or Edge has one of these settings,
        # it is not passed to Javascript to save bandwidth.
        self.default = {
            "radius": 5,
            "fixed": False,
            "fill": None,
            "stroke": (0,0,0,1),
            "strokewidth": 1,
            "text": (0,0,0,1),
            "fontsize": 11,
        }
        # Override settings from keyword arguments.
        self.default.update(kwargs.pop("default", {}))
        for k, v in kwargs.items():
            setattr(self, k, v)
    def _escape(self, s):
        # Quotes and backslash-escapes a string for safe embedding in Javascript.
        # Non-string values are returned unchanged.
        if isinstance(s, basestring):
            return "\"%s\"" % s.replace("\"", "\\\"")
        return s
def _rgba(self, clr):
# Color or tuple to a CSS "rgba(255,255,255,1.0)" string.
return "\"rgba(%s,%s,%s,%.2f)\"" % (int(clr[0]*255), int(clr[1]*255), int(clr[2]*255), clr[3])
    @property
    def data(self):
        """ Yields a string of Javascript code that loads the nodes and edges into variable g,
            which is a Javascript Graph object (see graph.js).
            This can be the response of an XMLHttpRequest, after which you move g into your own variable.
        """
        return "".join(self._data())
    def _data(self):
        """ Yields the Javascript data source as a list of string fragments (cfr. data).
        """
        s = []
        s.append("g = new Graph(%s, %s);\n" % (self.ctx, self.distance))
        s.append("var n = {")
        if len(self.graph.nodes) > 0:
            s.append("\n")
        # Translate node properties to Javascript dictionary (var n).
        # Properties equal to self.default are omitted to save bandwidth.
        for n in self.graph.nodes:
            p = []
            if n._x != 0:
                p.append("x:%i" % n._x) # 0
            if n._y != 0:
                p.append("y:%i" % n._y) # 0
            if n.radius != self.default["radius"]:
                p.append("radius:%.1f" % n.radius) # 5.0
            if n.fixed != self.default["fixed"]:
                p.append("fixed:%s" % repr(n.fixed).lower()) # false
            if n.fill != self.default["fill"]:
                p.append("fill:%s" % self._rgba(n.fill)) # [0,0,0,1.0]
            if n.stroke != self.default["stroke"]:
                p.append("stroke:%s" % self._rgba(n.stroke)) # [0,0,0,1.0]
            if n.strokewidth != self.default["strokewidth"]:
                p.append("strokewidth:%.1f" % n.strokewidth) # 0.5
            if n.text is None:
                p.append("text:false")
            if n.text and n.text.fill != self.default["text"]:
                p.append("text:%s" % self._rgba(n.text.fill)) # [0,0,0,1.0]
            if n.text and "font" in n.text.__dict__:
                p.append("font:\"%s\"" % n.text.__dict__["font"]) # "sans-serif"
            if n.text and n.text.__dict__.get("fontsize", self.default["fontsize"]) != self.default["fontsize"]:
                p.append("fontsize:%i" % int(max(1, n.text.fontsize)))
            if n.text and "fontweight" in n.text.__dict__: # "bold"
                p.append("fontweight:\"%s\"" % n.text.__dict__["fontweight"])
            if n.text and n.text.string != n.id:
                p.append("label:\"%s\"" % n.text.string)
            if n.id in self.href:
                p.append("href:\"%s\"" % self.href[n.id])
            if n.id in self.css:
                p.append("css:\"%s\"" % self.css[n.id])
            s.append("\t%s: {%s},\n" % (self._escape(n.id), ", ".join(p)))
        s[-1] = s[-1].rstrip(",\n") # Trailing comma breaks in IE.
        s.append("\n};\n")
        s.append("var e = [")
        if len(self.graph.edges) > 0:
            s.append("\n")
        # Translate edge properties to Javascript dictionary (var e).
        for e in self.graph.edges:
            id1, id2 = self._escape(e.node1.id), self._escape(e.node2.id)
            p = []
            if e.weight != 0:
                p.append("weight:%.2f" % e.weight) # 0.00
            if e.length != 1:
                p.append("length:%.2f" % e.length) # 1.00
            if e.type is not None:
                p.append("type:\"%s\"" % e.type) # "is-part-of"
            if e.stroke != self.default["stroke"]:
                p.append("stroke:%s" % self._rgba(e.stroke)) # [0,0,0,1.0]
            if e.strokewidth != self.default["strokewidth"]:
                p.append("strokewidth:%.2f" % e.strokewidth) # 0.5
            s.append("\t[%s, %s, {%s}],\n" % (id1, id2, ", ".join(p)))
        s[-1] = s[-1].rstrip(",\n") # Trailing comma breaks in IE.
        s.append("\n];\n")
        # Append the nodes to graph g.
        s.append("for (var id in n) {\n"
            "\tg.addNode(id, n[id]);\n"
            "}\n")
        # Append the edges to graph g.
        s.append("for (var i=0; i < e.length; i++) {\n"
            "\tvar n1 = g.nodeset[e[i][0]];\n"
            "\tvar n2 = g.nodeset[e[i][1]];\n"
            "\tg.addEdge(n1, n2, e[i][2]);\n"
            "}")
        return s
@property
def script(self):
    """Return the full canvas.js source as a single string.

    The source defines a setup() function that loads the nodes and edges
    into variable g (Graph), and a draw() function that starts the
    animation and updates the layout of g.
    """
    fragments = self._script()
    return "".join(fragments)
def _script(self):
"""Assemble the canvas.js source as a list of string fragments.

Emits a setup() function (canvas size, fps, inlined graph data from
_data(), layout settings, optional centrality weighting, node packing
and pruning) and a draw() function that animates the layout.
Returns the fragments; callers join them (see the script property).
"""
s = [];
s.append("function setup(canvas) {\n")
s.append( "\tcanvas.size(%s, %s);\n" % (self.width, self.height))
s.append( "\tcanvas.fps = %s;\n" % (self.fps))
# Inline the node/edge data produced by _data(), re-indented one tab.
s.append( "\t" + "".join(self._data()).replace("\n", "\n\t"))
s.append( "\n")
# Apply the layout settings.
s.append( "\tg.layout.k = %s; // Force constant (= edge length).\n"
"\tg.layout.force = %s; // Repulsive strength.\n"
"\tg.layout.repulsion = %s; // Repulsive radius.\n" % (
self.k,
self.force,
self.repulsion))
# Apply eigenvector, betweenness and degree centrality.
# self.weight is either True (apply all three) or a list/tuple of the
# WEIGHT / CENTRALITY / DEGREE markers selecting individual metrics.
if self.weight is True: s.append(
"\tg.eigenvectorCentrality();\n"
"\tg.betweennessCentrality();\n"
"\tg.degreeCentrality();\n")
if isinstance(self.weight, (list, tuple)):
if WEIGHT in self.weight: s.append(
"\tg.eigenvectorCentrality();\n")
if CENTRALITY in self.weight: s.append(
"\tg.betweennessCentrality();\n")
if DEGREE in self.weight: s.append(
"\tg.degreeCentrality();\n")
# Apply node weight to node radius.
if self.pack: s.append(
"\t// Apply Node.weight to Node.radius.\n"
"\tfor (var i=0; i < g.nodes.length; i++) {\n"
"\t\tvar n = g.nodes[i];\n"
"\t\tn.radius = n.radius + n.radius * n.weight;\n"
"\t}\n")
# Apply edge length (leaves get shorter edges).
if self.pack: s.append(
"\t// Apply Edge.length (leaves get shorter edges).\n"
"\tfor (var i=0; i < g.nodes.length; i++) {\n"
"\t\tvar e = g.nodes[i].edges();\n"
"\t\tif (e.length == 1) {\n"
"\t\t\te[0].length *= 0.2;\n"
"\t\t}\n"
"\t}\n")
# Apply pruning.
if self.prune is not None: s.append(
"\tg.prune(%s);\n" % self.prune)
# Implement <canvas> draw(): keep updating the layout until the
# configured number of frames has elapsed, then only handle dragging.
s.append("}\n")
s.append("function draw(canvas) {\n"
"\tif (g.layout.iterations <= %s) {\n"
"\t\tcanvas.clear();\n"
"\t\t//shadow();\n"
"\t\tstroke(0);\n"
"\t\tfill(0,0);\n"
"\t\tg.update(%s);\n"
"\t\tg.draw(%s, %s);\n"
"\t}\n"
"\tg.drag(canvas.mouse);\n"
"}" % (
int(self.frames),
int(self.ipf),
str(self.weighted).lower(),
str(self.directed).lower()))
return s
@property
def canvas(self):
    """Return HTML: a <div id="..."> wrapping a <script type="text/canvas">.

    The <div> wrapper is required as a container for the node labels.
    """
    # Re-indent the generated script to sit inside the <script> tag.
    js = self.script.replace("\n", "\n\t\t")
    html = "<div id=\"%s\" style=\"width:%spx; height:%spx;\">\n" % (self.id, self.width, self.height)
    html += "\t<script type=\"text/canvas\">\n"
    html += "\t\t%s\n" % js
    html += "\t</script>\n"
    html += "</div>"
    return html
@property
def style(self):
    """Return the CSS rules for <div id="graph"> and its contents."""
    # Template with three %s slots, all filled with this renderer's id.
    template = (
        "body { font: 11px sans-serif; }\n"
        "a { color: dodgerblue; }\n"
        "#%s canvas { }\n"
        "#%s .node-label { font-size: 11px; }\n"
        "#%s {\n"
        "\tdisplay: inline-block;\n"
        "\tposition: relative;\n"
        "\toverflow: hidden;\n"
        "\tborder: 1px solid #ccc;\n"
        "}"
    )
    return template % (self.id, self.id, self.id)
@property
def html(self):
""" Yields a string of HTML to visualize the graph using a force-based spring layout.
The js parameter sets the path to graph.js and canvas.js.
"""
# Path prefix for the graph.js / canvas.js <script> tags ("" = same folder).
js = self.javascript or ""
# Build the stylesheet fragment: inline <style>, default style.css link,
# a custom link, or nothing.
if self.stylesheet == INLINE:
css = self.style.replace("\n","\n\t\t").rstrip("\t")
css = "<style type=\"text/css\">\n\t\t%s\n\t</style>" % css
elif self.stylesheet == DEFAULT:
css = "<link rel=\"stylesheet\" href=\"style.css\" type=\"text/css\" media=\"screen\" />"
elif self.stylesheet is not None:
css = "<link rel=\"stylesheet\" href=\"%s\" type=\"text/css\" media=\"screen\" />" % self.stylesheet
else:
css = ""
# Re-indent the generated canvas.js code for the page template.
s = self._script()
s = "".join(s)
s = "\t" + s.replace("\n", "\n\t\t\t")
s = s.rstrip()
# NOTE(review): self._source is a page template defined elsewhere in this
# file; from the argument list it takes title, css, js (twice), id,
# width, height and the script body — confirm against its definition.
s = self._source % (
self.title,
css,
js,
js,
self.id,
self.width,
self.height,
s)
return s
def serialize(self, type=HTML):
    """Render the graph as a string of the requested type.

    HTML -> full page, CANVAS -> <div>/<script> snippet, STYLE/CSS -> CSS,
    SCRIPT -> canvas.js code, DATA -> node/edge data. Unknown types yield
    None, as before.
    """
    dispatch = (
        (HTML, "html"),
        (CANVAS, "canvas"),
        (STYLE, "style"),
        (CSS, "style"),
        (SCRIPT, "script"),
        (DATA, "data"),
    )
    for token, attribute in dispatch:
        if type == token:
            return getattr(self, attribute)

# Backwards compatibility.
render = serialize
def export(self, path, encoding="utf-8"):
    """ Generates a folder at the given path containing an index.html
        that visualizes the graph using the HTML5 <canvas> tag.

        Any existing folder at path is removed first. Writes graph.js and
        canvas.js (unless self.javascript points at a custom location),
        style.css (when the DEFAULT stylesheet is selected) and index.html.
    """
    if os.path.exists(path):
        rmtree(path)
    os.mkdir(path)
    # Copy compressed graph.js + canvas.js (unless a custom path is given.)
    if self.javascript is None:
        for p, f in (("..", "canvas.js"), (".", "graph.js")):
            # Use context managers so both handles are closed even on
            # error (the read handle was previously never closed).
            with open(os.path.join(MODULE, p, f), "r") as src:
                with open(os.path.join(path, f), "w") as dst:
                    dst.write(minify(src.read()))
    # Create style.css.
    if self.stylesheet == DEFAULT:
        with open(os.path.join(path, "style.css"), "w") as css_file:
            css_file.write(self.style)
    # Create index.html.
    with open(os.path.join(path, "index.html"), "w", encoding=encoding) as html_file:
        html_file.write(self.html)
#--- GRAPH EXPORT: GRAPHML ------------------------------------------------------------------------
# Exports graphs as GraphML XML, which can be read by Gephi (https://gephi.org).
# Author: Frederik Elwert <frederik.elwert@web.de>, 2014.

# Serialization type token accepted by the module-level export()/serialize().
GRAPHML = "graphml"
class GraphMLRenderer(GraphRenderer):
    """Renders a Graph as GraphML XML (readable by e.g. Gephi)."""

    def serialize(self, directed=False):
        """Return the graph as a GraphML XML string.

        Round-trips through a temporary file in the working directory,
        since export() writes via ElementTree; the file is removed after
        reading.
        """
        p = "tmp.graphml"
        self.export(p, directed, encoding="utf-8")
        # Close the handle explicitly (it was previously leaked).
        with open(p, encoding="utf-8") as f:
            s = f.read()
        os.unlink(p)
        return s

    def export(self, path, directed=False, encoding="utf-8"):
        """ Generates a GraphML XML file at the given path.
        """
        import xml.etree.ElementTree as etree
        ns = "{http://graphml.graphdrawing.org/xmlns}"
        etree.register_namespace("", ns.strip("{}"))
        # Declare attribute types up front:
        # node labels are strings, edge weights are doubles.
        root = etree.Element(ns + "graphml")
        root.insert(0, etree.Element(ns + "key", **{
            "id": "node_label", "for": "node", "attr.name": "label", "attr.type": "string"
        }))
        root.insert(0, etree.Element(ns + "key", **{
            "id": "edge_weight", "for": "edge", "attr.name": "weight", "attr.type": "double"
        }))
        # Map Node.id => GraphML node id.
        m = {}
        g = etree.SubElement(root, ns + "graph", id="g", edgedefault=directed and "directed" or "undirected")
        # Export nodes (label only when it differs from the id).
        for i, n in enumerate(self.graph.nodes):
            m[n.id] = "node%s" % i
            x = etree.SubElement(g, ns + "node", id=m[n.id])
            x = etree.SubElement(x, ns + "data", key="node_label")
            if n.text and n.text.string != n.id:
                x.text = n.text.string
        # Export edges with their weight.
        for i, e in enumerate(self.graph.edges):
            x = etree.SubElement(g, ns + "edge", id="edge%s" % i, source=m[e.node1.id], target=m[e.node2.id])
            x = etree.SubElement(x, ns + "data", key="edge_weight")
            x.text = "%.3f" % e.weight
        # Export graph with pretty indented XML.
        # http://effbot.org/zone/element-lib.htm#prettyprint
        def indent(e, level=0):
            w = "\n" + level * "  "
            if len(e):
                if not e.text or not e.text.strip():
                    e.text = w + "  "
                if not e.tail or not e.tail.strip():
                    e.tail = w
                for e in e:
                    indent(e, level+1)
                if not e.tail or not e.tail.strip():
                    e.tail = w
            else:
                if level and (not e.tail or not e.tail.strip()):
                    e.tail = w
        indent(root)
        tree = etree.ElementTree(root)
        tree.write(path, encoding=encoding)
#--------------------------------------------------------------------------------------------------
# The export() and serialize() function are called from Graph.export() and Graph.serialize(),
# and are expected to handle any GraphRenderer by specifying an optional type=HTML|GRAPHML.
def export(graph, path, encoding="utf-8", **kwargs):
    """Write graph to path, choosing the renderer from the type keyword
    (HTML by default) or from a .graphml file extension.
    """
    type = kwargs.pop("type", HTML)
    # Export to GraphML.
    if type == GRAPHML or path.endswith(".graphml"):
        renderer = GraphMLRenderer(graph)
        directed = kwargs.get("directed", False)
        return renderer.export(path, directed=directed, encoding=encoding)
    # Export to HTML with <canvas>.
    if type == HTML:
        kwargs.setdefault("stylesheet", DEFAULT)
        renderer = HTMLCanvasRenderer(graph, **kwargs)
        return renderer.export(path, encoding)
def serialize(graph, type=HTML, **kwargs):
    """Return graph rendered as a string of the given type
    (GraphML XML, or one of the HTML/<canvas> renderer's formats).
    """
    # Return GraphML string.
    if type == GRAPHML:
        renderer = GraphMLRenderer(graph)
        return renderer.serialize(directed=kwargs.get("directed", False))
    # Return HTML string.
    if type in (HTML, CANVAS, STYLE, CSS, SCRIPT, DATA):
        kwargs.setdefault("stylesheet", INLINE)
        renderer = HTMLCanvasRenderer(graph, **kwargs)
        return renderer.serialize(type)

# Backwards compatibility.
write, render = export, serialize
|
boompieman/iim_project
|
project_python2/lib/python2.7/site-packages/pattern/graph/__init__.py
|
Python
|
gpl-3.0
| 65,664
|
[
"VisIt"
] |
9a3e532b78496c6ce22e77b926268c362864eee75f8147bafb940c04e7f3a31e
|
from __future__ import annotations
import pytest
import scitbx.matrix
from cctbx import sgtbx
from cctbx.sgtbx import bravais_types
from dxtbx.model import Crystal, Experiment, ExperimentList
from dials.algorithms.indexing import assign_indices, non_primitive_basis
from dials.array_family import flex
@pytest.mark.parametrize("space_group_symbol", bravais_types.acentric)
def test_detect(space_group_symbol):
    """detect() returns a 3x3 matrix for centred settings, None for primitive."""
    info = sgtbx.space_group_info(space_group_symbol)
    symmetry = info.any_compatible_crystal_symmetry(volume=1000)
    reflections = symmetry.build_miller_set(anomalous_flag=True, d_min=1).expand_to_p1()
    result = non_primitive_basis.detect(reflections.indices())
    if info.group().conventional_centring_type_symbol() == "P":
        # Primitive lattice: nothing to detect.
        assert result is None
    else:
        # Centred lattice: a 3x3 change-of-basis matrix is returned.
        assert result is not None
        assert isinstance(result, scitbx.matrix.sqr)
        assert result.n == (3, 3)
@pytest.mark.parametrize("space_group_symbol", bravais_types.acentric)
def test_correct(space_group_symbol):
"""correct() should reindex so the crystal ends up in a primitive setting."""
sgi = sgtbx.space_group_info(space_group_symbol)
cs = sgi.any_compatible_crystal_symmetry(volume=1000)
ms = cs.build_miller_set(anomalous_flag=True, d_min=1).expand_to_p1()
# the reciprocal matrix
B = scitbx.matrix.sqr(cs.unit_cell().fractionalization_matrix()).transpose()
crystal = Crystal(B, sgtbx.space_group())
expts = ExperimentList([Experiment(crystal=crystal)])
# Minimal reflection table: Miller indices, reciprocal-lattice points,
# plus the columns the index assignment code expects.
refl = flex.reflection_table()
refl["miller_index"] = ms.indices()
refl["rlp"] = B.elems * ms.indices().as_vec3_double()
refl["imageset_id"] = flex.int(len(refl))
refl["xyzobs.mm.value"] = flex.vec3_double(len(refl))
non_primitive_basis.correct(expts, refl, assign_indices.AssignIndicesGlobal())
# The corrected crystal should already be primitive, and the reindexed
# Miller indices should match applying the change-of-basis op directly.
cs_corrected = expts.crystals()[0].get_crystal_symmetry()
assert cs_corrected.change_of_basis_op_to_primitive_setting().is_identity_op()
assert (
cs.change_of_basis_op_to_primitive_setting().apply(ms.indices())
== refl["miller_index"]
)
|
dials/dials
|
tests/algorithms/indexing/test_non_primitive_basis.py
|
Python
|
bsd-3-clause
| 1,981
|
[
"CRYSTAL"
] |
fa04a73e69aef23fe8f3059843d1fbae3502f70b327d16eca9e28a453651c32b
|
from modeller import *
from modeller.scripts import complete_pdb
from modeller.optimizers import conjugate_gradients

# Pseudo-atom demo: restrain every C-alpha of 1fdn to lie within 10 A of
# the C-alpha center of gravity, pin that center near the x=0 plane, keep
# stereochemistry sensible, then optimize with conjugate gradients.
env = environ()
env.io.atom_files_directory = ['../atom_files']
log.verbose()
env.libs.topology.read(file='$(LIB)/top_heav.lib')
env.libs.parameters.read(file='$(LIB)/par.lib')
# Read in the model
mdl = complete_pdb(env, "1fdn")
rsr = mdl.restraints
# Select all C-alpha atoms
allat = selection(mdl)
allca = allat.only_atom_types('CA')
# Create a pseudo atom that is the center of all C-alphas, and activate it
center = pseudo_atom.gravity_center(allca)
rsr.pseudo_atoms.append(center)
# Constrain every C-alpha to be no more than 10 angstroms from the center
for at in allca:
r = forms.upper_bound(group=physical.xy_distance,
feature=features.distance(at, center),
mean=10.0, stdev=0.1)
rsr.add(r)
# Constrain the gravity center to the x=0 plane
# NOTE(review): this restraint is also filed under physical.xy_distance
# even though it acts on an x coordinate — confirm the group is intended.
r = forms.gaussian(group=physical.xy_distance,
feature=features.x_coordinate(center),
mean=0.0, stdev=0.1)
rsr.add(r)
# Keep sensible stereochemistry
rsr.make(allat, restraint_type='stereo', spline_on_site=False)
# Optimize with CG
cg = conjugate_gradients()
cg.optimize(allat, max_iterations=100, output='REPORT')
mdl.write(file='1fas.ini')
|
bjornwallner/proq2-server
|
apps/modeller9v8/examples/python/pseudo_atoms.py
|
Python
|
gpl-3.0
| 1,313
|
[
"Gaussian"
] |
b7fb885d0bc7f020212628979116d0a0f0f0f1d4cebb28a2819337b3c0b0ec66
|
#!/usr/bin/env ipython
from pylab import *
import numpy as np
import console_colors as ccl
from scipy.io.netcdf import netcdf_file
import os
import matplotlib.patches as patches
import matplotlib.transforms as transforms
class gral:
    """General-purpose attribute container.

    Callers attach the real fields (tnorm, med, avr, std_err, nValues, ...)
    dynamically after construction.
    """
    def __init__(self):
        # Placeholder value; overwritten/augmented by callers.
        self.name = 'name'
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def makefig(mc, sh, TEXT, TEXT_LOC, YLIMS, YLAB, fname_fig):
"""Plot superposed-epoch profiles for the MC (magnetic cloud) and sheath
intervals and save the figure as a PNG.

mc, sh: containers with tnorm, med, avr, std_err, nValues arrays.
TEXT/TEXT_LOC: annotation strings and their (x, y) positions.
YLIMS, YLAB: y-axis limits and label; fname_fig: output file name.
NOTE(review): for the 'Temp' variable the mc/sh arrays, YLIMS and
TEXT_LOC are rescaled IN PLACE (divided by 1e4) — callers see the change.
"""
fmc,fsh = 3.0, 1.0 # time scale factors (MC vs sheath duration)
fig = figure(1, figsize=(13, 6))
ax = fig.add_subplot(111)
# The variable name is encoded in the file name: ..._<varname>.png
varname = fname_fig[:-4].split('_')[3]
if(varname == 'Temp'):
mc.med /= 1.0e4; sh.med /= 1.0e4
mc.avr /= 1.0e4; sh.avr /= 1.0e4
mc.std_err /= 1.0e4; sh.std_err /= 1.0e4
YLIMS[0] /= 1.0e4; YLIMS[1] /= 1.0e4
TEXT_LOC['mc'][1] /= 1.0e4
TEXT_LOC['sh'][1] /= 1.0e4
# MC curves (mean and median)
time = fsh+fmc*mc.tnorm
cc = time>=fsh
ax.plot(time[cc], mc.avr[cc], 'o-', color='black', markersize=5, label='mean')
ax.plot(time[cc], mc.med[cc], 'o-', color='red', alpha=.8, markersize=5, markeredgecolor='none', label='median')
# MC shaded band: mean +/- standard error
inf = mc.avr + mc.std_err/np.sqrt(mc.nValues)
sup = mc.avr - mc.std_err/np.sqrt(mc.nValues)
ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5)
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
rect1 = patches.Rectangle((fsh, 0.), width=fmc, height=1,
transform=trans, color='blue',
alpha=0.3)
ax.add_patch(rect1)
# sheath curves
time = fsh*sh.tnorm
cc = time<=fsh
ax.plot(time[cc], sh.avr[cc], 'o-', color='black', markersize=5)
ax.plot(time[cc], sh.med[cc], 'o-', color='red', alpha=.8, markersize=5, markeredgecolor='none')
# sheath shaded band
inf = sh.avr + sh.std_err/np.sqrt(sh.nValues)
sup = sh.avr - sh.std_err/np.sqrt(sh.nValues)
ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5)
#trans = transforms.blended_transform_factory(
# ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=fsh, height=1,
transform=trans, color='orange',
alpha=0.3)
ax.add_patch(rect1)
ax.legend(loc='best', fontsize=20)
ax.tick_params(labelsize=17)
ax.grid()
ax.set_xlim(-2.0, 7.0)
ax.set_ylim(YLIMS)
ax.text(TEXT_LOC['mc'][0], TEXT_LOC['mc'][1], TEXT['mc'], fontsize=22)
ax.text(TEXT_LOC['sh'][0], TEXT_LOC['sh'][1], TEXT['sh'], fontsize=22)
ax.set_xlabel('time normalized to sheath/MC passage [1]', fontsize=25)
ax.set_ylabel(YLAB, fontsize=27)
# Log scale for the quantities that span orders of magnitude.
if(varname in ('beta','Temp', 'rmsB', 'rmsBoB')):
ax.set_yscale('log')
else:
ax.set_yscale('linear')
savefig(fname_fig, format='png', dpi=100, bbox_inches='tight')
close()
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Driver: read per-variable superposed-epoch statistics for MCs and
# sheaths, then plot each variable with makefig().
dir_figs = '../figs'
dir_inp_mc = '../../../../mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_'
dir_inp_sh = '../../../../sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_'
#vlo, vhi = 100.0, 450.0 # Vmc speed range
#vlo, vhi = 450.0, 550.0 # Vmc speed range
vlo, vhi = 550.0, 3000.0 # Vmc speed range
fname_inp = 'MCflag2_2before.4after_fgap0.2_Wang90.0_vlo.%3.1f.vhi.%3.1f' % (vlo, vhi)
# Event-count files: one line per variable with the number of events.
fname_inp_nro_mc = dir_inp_mc + '/n.events_' + fname_inp + '.txt'
fname_inp_nro_sh = dir_inp_sh + '/n.events_' + fname_inp + '.txt'
fnro_mc = open(fname_inp_nro_mc, 'r')
fnro_sh = open(fname_inp_nro_sh, 'r')
# Per-variable plot settings: axis label, y-limits, and annotation
# positions for each of the three speed ranges.
stf = {}
stf['B'] = {
'label': 'B [T]',
'ylims': [5., 29.],
'text_loc_1': {'mc':[4.5, 15.0], 'sh':[-1.95, 12.0]},
'text_loc_2': {'mc':[4.5, 18.0], 'sh':[-1.95, 12.0]},
'text_loc_3': {'mc':[4.5, 12.0], 'sh':[-1.95, 12.0]}
}
stf['V'] = {
'label': 'Vsw [Km/s]',
'ylims': [350., 800.],
'text_loc_1': {'mc':[4.5, 500.0], 'sh':[-1.95, 520.0]},
'text_loc_2': {'mc':[4.5, 600.0], 'sh':[-1.95, 600.0]},
'text_loc_3': {'mc':[4.5, 410.0], 'sh':[-1.95, 600.0]}
}
stf['rmsBoB'] = {
'label': 'rmsBoB [1]',
'ylims': [0.015, 0.21],
'text_loc_1': {'mc':[4.5, 0.020], 'sh':[-1.95, 0.02]},
'text_loc_2': {'mc':[4.5, 0.095], 'sh':[-1.95, 0.02]},
'text_loc_3': {'mc':[4.5, 0.099], 'sh':[-1.95, 0.02]}
}
stf['rmsB'] = {
'label': 'rmsB [nT]',
'ylims': [0.1, 4.0],
'text_loc_1': {'mc':[4.5, 1.0], 'sh':[-1.95, 1.3]},
'text_loc_2': {'mc':[4.5, 1.0], 'sh':[-1.95, 1.3]},
'text_loc_3': {'mc':[4.5, 1.0], 'sh':[-1.95, 1.3]}
}
stf['beta'] = {
'label': 'beta [1]',
'ylims': [0.02, 10.0],
'text_loc_1': {'mc':[4.5, 0.1], 'sh':[-1.95, 0.2]},
'text_loc_2': {'mc':[4.5, 0.1], 'sh':[-1.95, 0.2]},
'text_loc_3': {'mc':[4.5, 0.1], 'sh':[-1.95, 0.2]}
}
stf['Pcc'] = {
'label': 'proton density [#/cc]',
'ylims': [1, 23],
'text_loc_1': {'mc':[4.5, 14], 'sh':[-1.95, 16.0]},
'text_loc_2': {'mc':[4.5, 14], 'sh':[-1.95, 16.0]},
'text_loc_3': {'mc':[4.5, 11], 'sh':[-1.95, 18.0]}
}
stf['Temp'] = {
'label': 'Temp ($\\times 10^4$) [K]',
'ylims': [1e4, 100e4],
'text_loc_1': {'mc':[4.5, 18.0e4], 'sh':[-1.95, 20.0e4]},
'text_loc_2': {'mc':[4.5, 2.0e4], 'sh':[-1.95, 20.0e4]},
'text_loc_3': {'mc':[4.5, 2.0e4], 'sh':[-1.95, 20.0e4]}
}
stf['AlphaRatio'] = {
'label': 'alpha ratio [1]',
'ylims': [0.02, 0.09],
'text_loc_1': {'mc':[4.5, 0.022], 'sh':[-1.95, 0.07]},
'text_loc_2': {'mc':[4.5, 0.022], 'sh':[-1.95, 0.07]},
'text_loc_3': {'mc':[4.5, 0.022], 'sh':[-1.95, 0.07]}
}
stf['CRs'] = {
'label': 'GCR relative rate [%]',
'ylims': [-8.0, 1.0],
'text_loc_1': {'mc':[4.5, -4.0], 'sh':[-1.95, -4.5]},
'text_loc_2': {'mc':[4.5, -7.0], 'sh':[-1.95, -4.5]},
'text_loc_3': {'mc':[4.5, -7.5], 'sh':[-1.95, -4.5]}
}
TEXT = {}
print " input: "
print " %s " % dir_inp_mc
print " %s \n" % dir_inp_sh
print " vlo, vhi: ", (vlo, vhi), '\n'
# Walk both count files in lockstep: one line per variable.
for lmc, lsh in zip(fnro_mc, fnro_sh):
l_mc = lmc.split()
l_sh = lsh.split()
varname = l_mc[0] # variable name
# Nfinal: events w/80%% of data (the count of interest here)
Nfinal_mc, Nfinal_sh = int(l_mc[1]), int(l_sh[1])
#Nselec = int(l_mc[2]) # # of selected events
print " %s"%varname, ' Nfinal_mc:%d' % Nfinal_mc, 'Nfinal_sh:%d' % Nfinal_sh
mc, sh = gral(), gral()
fname_inp_mc = dir_inp_mc + '/' + fname_inp + '_%s.txt' % varname
fname_inp_sh = dir_inp_sh + '/' + fname_inp + '_%s.txt' % varname
# Columns: normalized time, median, mean, standard error, sample count.
mc.tnorm, mc.med, mc.avr, mc.std_err, mc.nValues = np.loadtxt(fname_inp_mc).T
sh.tnorm, sh.med, sh.avr, sh.std_err, sh.nValues = np.loadtxt(fname_inp_sh).T
# number of events with more than 80% non-gap data
TEXT['mc'] = 'events: %d' % Nfinal_mc
TEXT['sh'] = 'events: %d' % Nfinal_sh
# Pick the annotation positions matching the selected speed range.
if(vlo==100.0):
TEXT_LOC = stf[varname]['text_loc_1'] #1.7, 12.0
elif(vlo==450.0):
TEXT_LOC = stf[varname]['text_loc_2'] #1.7, 12.0
elif(vlo==550.0):
TEXT_LOC = stf[varname]['text_loc_3'] #1.7, 12.0
else:
print " ----> ERROR con 'v_lo'!"
ylims = stf[varname]['ylims'] #[4., 17.]
ylabel = stf[varname]['label'] #'B [nT]'
fname_fig = dir_figs + '/fig_vlo.%3.1f_vhi.%3.1f_%s.png'%(vlo, vhi, varname)
makefig(mc, sh, TEXT, TEXT_LOC, ylims, ylabel, fname_fig)
print "\n output en: "
print " %s \n" % dir_figs
|
jimsrc/seatos
|
mixed/figs/sheaths.paper/src/splited.py
|
Python
|
mit
| 8,181
|
[
"NetCDF"
] |
b4e6b716e33c3c54cd93beb5c1727904db305187add2e09b1b171833507a069b
|
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# L.Pritchard@scri.ac.uk
################################################################################
""" Colors module
Provides:
o ColorTranslator - class to convert tuples of integers and floats into
colors.Color objects
For drawing capabilities, this module uses reportlab to define colors:
http://www.reportlab.com
"""
# ReportLab imports
from reportlab.lib import colors
class ColorTranslator(object):
    """ Class providing methods for translating representations of color
        (ints, int/float tuples, names, scheme indices) into reportlab
        colors.Color objects.
    """
    def __init__(self, filename=None):
        """ __init__(self, filename)

            o filename      Location of a file containing colorscheme
                            information

            Optional parameters set the color scheme
        """
        # Hardwired Artemis color scheme: functional class -> (color, description).
        self._artemis_colorscheme = {0: (colors.Color(1, 1, 1,), "pathogenicity, adaptation, chaperones"),
               1: (colors.Color(0.39, 0.39, 0.39), "energy metabolism"),
               2: (colors.Color(1, 0, 0), "information transfer"),
               3: (colors.Color(0, 1, 0), "surface"),
               4: (colors.Color(0, 0, 1), "stable RNA"),
               5: (colors.Color(0, 1, 1), "degradation of large molecules"),
               6: (colors.Color(1, 0, 1), "degradation of small molecules"),
               7: (colors.Color(1, 1, 0), "central/intermediary/miscellaneous metabolism"),
               8: (colors.Color(0.60, 0.98, 0.60), "unknown"),
               9: (colors.Color(0.53, 0.81, 0.98), "regulators"),
               10: (colors.Color(1, 0.65, 0), "conserved hypotheticals"),
               11: (colors.Color(0.78, 0.59, 0.39), "pseudogenes and partial genes"),
               12: (colors.Color(1, 0.78, 0.78), "phage/IS elements"),
               13: (colors.Color(0.70, 0.70, 0.70), "some miscellaneous information"),
               14: (colors.Color(0, 0, 0), ""),
               15: (colors.Color(1, 0.25, 0.25), "secondary metabolism"),
               16: (colors.Color(1, 0.5, 0.5), ""),
               17: (colors.Color(1, 0.75, 0.75), "")
               }      # Hardwired Artemis color scheme
        self._colorscheme = {}
        if filename is not None:
            self.read_colorscheme(filename)  # Imported color scheme
        else:
            # No file given: fall back on the Artemis scheme.
            self._colorscheme = self._artemis_colorscheme

    def translate(self, color=None, colour=None):
        """ translate(self, color)

            o color    Color defined as an int, a tuple of three ints 0->255
                       or a tuple of three floats 0 -> 1, or a string giving
                       one of the named colors defined by ReportLab, or a
                       ReportLab color object (returned as is).

                       (This argument is overridden by a backwards compatible
                       argument with UK spelling, colour).

            Returns a colors.Color object, determined semi-intelligently
            depending on the input values
        """
        # Let the UK spelling (colour) override the USA spelling (color)
        if colour is not None:
            color = colour

        if color is None:
            raise ValueError("Passed color (or colour) must be a valid color type")
        elif isinstance(color, int):
            color = self.scheme_color(color)
        elif isinstance(color, colors.Color):
            return color
        elif isinstance(color, basestring):
            # Assume its a named reportlab color like "red".
            color = colors.toColor(color)
        elif type(color) == type((1., 2., 3.)) and type(color[0]) == type(1.):
            color = self.float1_color(color)
        elif type(color) == type((1, 2, 3)) and type(color[0]) == type(1):
            color = self.int255_color(color)
        return color

    def read_colorscheme(self, filename):
        """ read_colorscheme(self, filename)

            o filename     The location of a file defining colors in
                           tab-separated format plaintext as:
                           INT \t RED \t GREEN \t BLUE \t Comment
                           Where RED, GREEN and BLUE are intensities in the
                           range 0 -> 255, e.g.
                           2 \t 255 \t 0 \t 0 \t Red: Information transfer

            Reads information from a file containing color information and
            stores it internally
        """
        # Close the handle deterministically (it was previously leaked).
        with open(filename, 'r') as handle:
            lines = handle.readlines()
        for line in lines:
            data = line.strip().split('\t')
            try:
                label = int(data[0])
                red, green, blue = int(data[1]), int(data[2]), int(data[3])
                if len(data) > 4:
                    comment = data[4]
                else:
                    comment = ""
                self._colorscheme[label] = (self.int255_color((red, green, blue)),
                                             comment)
            # Narrowed from a bare except: only malformed lines (bad ints,
            # too few fields) are reported as input-format errors.
            except (ValueError, IndexError):
                raise IOError("Expected INT \t INT \t INT \t INT \t string input")

    def get_artemis_colorscheme(self):
        """ get_artemis_colorscheme(self)

            Return the Artemis color scheme as a dictionary
        """
        return self._artemis_colorscheme

    def artemis_color(self, value):
        """ artemis_color(self, value)

            o value     An int representing a functional class in the Artemis
                        color scheme (see www.sanger.ac.uk for a description),
                        or a string from a GenBank feature annotation for the
                        color which may be dot delimited (in which case the
                        first value is used).

            Takes an int representing a functional class in the Artemis color
            scheme, and returns the appropriate colors.Color object
        """
        try:
            value = int(value)
        except ValueError:
            if value.count('.'):  # dot-delimited
                # BUGFIX: this previously referenced an undefined name
                # 'artemis_color'; split the 'value' argument itself.
                value = int(value.split('.', 1)[0])  # Use only first integer
            else:
                raise
        if value in self._artemis_colorscheme:
            return self._artemis_colorscheme[value][0]
        else:
            raise ValueError("Artemis color out of range: %d" % value)

    def get_colorscheme(self):
        """ get_colorscheme(self)

            Return the user-defined color scheme as a dictionary
        """
        return self._colorscheme

    def scheme_color(self, value):
        """ scheme_color(self, value)

            o value     An int representing a single color in the
                        user-defined color scheme

            Takes an int representing a user-defined color and returns the
            appropriate colors.Color object
        """
        if value in self._colorscheme:
            return self._colorscheme[value][0]
        else:
            raise ValueError("Scheme color out of range: %d" % value)

    def int255_color(self, values):
        """ int255_color(self, values)

            o values        A tuple of (red, green, blue) intensities as
                            integers in the range 0->255

            Takes a tuple of (red, green, blue) intensity values in the range
            0 -> 255 and returns an appropriate colors.Color object
        """
        red, green, blue = values
        factor = 1/255.
        red, green, blue = red * factor, green * factor, blue * factor
        return colors.Color(red, green, blue)

    def float1_color(self, values):
        """ float1_color(self, values)

            o values        A tuple of (red, green, blue) intensities as
                            floats in the range 0 -> 1

            Takes a tuple of (red, green, blue) intensity values in the range
            0 -> 1 and returns an appropriate colors.Color object
        """
        red, green, blue = values
        return colors.Color(red, green, blue)
################################################################################
# RUN AS SCRIPT
################################################################################
if __name__ == '__main__':
# Test code: exercise each conversion path (float tuple, int tuple,
# Artemis class index, scheme index) directly and via translate().
gdct = ColorTranslator()
print gdct.float1_color((0.5, 0.5, 0.5))
print gdct.int255_color((1, 75, 240))
print gdct.artemis_color(7)
print gdct.scheme_color(2)
print gdct.translate((0.5, 0.5, 0.5))
print gdct.translate((1, 75, 240))
print gdct.translate(7)
print gdct.translate(2)
|
bryback/quickseq
|
genescript/Bio/Graphics/GenomeDiagram/_Colors.py
|
Python
|
mit
| 9,024
|
[
"Biopython"
] |
7674047cb095e071e658b8a4fe8c802fa76a89fa7ce98230ac288094375bd2e6
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Tim Moore
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from mdtraj.testing import get_fn, eq
from mdtraj import load
def test_load():
"""Loading a DCD with a hoomdxml topology matches loading the topology alone."""
t = load(get_fn('test_hoomdxml.dcd'), top=get_fn('well-mixed.hoomdxml'))
top = load(get_fn('well-mixed.hoomdxml')).topology
eq(t.topology, top)
eq(top.n_atoms, 6600)
eq(top.n_bonds, 3200)
eq(top.n_chains, 4000)
eq(top.n_chains, top.n_residues) # hoomdxml makes each chain 1 residue
def test_load_no_chains():
    """A topology without chain records gets one chain (and residue) per atom."""
    topology = load(get_fn('no_chains.hoomdxml')).topology
    eq(topology.n_atoms, 3)
    eq(topology.n_bonds, 0)
    eq(topology.n_chains, topology.n_atoms)
    eq(topology.n_chains, topology.n_residues)
def test_load_no_ions():
"""Counts and bond ordering for a hoomdxml file without ions."""
top = load(get_fn('no_ions.hoomdxml')).topology
eq(top.n_atoms, 1296)
eq(top.n_bonds, 1152)
eq(top.n_chains, 144)
eq(top.n_chains, top.n_residues)
for bond in top.bonds: # atoms bonded to adjacent atoms by index
eq(bond[1].index - bond[0].index, 1)
|
ctk3b/mdtraj
|
mdtraj/tests/test_hoomdxml.py
|
Python
|
lgpl-2.1
| 1,910
|
[
"MDTraj"
] |
634760811f057350b55ff43d0405b67b212de40628766113af37c8bcd343b941
|
import urllib
from bs4 import BeautifulSoup
import pafy
import random
import os
import datetime
# import vlc
# import subprocess
# from properties import *
# from sys import executable
# DEFAULT FUNCTIONS
# TODO: Implement DialogFlow API
def getweather(assistant, place):
"""Scrape the current temperature for place from a Bing search results
page and speak it via assistant.speak().

NOTE(review): relies on Bing's HTML class names ('wtr_currTemp ...'),
which can change at any time; the bare except below also swallows
parsing errors, not just connection failures — consider narrowing.
"""
weather_url = "http://www.bing.com/search?q=weather+" + place.replace(" ", "+")
print(weather_url)
try:
with urllib.request.urlopen(weather_url) as url_:
html = url_.read()
soup = BeautifulSoup(html, "html.parser")
print(soup)
# Strip the surrounding <div> markup to leave just the number.
temp = str((soup.findAll(attrs={'class': 'wtr_currTemp b_focusTextLarge'})[0]))
temp = temp.replace('<div class="wtr_currTemp b_focusTextLarge">', "")
temp = temp.replace("</div>", "")
assistant.speak("The temp for" + place + " is " + temp + "degrees")
except:
assistant.speak("I couldn't connect to Bing")
def bingsearch(assistant, thingToSearch):
"""Scrape Bing's answer box for thingToSearch and speak the result.

Tries the 'rwrl ...' answer container first and falls back to
'b_focusTextMedium'. NOTE(review): both class names are Bing markup
internals and may break without notice; the outer bare except reports
every failure as a connection problem.
"""
searchUrl = "http://www.bing.com/search?q=" + thingToSearch.replace(" ", "+")
print(searchUrl)
try:
with urllib.request.urlopen(searchUrl) as url_:
html = url_.read()
soup = BeautifulSoup(html, "html.parser")
print(soup)
try:
result = str((soup.findAll(attrs={'class': 'rwrl rwrl_sec rwrl_padref'})[0]))
except:
result = str((soup.findAll(attrs={'class': 'b_focusTextMedium'})[0]))
print("result " + result)
# Strip whichever container markup matched.
result = result.replace('<div class="rwrl rwrl_sec rwrl_padref"><p>', "")
result = result.replace("</p></div>", "")
result = result.replace('<div class="b_focusTextMedium">', '')
result = result.replace('</div>', '')
print(result)
assistant.speak(result)
except:
assistant.speak("I couldn't connect to Bing")
def getyoutubeaudiourl(assistant, searchWords):
    """Search YouTube for searchWords and return [best_audio_url, searchWords].

    Scrapes the YouTube results page for video links, then resolves the
    first link that pafy can handle.  The original code only recomputed
    the URL on a pafy failure without retrying pafy.new, which left
    'song' unbound and crashed at song.getbestaudio(); this version
    retries each result in order.
    """
    query = urllib.parse.quote(searchWords)
    url = "https://www.youtube.com/results?search_query=" + query
    with urllib.request.urlopen(url) as url_:
        html = url_.read()
    soup = BeautifulSoup(html, "html.parser")
    links = soup.findAll(attrs={'class': 'yt-uix-tile-link'})
    song = None
    for link in links:
        video_url = 'https://www.youtube.com' + link['href']
        try:
            song = pafy.new(video_url)
            break
        except Exception:
            # Unplayable result (e.g. a playlist link) — try the next one.
            continue
    if song is None:
        raise ValueError("no playable YouTube result for %r" % searchWords)
    song = song.getbestaudio()
    print(song.url)
    return [song.url, searchWords]
def telljoke(assistant):
    """Speak one randomly chosen joke followed by a randomly chosen laugh."""
    jokes = [
        "Your GPA",
        "Why did the chicken cross the road? To get to the sparc lab",
        "Why did the chicken fall down the well? He couldn't see that well",
        "2 fish are in a tank, one turns to the other, do you know how to drive this thing?",
        "Monorails make for good one liners"
    ]
    laughs = [
        "ha.ha.ha.",
        "hardy har har",
        "He. he. he.",
        "Now that's a kneee slapper"
    ]
    # Pick independent random indices for the joke and its follow-up laugh.
    joke_index = random.randrange(len(jokes))
    laugh_index = random.randrange(len(laughs))
    assistant.speak(jokes[joke_index])
    assistant.speak(laughs[laugh_index])
# def PlayYoutubeAudio(assistant, url, searchWords):
# assistant.speak("playing " + searchWords)
# p.set_mrl(url)
# p.play()
def feelings(assistant):
    """Speak one randomly chosen canned response about how the assistant feels."""
    karens_feelings = [
        "i am bored. i have not learned any new commands",
        "great. today has been very exciting",
        "it is important to remember computers do not have feelings"
    ]
    choice_index = random.randrange(len(karens_feelings))
    assistant.speak(karens_feelings[choice_index])
# def WriteToArduino(thingToWrite):
# try:
# ser = serial.Serial(arduinoPort, 9600)
# ser.write(thingToWrite.encode())
# Speak("Command sent successfully")
# except:
# Speak("Could not connect to Arduino on " + arduinoPort)
def gettotalhours(assistant, person):
"""Sum a member's logged lab hours from PeopleNoCode/<person>.txt and,
once the threshold is reached, move them to PeopleActive/<person>.txt.

The log file contains alternating "Logged in at: HH:MM" and
"Logged out at: HH:MM" lines. NOTE(review): the slice-based time
parsing assumes fixed-width timestamps and pairs every login with a
logout — an unpaired login would raise IndexError in the summing loop.
"""
if os.path.isfile("PeopleNoCode/" + person + ".txt"):
hoursIn = []
hoursOut = []
minutesIn = []
minutesOut = []
hour = 0.0
minute = 0.0
with open("PeopleNoCode/" + person + ".txt", "r") as file:
for line in file:
if line.__contains__("Logged in at: "):
line = line.replace("Logged in at: ", "")
hour = ((line[0:2]))
hour = hour.replace(":", "")
print(line)
print(line[3:6])
# Minute position shifts for 1- vs 2-digit hours; fall back
# when the first slice is not a valid float.
try:
minute = float(line[3:6])
except:
minute = float(line[2:5])
minutesIn.append(minute)
hoursIn.append(float(hour))
if line.__contains__("Logged out at: "):
line = line.replace("Logged out at: ", "")
hour = ((line[0:2]))
hour = hour.replace(":", "")
minute = float(line[3:6])
minutesOut.append(minute)
hoursOut.append(float(hour))
totalHours = 0.0
i = 0
file.close()
print(hoursIn)
print(hoursOut)
print(minutesIn)
print(minutesOut)
# Accumulate (out - in) per session, hours plus minute fraction.
while i < len(hoursIn):
totalHours += (hoursOut[i] - hoursIn[i]) + ((((minutesOut[i] * 60) - (minutesIn[i] * 60)) / 60) / 60)
i += 1
# NOTE(review): threshold below is 5 hours, but the "to go" message
# computes 9 - totalHours — confirm which target is intended.
if (totalHours >= 5):
assistant.speak(
"You have logged " + str(totalHours) + " hours, you can now get pin code access to the spark lab.")
os.remove("PeopleNoCode/" + person + ".txt")
with open("PeopleActive/" + person + ".txt", "w") as newFile:
newFile.write("Last active: " + str(datetime.datetime.now().date().month) + "/" + str(
datetime.datetime.now().date().day))
else:
assistant.speak("You have logged " + str(totalHours) + " hours, only " + str(9 - totalHours) + "to go!")
else:
assistant.speak("I don't seem to have a file on you.")
def defaultsuitemethodarchived(assistant, intent, userCommand):
    """Dispatch a recognized default-suite phrase to its handler.

    Args:
        assistant: assistant object forwarded to the handlers.
        intent: unused here; kept for interface parity with the other
            suite dispatch functions.
        userCommand: text of the user's command.

    Returns:
        True when a handler ran, False when the command is unrecognized.
    """
    # Bug fix: `print userCommand` was Python-2-only statement syntax; the
    # rest of this file uses the call form print(...), which works on 2 and 3.
    print(userCommand)
    if "how are you" in userCommand:
        feelings(assistant)
        return True
    if "tell me a joke" in userCommand:
        telljoke(assistant)
        return True
    return False
|
SPARC-Auburn/Lab-Assistant
|
assistant/archived/DefaultSuite-Archived.py
|
Python
|
apache-2.0
| 6,593
|
[
"exciting"
] |
8cbf191b6ad330b48273b3e5db1f87ffa757915199a4ef9b52b0fe79d1c4e979
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import numpy as np
import MDAnalysis as mda
from MDAnalysis.analysis import align
from MDAnalysis.analysis.pca import (PCA, cosine_content,
rmsip, cumulative_overlap)
from numpy.testing import (assert_almost_equal, assert_equal,
assert_array_almost_equal)
from MDAnalysisTests.datafiles import (PSF, DCD, RANDOM_WALK, RANDOM_WALK_TOPO,
waterPSF, waterDCD)
import pytest
# Atom selection shared by every fixture/test below: backbone CA atoms of
# the first ten residues.
SELECTION = 'backbone and name CA and resid 1-10'
@pytest.fixture(scope='module')
def u():
    # One shared trajectory universe for the whole module.
    universe = mda.Universe(PSF, DCD)
    return universe
@pytest.fixture(scope='module')
def u_aligned():
    # In-memory universe aligned onto itself over the shared selection.
    universe = mda.Universe(PSF, DCD, in_memory=True)
    align.AlignTraj(universe, universe, select=SELECTION).run()
    return universe
@pytest.fixture(scope='module')
def pca(u):
    # PCA fitted on the in-memory trajectory restricted to SELECTION.
    u.transfer_to_memory()
    analysis = PCA(u, select=SELECTION)
    return analysis.run()
@pytest.fixture(scope='module')
def pca_aligned(u):
    # Run on a copy so the positions stored in `u` stay untouched.
    copied = u.copy()
    return PCA(copied, select=SELECTION, align=True).run()
def test_cov(pca, u):
    # Recompute the covariance by hand and compare with the PCA result.
    group = u.select_atoms(SELECTION)
    coords = np.zeros((pca.n_frames, group.n_atoms * 3))
    for frame, _ in enumerate(u.trajectory):
        coords[frame] = group.positions.ravel()
    expected = np.cov(coords, rowvar=0)
    assert_array_almost_equal(pca.cov, expected, 4)
def test_cum_var(pca):
    # Cumulated variance must end at 1 and be monotonically non-decreasing.
    cum_var = pca.results.cumulated_variance
    assert_almost_equal(cum_var[-1], 1)
    assert_almost_equal(cum_var, np.sort(cum_var), 5)
def test_pcs(pca):
    # The full component matrix is square in the 3N degrees of freedom.
    n_dof = pca._n_atoms * 3
    assert_equal(pca.results.p_components.shape, (n_dof, n_dof))
def test_pcs_n_components(u):
    # Defaults to all 3N components; shrinks after n_components is set.
    analysis = PCA(u, select=SELECTION).run()
    n_dof = analysis._n_atoms * 3
    assert_equal(analysis.n_components, n_dof)
    assert_equal(analysis.results.p_components.shape, (n_dof, n_dof))
    analysis.n_components = 10
    assert_equal(analysis.n_components, 10)
    assert_equal(analysis.results.p_components.shape, (n_dof, 10))
def test_different_steps(pca, u):
    # Transforming a two-frame slice yields exactly two projected rows.
    group = u.select_atoms(SELECTION)
    projection = pca.transform(group, start=5, stop=7, step=1)
    assert_equal(projection.shape, (2, group.n_atoms * 3))
def test_transform_different_atoms(pca, u):
    # Projecting a different atom group than the one fitted warns the user.
    group = u.select_atoms('backbone and name N and resid 1-10')
    with pytest.warns(UserWarning):
        pca.transform(group, start=5, stop=7, step=1)
def test_transform_rerun(u):
    # A PCA fitted on a 5-frame slice still transforms the full trajectory.
    group = u.select_atoms('bynum 1-10')
    u.transfer_to_memory()
    projection = PCA(u, select='bynum 1-10').run(stop=5).transform(group)
    assert_equal(projection.shape, (98, group.n_atoms * 3))
def test_pca_not_run(u):
    """Calling transform() before run() must raise ValueError."""
    atoms = u.select_atoms('bynum 1-10')
    u.transfer_to_memory()
    pca = PCA(u, select='bynum 1-10')
    with pytest.raises(ValueError):
        # Idiom fix: dropped the unused `dot = ...` binding (linter F841).
        pca.transform(atoms, stop=5)
def test_no_frames(u):
    """run(stop=1) leaves too few frames to analyse and must raise."""
    # Idiom fix: removed the `atoms` selection that was assigned but never
    # used (linter F841).
    u.transfer_to_memory()
    with pytest.raises(ValueError):
        PCA(u, select=SELECTION).run(stop=1)
def test_transform(pca, u):
    # Requesting one component yields one column per trajectory frame.
    group = u.select_atoms(SELECTION)
    projected = pca.transform(group, n_components=1)
    assert_equal(projected.shape, (u.trajectory.n_frames, 1))
def test_transform_mismatch(pca, u):
    # A Universe whose atoms do not match the fitted selection must raise.
    with pytest.raises(ValueError):
        pca.transform(u, n_components=1)
def test_transform_universe():
    # Passing a whole Universe (not an AtomGroup) to transform is supported.
    fit_universe = mda.Universe(waterPSF, waterDCD)
    other_universe = mda.Universe(waterPSF, waterDCD)
    PCA(fit_universe).run().transform(other_universe)
def test_cosine_content():
    # A random walk projects onto PC0 with cosine content close to 1.
    walk = mda.Universe(RANDOM_WALK_TOPO, RANDOM_WALK)
    projection = PCA(walk).run().transform(walk.atoms)
    assert_almost_equal(cosine_content(projection, 0), .99, 1)
def test_mean_shape(pca_aligned, u):
    # The stored mean holds one 3-vector per selected atom.
    n_atoms = u.select_atoms(SELECTION).n_atoms
    assert_equal(pca_aligned.mean.shape[0], n_atoms)
    assert_equal(pca_aligned.mean.shape[1], 3)
def test_calculate_mean(pca_aligned, u, u_aligned):
    # PCA's mean must equal the time-average of the aligned coordinates.
    group = u_aligned.select_atoms(SELECTION)
    reference = u_aligned.trajectory.coordinate_array[:, group.ix].mean(axis=0)
    assert_almost_equal(pca_aligned.mean, reference, decimal=5)
def test_given_mean(pca, u):
    """Supplying the fitted mean must reproduce the same covariance."""
    # Bug fix: the original rebound the name `pca`, so the assertion
    # compared the new PCA's covariance with itself and could never fail.
    # Bind the new analysis to its own name and compare it against the
    # fixture's covariance.
    pca_given = PCA(u, select=SELECTION, align=False,
                    mean=pca.mean).run()
    assert_almost_equal(pca_given.cov, pca.cov, decimal=5)
def test_wrong_num_given_mean(u):
    """A mean with the wrong number of atoms is rejected at run()."""
    wrong_mean = [[0, 0, 0], [1, 1, 1]]
    with pytest.raises(ValueError, match='Number of atoms in'):
        # Idiom fix: dropped the unused `pca = ...` binding (linter F841).
        PCA(u, select=SELECTION, mean=wrong_mean).run()
def test_alignment(pca_aligned, u, u_aligned):
    # align=True on the raw universe must match PCA on a pre-aligned one.
    reference = PCA(u_aligned, select=SELECTION, align=False).run()
    assert_almost_equal(pca_aligned.mean, reference.mean)
    assert_almost_equal(pca_aligned.cov, reference.cov)
def test_covariance_norm(pca_aligned, u):
    # Regression value for the Frobenius norm of the aligned covariance.
    norm = np.linalg.norm(pca_aligned.cov)
    assert_almost_equal(norm, 0.96799758, decimal=5)
def test_pca_rmsip_self(pca):
    # The RMSIP of a subspace with itself is exactly 1.
    value = pca.rmsip(pca)
    assert_almost_equal(value, 1.0)
def test_rmsip_ortho(pca):
    # Disjoint sets of principal components are orthogonal, so RMSIP is 0.
    first_ten = pca.results.p_components[:, :10].T
    next_ten = pca.results.p_components[:, 10:20].T
    assert_almost_equal(rmsip(first_ten, next_ten), 0.0)
def test_pytest_too_many_components(pca):
    # n_components accepts at most two values (one per subspace).
    with pytest.raises(ValueError) as excinfo:
        pca.rmsip(pca, n_components=(1, 2, 3))
    assert 'Too many values' in str(excinfo.value)
def test_asymmetric_rmsip(pca):
    # Swapping the per-subspace component counts must change the result.
    forward = pca.rmsip(pca, n_components=(10, 4))
    backward = pca.rmsip(pca, n_components=(4, 10))
    assert abs(forward - backward) > 0.1, 'RMSIP should be asymmetric'
    assert_almost_equal(backward, 1.0)
def test_pca_cumulative_overlap_self(pca):
    # A component fully overlaps with the subspace that contains it.
    overlap = pca.cumulative_overlap(pca, i=1)
    assert_almost_equal(overlap, 1.0)
def test_cumulative_overlap_ortho(pca):
    # Component 11 is orthogonal to the subspace of the first ten.
    components = pca.results.p_components
    overlap = cumulative_overlap(components[:, 11].T, components.T,
                                 n_components=10)
    assert_almost_equal(overlap, 0.0)
@pytest.mark.parametrize(
    'method', ['rmsip',
               'cumulative_overlap'])
def test_compare_not_run_other(u, pca, method):
    # Comparing against a PCA that was never run must raise with a hint.
    unfitted = PCA(u)
    with pytest.raises(ValueError) as excinfo:
        getattr(pca, method)(unfitted)
    assert 'Call run()' in str(excinfo.value)
@pytest.mark.parametrize(
    'method', ['rmsip',
               'cumulative_overlap'])
def test_compare_not_run_self(u, pca, method):
    # Invoking the comparison from an un-run PCA must raise with a hint.
    unfitted = PCA(u)
    with pytest.raises(ValueError) as excinfo:
        getattr(unfitted, method)(pca)
    assert 'Call run()' in str(excinfo.value)
@pytest.mark.parametrize(
    'method', ['rmsip',
               'cumulative_overlap'])
def test_compare_wrong_class(u, pca, method):
    # Non-PCA arguments are rejected with a descriptive error message.
    with pytest.raises(ValueError) as excinfo:
        getattr(pca, method)(3)
    assert 'must be another PCA class' in str(excinfo.value)
@pytest.mark.parametrize("attr", ("p_components", "variance",
                                  "cumulated_variance"))
def test_pca_attr_warning(u, attr):
    """Legacy attributes warn and alias the results container."""
    pca = PCA(u, select=SELECTION).run(stop=2)
    wmsg = f"The `{attr}` attribute was deprecated in MDAnalysis 2.0.0"
    with pytest.warns(DeprecationWarning, match=wmsg):
        # Bug fix: the identity comparison's result was discarded, so the
        # aliasing was never actually checked -- assert it.
        assert getattr(pca, attr) is pca.results[attr]
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/analysis/test_pca.py
|
Python
|
gpl-2.0
| 8,268
|
[
"MDAnalysis"
] |
5a2a7c7151f8f9cc8edcb9e3566c9d4e2b36491428e55caf02578312018487d9
|
import os
import sys
import tempfile
import subprocess
from mock import patch
from contextlib import contextmanager
from shutil import rmtree
this_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(this_dir, '..'))
sys.path.insert(0, os.path.join(this_dir, '..', '..'))
BUILD_SCRIPT = os.path.join(this_dir, '..', '..', 'build_all.py')
amberhome = os.path.join(this_dir, 'fake_data', 'fake_amber')
# ../
import build_all
import utils
extend_cmd = ['-v', '18.0', amberhome]
@contextmanager
def tempfolder():
    """Run the with-block inside a fresh temporary working directory.

    Bug fix: the original chained the restore/cleanup after the ``yield``
    with no try/finally, so an exception raised inside the with-block left
    the process chdir'ed into the (never-removed) temporary directory.
    The previous working directory is now always restored and the
    temporary tree always removed.
    """
    my_temp = tempfile.mkdtemp()
    cwd = os.getcwd()
    os.chdir(my_temp)
    try:
        yield
    finally:
        os.chdir(cwd)
        rmtree(my_temp)
def test_build_all_cmd():
    # Every supported flag combination must run the build script cleanly,
    # each inside its own scratch directory.
    flag_sets = ([], ['--exclude-osx'], ['--exclude-linux'], ['--no-docker'])
    for extra_flags in flag_sets:
        cmd = ['python', BUILD_SCRIPT, '-d'] + extra_flags + extend_cmd
        with tempfolder():
            subprocess.check_call(cmd)
def test_build_all_cmd_with_assertion():
    # The build script's last four output lines must name the artefacts.
    package = os.path.basename(
        utils.get_package_dir(
            os.path.join(this_dir, '..', '..',
                         'conda-ambertools-combine-pythons')))
    print('package', package)
    artefact_templates = [
        'amber-conda-bld/osx-64/{}',
        'amber-conda-bld/linux-64/{}',
        'amber-conda-bld/non-conda-install/osx-64.{}',
        'amber-conda-bld/non-conda-install/linux-64.{}',
    ]
    relative_paths = [template.format(package)
                      for template in artefact_templates]
    cmd = ['python', BUILD_SCRIPT, '-d'] + extend_cmd
    with tempfolder():
        base = os.getcwd()
        expected_lines = [os.path.join(base, rel) for rel in relative_paths]
        output = subprocess.check_output(cmd).decode()
        print('output', output)
        lines = [line for line in output.split('\n') if line][-4:]
        print('lines', lines)
        assert expected_lines == lines
def test_build_single_python_verions():
    # NOTE(review): the "verions" typo in the name is kept on purpose --
    # renaming the test would change which IDs CI selects.
    args = ['--py', '2.7', '-d'] + extend_cmd
    build_all.main(args)
|
Amber-MD/ambertools-conda-build
|
conda_tools/test/test_build_all.py
|
Python
|
mit
| 2,194
|
[
"Amber"
] |
ff2e330e7ee6f79fe583383aeb7091bc4678c41ace44a9ad14cfda411aa7c168
|
"""
Piezo sensitivity analysis module.
"""
import warnings
import numpy as np
from monty.dev import requires
import pymatgen.io.phonopy
from pymatgen.core.tensors import Tensor
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer as sga
try:
from phonopy import Phonopy
from phonopy.harmonic import dynmat_to_fc as dyntofc
except ImportError:
Phonopy = None
__author__ = "Handong Ling"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Handong Ling"
__email__ = "hling@lbl.gov"
__status__ = "Development"
__date__ = "Feb, 2019"
class BornEffectiveCharge:
"""
This class describes the Nx3x3 born effective charge tensor
"""
def __init__(self, structure, bec, pointops, tol=1e-3):
"""
Create an BornEffectiveChargeTensor object defined by a
structure, point operations of the structure's atomic sites.
Note that the constructor uses __new__ rather than __init__
according to the standard method ofsubclassing numpy ndarrays.
Args:
input_matrix (Nx3x3 array-like): the Nx3x3 array-like
representing the born effective charge tensor
"""
self.structure = structure
self.bec = bec
self.pointops = pointops
self.BEC_operations = None
if np.sum(self.bec) >= tol:
warnings.warn("Input born effective charge tensor does not satisfy charge neutrality")
    def get_BEC_operations(self, eigtol=1e-05, opstol=1e-03):
        """
        Returns the symmetry operations which maps the tensors
        belonging to equivalent sites onto each other in the form
        [site index 1, site index 2, [Symmops mapping from site
        index 1 to site index 2]]

        Args:
            eigtol (float): tolerance for determining if two sites are
                related by symmetry
            opstol (float): tolerance for determining if a symmetry
                operation relates two sites

        Return:
            list of symmetry operations mapping equivalent sites and
            the indexes of those sites.
        """
        bec = self.bec
        struc = self.structure
        ops = sga(struc).get_symmetry_operations(cartesian=True)
        # Merge the structure's operations with every site's point
        # operations, dropping duplicates.
        uniquepointops = []
        for op in ops:
            uniquepointops.append(op)
        for ops in self.pointops:
            for op in ops:
                if op not in uniquepointops:
                    uniquepointops.append(op)
        passed = []
        relations = []
        # Group sites by their (sorted, real) eigenvalue spectrum: two site
        # tensors with matching spectra are taken as symmetry-equivalent.
        for site, val in enumerate(bec):
            unique = 1
            eig1, vecs1 = np.linalg.eig(val)
            index = np.argsort(eig1)
            neweig = np.real([eig1[index[0]], eig1[index[1]], eig1[index[2]]])
            for index, p in enumerate(passed):
                if np.allclose(neweig, p[1], atol=eigtol):
                    relations.append([site, index])
                    unique = 0
                    # NOTE(review): this appends a 3-element entry
                    # [site, p[0], neweig] while unique entries are
                    # 2-element [site, neweig]; later comparisons read
                    # p[1], which is then a site index, not a spectrum.
                    # Looks suspicious -- confirm against upstream.
                    passed.append([site, p[0], neweig])
                    break
            if unique == 1:
                relations.append([site, site])
                passed.append([site, neweig])
        BEC_operations = []
        # For each relation, collect every operation that maps the
        # representative site's tensor onto this site's tensor.
        for atom, r in enumerate(relations):
            BEC_operations.append(r)
            BEC_operations[atom].append([])
            for op in uniquepointops:
                new = op.transform_tensor(self.bec[relations[atom][1]])
                # Check the matrix it references
                if np.allclose(new, self.bec[r[0]], atol=opstol):
                    BEC_operations[atom][2].append(op)
        self.BEC_operations = BEC_operations
def get_rand_BEC(self, max_charge=1):
"""
Generate a random born effective charge tensor which obeys a structure's
symmetry and the acoustic sum rule
Args:
max_charge (float): maximum born effective charge value
Return:
np.array Born effective charge tensor
"""
struc = self.structure
symstruc = sga(struc)
symstruc = symstruc.get_symmetrized_structure()
l = len(struc)
BEC = np.zeros((l, 3, 3))
for atom, ops in enumerate(self.BEC_operations):
if ops[0] == ops[1]:
temp_tensor = Tensor(np.random.rand(3, 3) - 0.5)
temp_tensor = sum(temp_tensor.transform(symm_op) for symm_op in self.pointops[atom]) / len(
self.pointops[atom]
)
BEC[atom] = temp_tensor
else:
tempfcm = np.zeros([3, 3])
for op in ops[2]:
tempfcm += op.transform_tensor(BEC[self.BEC_operations[atom][1]])
BEC[ops[0]] = tempfcm
if len(ops[2]) != 0:
BEC[ops[0]] = BEC[ops[0]] / len(ops[2])
# Enforce Acoustic Sum
disp_charge = np.einsum("ijk->jk", BEC) / l
add = np.zeros([l, 3, 3])
for atom, ops in enumerate(self.BEC_operations):
if ops[0] == ops[1]:
temp_tensor = Tensor(disp_charge)
temp_tensor = sum(temp_tensor.transform(symm_op) for symm_op in self.pointops[atom]) / len(
self.pointops[atom]
)
add[ops[0]] = temp_tensor
else:
temp_tensor = np.zeros([3, 3])
for op in ops[2]:
temp_tensor += op.transform_tensor(add[self.BEC_operations[atom][1]])
add[ops[0]] = temp_tensor
if len(ops) != 0:
add[ops[0]] = add[ops[0]] / len(ops[2])
BEC = BEC - add
return BEC * max_charge
class InternalStrainTensor:
"""
This class describes the Nx3x3x3 internal tensor defined by a
structure, point operations of the structure's atomic sites.
"""
def __init__(self, structure, ist, pointops, tol=1e-3):
"""
Create an InternalStrainTensor object.
Args:
input_matrix (Nx3x3x3 array-like): the Nx3x3x3 array-like
representing the internal strain tensor
"""
self.structure = structure
self.ist = ist
self.pointops = pointops
self.IST_operations = None
obj = self.ist
if not (obj - np.transpose(obj, (0, 1, 3, 2)) < tol).all():
warnings.warn("Input internal strain tensor does not satisfy standard symmetries")
def get_IST_operations(self, opstol=1e-03):
"""
Returns the symmetry operations which maps the tensors
belonging to equivalent sites onto each other in the form
[site index 1, site index 2, [Symmops mapping from site
index 1 to site index 2]]
Args:
opstol (float): tolerance for determining if a symmetry
operation relates two sites
Return:
list of symmetry operations mapping equivalent sites and
the indexes of those sites.
"""
struc = self.structure
ops = sga(struc).get_symmetry_operations(cartesian=True)
uniquepointops = []
for op in ops:
uniquepointops.append(op)
for ops in self.pointops:
for op in ops:
if op not in uniquepointops:
uniquepointops.append(op)
IST_operations = []
for atom in range(len(self.ist)): # pylint: disable=C0200
IST_operations.append([])
for j in range(0, atom):
for op in uniquepointops:
new = op.transform_tensor(self.ist[j])
# Check the matrix it references
if np.allclose(new, self.ist[atom], atol=opstol):
IST_operations[atom].append([j, op])
self.IST_operations = IST_operations
    def get_rand_IST(self, max_force=1):
        """
        Generate a random internal strain tensor which obeys a structure's
        symmetry and the acoustic sum rule.

        Args:
            max_force(float): scale factor applied to the generated tensor
                (the docstring previously said "born effective charge value")

        Return:
            InternalStrainTensor object
        """
        l = len(self.structure)
        IST = np.zeros((l, 3, 3, 3))
        for atom, ops in enumerate(self.IST_operations):
            # Sites with recorded mappings are built by transforming the
            # already-filled equivalent sites and averaging.
            temp_tensor = np.zeros([3, 3, 3])
            for op in ops:
                temp_tensor += op[1].transform_tensor(IST[op[0]])

            # Unique sites (no mappings) get a fresh random tensor,
            # symmetrized in the last two indices and over the site's
            # point group.
            if len(ops) == 0:
                temp_tensor = Tensor(np.random.rand(3, 3, 3) - 0.5)
                for dim in range(3):
                    temp_tensor[dim] = (temp_tensor[dim] + temp_tensor[dim].T) / 2
                temp_tensor = sum(temp_tensor.transform(symm_op) for symm_op in self.pointops[atom]) / len(
                    self.pointops[atom]
                )

            IST[atom] = temp_tensor
            if len(ops) != 0:
                IST[atom] = IST[atom] / len(ops)
        return IST * max_force
class ForceConstantMatrix:
"""
This class describes the NxNx3x3 force constant matrix defined by a
structure, point operations of the structure's atomic sites, and the
shared symmetry operations between pairs of atomic sites.
"""
    def __init__(self, structure, fcm, pointops, sharedops, tol=1e-3):
        """
        Create a ForceConstantMatrix object.

        Args:
            structure: pymatgen Structure the matrix belongs to.
            fcm (NxNx3x3 array-like): the NxNx3x3 array-like representing
                the force constant matrix.
            pointops: list of point operations for each atomic site.
            sharedops: point operations shared by each pair of atomic sites.
            tol (float): currently unused; kept for interface parity with
                the other tensor classes in this module.
        """
        self.structure = structure
        self.fcm = fcm
        self.pointops = pointops
        self.sharedops = sharedops
        # Populated by get_FCM_operations(); the other methods require it.
        self.FCM_operations = None
    def get_FCM_operations(self, eigtol=1e-05, opstol=1e-05):
        """
        Returns the symmetry operations which maps the tensors
        belonging to equivalent sites onto each other in the form
        [site index 1a, site index 1b, site index 2a, site index 2b,
        [Symmops mapping from site index 1a, 1b to site index 2a, 2b]]

        Args:
            eigtol (float): tolerance for determining if two sites are
                related by symmetry
            opstol (float): tolerance for determining if a symmetry
                operation relates two sites

        Return:
            list of symmetry operations mapping equivalent sites and
            the indexes of those sites.
        """
        struc = self.structure
        ops = sga(struc).get_symmetry_operations(cartesian=True)
        # Pool the structure's operations with all per-site point
        # operations, dropping duplicates.
        uniquepointops = []
        for op in ops:
            uniquepointops.append(op)

        for ops in self.pointops:
            for op in ops:
                if op not in uniquepointops:
                    uniquepointops.append(op)

        passed = []
        relations = []
        # Group the upper-triangle 3x3 blocks by eigenvalue spectrum:
        # matching spectra mark two site pairs as symmetry-equivalent.
        for atom1 in range(len(self.fcm)):  # pylint: disable=C0200
            for atom2 in range(atom1, len(self.fcm)):
                unique = 1
                eig1, vecs1 = np.linalg.eig(self.fcm[atom1][atom2])
                index = np.argsort(eig1)
                neweig = np.real([eig1[index[0]], eig1[index[1]], eig1[index[2]]])

                for entry, p in enumerate(passed):
                    if np.allclose(neweig, p[2], atol=eigtol):
                        relations.append([atom1, atom2, p[0], p[1]])
                        unique = 0
                        break
                if unique == 1:
                    relations.append([atom1, atom2, atom2, atom1])
                    passed.append([atom1, atom2, np.real(neweig)])
        FCM_operations = []
        for entry, r in enumerate(relations):
            FCM_operations.append(r)
            FCM_operations[entry].append([])

            # Collect every operation that maps the representative block
            # onto this block directly.
            good = 0
            for op in uniquepointops:
                new = op.transform_tensor(self.fcm[r[2]][r[3]])

                if np.allclose(new, self.fcm[r[0]][r[1]], atol=opstol):
                    FCM_operations[entry][4].append(op)
                    good = 1
            # Self-related pairs need no explicit mapping operation.
            if r[0] == r[3] and r[1] == r[2]:
                good = 1
            if r[0] == r[2] and r[1] == r[3]:
                good = 1
            # Fallback: retry with the representative's transpose (swap the
            # two reference sites) when no direct mapping was found.
            if good == 0:
                FCM_operations[entry] = [
                    r[0],
                    r[1],
                    r[3],
                    r[2],
                ]
                FCM_operations[entry].append([])
                for op in uniquepointops:
                    new = op.transform_tensor(self.fcm[r[2]][r[3]])
                    if np.allclose(
                        new.T,
                        self.fcm[r[0]][r[1]],
                        atol=opstol,
                    ):
                        FCM_operations[entry][4].append(op)

        self.FCM_operations = FCM_operations
        return FCM_operations
    def get_unstable_FCM(self, max_force=1):
        """
        Generate an unsymmeterized force constant matrix.

        Args:
            max_force (float): scale of the random force constants
                (the docstring previously said "born effective charge value")

        Return:
            numpy array representing the force constant matrix
        """
        struc = self.structure
        operations = self.FCM_operations
        # set max force in reciprocal space
        numsites = len(struc.sites)
        D = (1 / max_force) * 2 * (np.ones([numsites * 3, numsites * 3]))
        for op in operations:
            same = 0
            transpose = 0
            # Self-mapped diagonal pair: random block generated below.
            if op[0] == op[1] and op[0] == op[2] and op[0] == op[3]:
                same = 1
            # Pair equivalent to its own transpose.
            if op[0] == op[3] and op[1] == op[2]:
                transpose = 1
            if transpose == 0 and same == 0:
                # Rebuild this 3x3 block from its already-filled
                # symmetry-equivalent block, averaged over the mapping ops.
                D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = np.zeros([3, 3])
                D[3 * op[1] : 3 * op[1] + 3, 3 * op[0] : 3 * op[0] + 3] = np.zeros([3, 3])

                for symop in op[4]:
                    tempfcm = D[3 * op[2] : 3 * op[2] + 3, 3 * op[3] : 3 * op[3] + 3]
                    tempfcm = symop.transform_tensor(tempfcm)

                    D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] += tempfcm

                if len(op[4]) != 0:
                    D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = D[
                        3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3
                    ] / len(op[4])

                # Keep the full matrix symmetric block-by-block.
                D[3 * op[1] : 3 * op[1] + 3, 3 * op[0] : 3 * op[0] + 3] = D[
                    3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3
                ].T
                continue

            # Fresh random block, symmetrized over the point operations
            # shared by the two sites.
            temp_tensor = Tensor(np.random.rand(3, 3) - 0.5) * max_force

            temp_tensor_sum = sum(temp_tensor.transform(symm_op) for symm_op in self.sharedops[op[0]][op[1]])
            temp_tensor_sum = temp_tensor_sum / (len(self.sharedops[op[0]][op[1]]))
            if op[0] != op[1]:
                for pair in range(len(op[4])):
                    temp_tensor2 = temp_tensor_sum.T
                    temp_tensor2 = op[4][pair].transform_tensor(temp_tensor2)
                    temp_tensor_sum = (temp_tensor_sum + temp_tensor2) / 2
            else:
                temp_tensor_sum = (temp_tensor_sum + temp_tensor_sum.T) / 2

            D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = temp_tensor_sum
            D[3 * op[1] : 3 * op[1] + 3, 3 * op[0] : 3 * op[0] + 3] = temp_tensor_sum.T

        return D
def get_symmetrized_FCM(self, unsymmetrized_fcm, max_force=1):
"""
Generate a symmeterized force constant matrix from an unsymmeterized matrix
Args:
unsymmetrized_fcm (numpy array): unsymmeterized force constant matrix
max_charge (float): maximum born effective charge value
Return:
3Nx3N numpy array representing the force constant matrix
"""
operations = self.FCM_operations
D = unsymmetrized_fcm
for op in operations:
same = 0
transpose = 0
if op[0] == op[1] and op[0] == operations[2] and op[0] == op[3]:
same = 1
if op[0] == op[3] and op[1] == op[2]:
transpose = 1
if transpose == 0 and same == 0:
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = np.zeros([3, 3])
for symop in op[4]:
tempfcm = D[3 * op[2] : 3 * op[2] + 3, 3 * op[3] : 3 * op[3] + 3]
tempfcm = symop.transform_tensor(tempfcm)
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] += tempfcm
if len(op[4]) != 0:
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = D[
3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3
] / len(op[4])
D[3 * op[1] : 3 * op[1] + 3, 3 * op[0] : 3 * op[0] + 3] = D[
3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3
].T
continue
temp_tensor = Tensor(D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3])
temp_tensor_sum = sum(temp_tensor.transform(symm_op) for symm_op in self.sharedops[op[0]][op[1]])
if len(self.sharedops[op[0]][op[1]]) != 0:
temp_tensor_sum = temp_tensor_sum / (len(self.sharedops[op[0]][op[1]]))
# Apply the proper transformation if there is an equivalent already
if op[0] != op[1]:
for pair in range(len(op[4])):
temp_tensor2 = temp_tensor_sum.T
temp_tensor2 = op[4][pair].transform_tensor(temp_tensor2)
temp_tensor_sum = (temp_tensor_sum + temp_tensor2) / 2
else:
temp_tensor_sum = (temp_tensor_sum + temp_tensor_sum.T) / 2
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = temp_tensor_sum
D[3 * op[1] : 3 * op[1] + 3, 3 * op[0] : 3 * op[0] + 3] = temp_tensor_sum.T
return D
    def get_stable_FCM(self, fcm, fcmasum=10):
        """
        Generate a symmeterized force constant matrix that obeys the objects symmetry
        constraints, has no unstable modes and also obeys the acoustic sum rule through an
        iterative procedure.

        Args:
            fcm (numpy array): unsymmeterized force constant matrix
            fcmasum (int): number of iterations to attempt to obey the acoustic sum
                rule (currently unused in the body; see get_asum_FCM's default)

        Return:
            3Nx3N numpy array representing the force constant matrix
        """
        check = 0
        count = 0
        while check == 0:
            # if resymmetrizing brings back unstable modes 20 times, the method breaks
            if count > 20:
                check = 1
                break

            eigs, vecs = np.linalg.eig(fcm)

            # Push any positive (unstable) mode beyond the three acoustic
            # ones down to a random negative value of comparable magnitude.
            maxeig = np.max(-1 * eigs)
            eigsort = np.argsort(np.abs(eigs))
            for i in range(3, len(eigs)):
                if eigs[eigsort[i]] > 1e-06:
                    eigs[eigsort[i]] = -1 * maxeig * np.random.rand()
            diag = np.real(np.eye(len(fcm)) * eigs)

            # Rebuild the matrix from the adjusted spectrum, then restore
            # symmetry and the acoustic sum rule.
            fcm = np.real(np.matmul(np.matmul(vecs, diag), vecs.T))
            fcm = self.get_symmetrized_FCM(fcm)
            fcm = self.get_asum_FCM(fcm)
            eigs, vecs = np.linalg.eig(fcm)
            unstable_modes = 0
            eigsort = np.argsort(np.abs(eigs))
            for i in range(3, len(eigs)):
                if eigs[eigsort[i]] > 1e-06:
                    unstable_modes = 1
            if unstable_modes == 1:
                count = count + 1
                continue
            check = 1

        return fcm
    # acoustic sum
    def get_asum_FCM(self, fcm, numiter=15):
        """
        Generate a symmeterized force constant matrix that obeys the objects symmetry
        constraints and obeys the acoustic sum rule through an iterative procedure.

        Args:
            fcm (numpy array): 3Nx3N unsymmeterized force constant matrix
            numiter (int): number of iterations to attempt to obey the acoustic sum
                rule

        Return:
            numpy array representing the force constant matrix
        """
        # set max force in reciprocal space
        operations = self.FCM_operations
        numsites = len(self.structure)

        D = np.ones([numsites * 3, numsites * 3])
        for num in range(numiter):
            X = np.real(fcm)

            # symmetry operations
            pastrow = 0
            # Row-sum excess of the first site's blocks, spread over sites.
            total = np.zeros([3, 3])
            for col in range(numsites):
                total = total + X[0:3, col * 3 : col * 3 + 3]

            total = total / (numsites)
            for op in operations:
                same = 0
                transpose = 0
                if op[0] == op[1] and op[0] == op[2] and op[0] == op[3]:
                    same = 1
                if op[0] == op[3] and op[1] == op[2]:
                    transpose = 1
                if transpose == 0 and same == 0:
                    # Correction block derived from its symmetry-equivalent
                    # block, averaged over the mapping operations.
                    D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = np.zeros([3, 3])

                    for symop in op[4]:
                        tempfcm = D[3 * op[2] : 3 * op[2] + 3, 3 * op[3] : 3 * op[3] + 3]
                        tempfcm = symop.transform_tensor(tempfcm)
                        D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] += tempfcm
                    if len(op[4]) != 0:
                        D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = D[
                            3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3
                        ] / len(op[4])
                    D[3 * op[1] : 3 * op[1] + 3, 3 * op[0] : 3 * op[0] + 3] = D[
                        3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3
                    ].T
                    continue
                # Get the difference in the sum up to this point
                currrow = op[0]
                if currrow != pastrow:
                    total = np.zeros([3, 3])
                    for col in range(numsites):
                        total = total + X[currrow * 3 : currrow * 3 + 3, col * 3 : col * 3 + 3]
                    for col in range(currrow):
                        total = total - D[currrow * 3 : currrow * 3 + 3, col * 3 : col * 3 + 3]
                    total = total / (numsites - currrow)
                pastrow = currrow

                # Apply the point symmetry operations of the site
                temp_tensor = Tensor(total)
                temp_tensor_sum = sum(temp_tensor.transform(symm_op) for symm_op in self.sharedops[op[0]][op[1]])

                if len(self.sharedops[op[0]][op[1]]) != 0:
                    temp_tensor_sum = temp_tensor_sum / (len(self.sharedops[op[0]][op[1]]))

                # Apply the proper transformation if there is an equivalent already
                if op[0] != op[1]:
                    for pair in range(len(op[4])):
                        temp_tensor2 = temp_tensor_sum.T
                        temp_tensor2 = op[4][pair].transform_tensor(temp_tensor2)
                        temp_tensor_sum = (temp_tensor_sum + temp_tensor2) / 2
                else:
                    temp_tensor_sum = (temp_tensor_sum + temp_tensor_sum.T) / 2

                D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = temp_tensor_sum
                D[3 * op[1] : 3 * op[1] + 3, 3 * op[0] : 3 * op[0] + 3] = temp_tensor_sum.T

            # Subtract the symmetrized correction; repeat numiter times.
            fcm = fcm - D

        return fcm
@requires(Phonopy, "phonopy not installed!")
def get_rand_FCM(self, asum=15, force=10):
"""
Generate a symmeterized force constant matrix from an unsymmeterized matrix
that has no unstable modes and also obeys the acoustic sum rule through an
iterative procedure
Args:
force (float): maximum force constant
asum (int): number of iterations to attempt to obey the acoustic sum
rule
Return:
NxNx3x3 np.array representing the force constant matrix
"""
numsites = len(self.structure.sites)
structure = pymatgen.io.phonopy.get_phonopy_structure(self.structure)
pnstruc = Phonopy(structure, np.eye(3), np.eye(3))
dyn = self.get_unstable_FCM(force)
dyn = self.get_stable_FCM(dyn)
dyn = np.reshape(dyn, (numsites, 3, numsites, 3)).swapaxes(1, 2)
dynmass = np.zeros([len(self.structure), len(self.structure), 3, 3])
masses = []
for j in range(numsites):
masses.append(self.structure.sites[j].specie.atomic_mass)
dynmass = np.zeros([numsites, numsites, 3, 3])
for m in range(numsites):
for n in range(numsites):
dynmass[m][n] = dyn[m][n] * np.sqrt(masses[m]) * np.sqrt(masses[n])
supercell = pnstruc.get_supercell()
primitive = pnstruc.get_primitive()
converter = dyntofc.DynmatToForceConstants(primitive, supercell)
dyn = np.reshape(np.swapaxes(dynmass, 1, 2), (numsites * 3, numsites * 3))
converter.set_dynamical_matrices(dynmat=[dyn])
converter.run()
fc = converter.get_force_constants()
return fc
def get_piezo(BEC, IST, FCM, rcond=0.0001):
    """
    Compute a piezoelectric tensor from its tensor building blocks.

    Args:
        BEC (numpy array): Nx3x3 array representing the born effective charge tensor
        IST (numpy array): Nx3x3x3 array representing the internal strain tensor
        FCM (numpy array): NxNx3x3 array representing the force constant matrix
        rcond (float): condition for excluding eigenvalues in the pseudoinverse

    Return:
        3x3x3 calculated Piezo tensor
    """
    # Doc fixes: this function is deterministic (not "random"), FCM is the
    # force constant matrix (the old text said "born effective charge
    # tensor"), and the parameter is `rcond`, not "rcondy".
    numsites = len(BEC)
    temp_fcm = np.reshape(np.swapaxes(FCM, 1, 2), (numsites * 3, numsites * 3))

    # Idiom fix: only the eigenvalues are needed to size the pseudoinverse
    # cutoff; the original computed (and discarded) the eigenvectors too.
    eigs = np.linalg.eigvals(temp_fcm)
    order = np.argsort(np.abs(eigs))
    # Cut below the three acoustic (near-zero) modes, plus the caller's
    # extra margin.
    cutoff = np.abs(eigs[order[2]]) / np.abs(eigs[order[-1]]) + rcond
    K = np.linalg.pinv(-temp_fcm, rcond=cutoff)

    K = np.reshape(K, (numsites, 3, numsites, 3)).swapaxes(1, 2)
    # 16.0216559424 -- presumably the e*A^2 -> C/m^2-compatible unit
    # conversion factor; TODO confirm (rand_piezo applies it again).
    return np.einsum("ikl,ijlm,jmno->kno", BEC, K, IST) * 16.0216559424
@requires(Phonopy, "phonopy not installed!")
def rand_piezo(struc, pointops, sharedops, BEC, IST, FCM, anumiter=10):
    """
    Generate a random piezoelectric tensor based on a structure and corresponding
    symmetry.

    Args:
        struc (pymatgen structure): structure whose symmetry operations the piezo tensor must obey
        pointops: list of point operations obeyed by a single atomic site
        sharedops: list of point operations shared by a pair of atomic sites
        BEC (numpy array): Nx3x3 array representing the born effective charge tensor
        IST (numpy array): Nx3x3x3 array representing the internal strain tensor
        FCM (numpy array): NxNx3x3 array representing the force constant matrix
            (the original docstring mislabeled it a "born effective charge tensor")
        anumiter (int): number of iterations for acoustic sum rule convergence
            (currently not forwarded to the helper calls)

    Return:
        list in the form of [Nx3x3 random born effective charge tenosr,
        Nx3x3x3 random internal strain tensor, NxNx3x3 random force constant matrix, 3x3x3 piezo tensor]
    """
    # Build the three randomized, symmetry-respecting ingredients.
    bec = BornEffectiveCharge(struc, BEC, pointops)
    bec.get_BEC_operations()
    rand_BEC = bec.get_rand_BEC()

    ist = InternalStrainTensor(struc, IST, pointops)
    ist.get_IST_operations()
    rand_IST = ist.get_rand_IST()

    fcm = ForceConstantMatrix(struc, FCM, pointops, sharedops)
    fcm.get_FCM_operations()
    rand_FCM = fcm.get_rand_FCM()

    # NOTE(review): get_piezo already multiplies by 16.0216559424, so the
    # factor is applied twice here -- confirm the intended units upstream.
    P = get_piezo(rand_BEC, rand_IST, rand_FCM) * 16.0216559424 / struc.volume

    return (rand_BEC, rand_IST, rand_FCM, P)
|
vorwerkc/pymatgen
|
pymatgen/analysis/piezo_sensitivity.py
|
Python
|
mit
| 27,582
|
[
"phonopy",
"pymatgen"
] |
150cf08788da41c6d04972a6640c169f8a7dbc41dc3d9894d2045e71053548aa
|
import sys
from time import time, ctime
import numpy as np
from math import sqrt, pi
from datetime import timedelta
from ase.units import Hartree, Bohr
from gpaw import GPAW, extra_parameters
from gpaw.utilities import unpack, devnull
from gpaw.utilities.blas import gemmdot, gemv
from gpaw.mpi import world, rank, size, serial_comm
from gpaw.lfc import LocalizedFunctionsCollection as LFC
from gpaw.grid_descriptor import GridDescriptor
from gpaw.utilities.memory import maxrss
from gpaw.fd_operators import Gradient
from gpaw.response.cell import get_primitive_cell, set_Gvectors
from gpaw.response.math_func import delta_function, \
two_phi_planewave_integrals
from gpaw.response.parallel import set_communicator, \
parallel_partition, SliceAlongFrequency, SliceAlongOrbitals
from gpaw.response.kernel import calculate_Kxc, calculate_Kc, calculate_Kc_q
from gpaw.kpt_descriptor import KPointDescriptor
from gpaw.wavefunctions.pw import PWLFC
import gpaw.wavefunctions.pw as pw
class BASECHI:
    """This class is to store the basic common stuff for chi and bse.

    It collects the GPAW calculator, k-point/grid/cell descriptors,
    eigenvalues and occupations, the plane-wave basis and the PAW
    projectors needed by the response-function codes (chi and BSE).
    NOTE: this file is Python 2 code (it uses ``print >>`` syntax).
    """

    def __init__(self,
                 calc=None,
                 nbands=None,
                 w=None,
                 q=None,
                 eshift=None,
                 ecut=10.,
                 density_cut=None,
                 G_plus_q=False,
                 eta=0.2,
                 rpad=None,
                 ftol=1e-5,
                 txt=None,
                 optical_limit=False):
        # Only stores user parameters; the expensive setup is deferred
        # to initialize().
        if rpad is None:
            rpad = np.ones(3, int)

        self.txtname = txt
        self.output_init()

        if isinstance(calc, str):
            # Always use serial_communicator when a filename is given.
            self.calc = GPAW(calc, communicator=serial_comm, txt=None)
        else:
            # To be optimized so that the communicator is loaded automatically
            # according to kcommsize.
            #
            # so temporarily it is used like this :
            # kcommsize = int (should <= world.size)
            # r0 = rank % kcommsize
            # ranks = np.arange(r0, r0+size, kcommsize)
            # calc = GPAW(filename.gpw, communicator=ranks, txt=None)
            self.calc = calc

        if self.calc is not None:
            self.pwmode = isinstance(self.calc.wfs, pw.PWWaveFunctions)
        else:
            self.pwmode = False
        if self.pwmode:
            # Plane-wave mode is only supported with a serial calculator here.
            assert self.calc.wfs.world.size == 1

        self.nbands = nbands
        self.q_c = q

        # chi.py modifies the input array w by dividing by Hartree.
        # This will change the user-supplied arrays in-place unless
        # we create a copy. So now we create a copy. *Grumble*
        #
        # To make matters worse, w is allowed to be None (why not take
        # care of that *before*?? This should really be cleaned up.
        if isinstance(w, np.ndarray):
            w = w.copy()
        self.w_w = w

        self.eta = eta
        self.ftol = ftol
        if isinstance(ecut, int) or isinstance(ecut, float):
            # Scalar cutoff: same value along all three directions.
            self.ecut = np.ones(3) * ecut
        else:
            assert len(ecut) == 3
            self.ecut = np.array(ecut, dtype=float)
        self.density_cut = density_cut
        self.G_plus_q = G_plus_q
        self.rpad = rpad
        self.optical_limit = optical_limit
        if self.optical_limit:
            # Small but finite q used to approximate the q -> 0 limit.
            self.qopt = 1e-5
        self.eshift = eshift

    def initialize(self):
        """Set up k-points, grids, eigenvalues, plane waves and projectors."""
        # Convert user parameters from eV to atomic units (Hartree).
        self.eta /= Hartree
        self.ecut /= Hartree

        calc = self.calc
        self.nspins = self.calc.wfs.nspins

        # kpoint init
        self.kd = kd = calc.wfs.kd
        self.nikpt = kd.nibzkpts
        self.ftol /= kd.nbzkpts

        # cell init
        self.acell_cv = calc.wfs.gd.cell_cv
        self.acell_cv, self.bcell_cv, self.vol, self.BZvol = \
            get_primitive_cell(self.acell_cv, rpad=self.rpad)

        # grid init
        gd = calc.wfs.gd.new_descriptor(comm=serial_comm)
        self.pbc = gd.pbc_c
        self.gd = gd
        self.nG0 = np.prod(gd.N_c)
        # Number of grid points and volume including zero padding
        self.nGrpad = gd.N_c * self.rpad
        self.nG0rpad = np.prod(self.nGrpad)
        self.d_c = [Gradient(gd, i, n=4, dtype=complex).apply for i in range(3)]

        # obtain eigenvalues, occupations
        nibzkpt = kd.nibzkpts
        kweight_k = kd.weight_k

        self.eFermi = self.calc.occupations.get_fermi_level()

        try:
            # If a subclass / user already set self.e_skn, keep it.
            self.e_skn
            self.printtxt('Use eigenvalues from user.')
        except:
            self.printtxt('Use eigenvalues from the calculator.')
            self.e_skn = {}
            self.f_skn = {}
            for ispin in range(self.nspins):
                self.e_skn[ispin] = np.array([calc.get_eigenvalues(kpt=k, spin=ispin)
                                              for k in range(nibzkpt)]) / Hartree
                self.f_skn[ispin] = np.array([calc.get_occupation_numbers(kpt=k, spin=ispin)
                                              / kweight_k[k]
                                              for k in range(nibzkpt)]) / kd.nbzkpts
            #self.printtxt('Eigenvalues(k=0) are:')
            #print >> self.txt, self.e_skn[0][0] * Hartree

        # Keep an unshifted copy of the eigenvalues (used in density_matrix).
        self.enoshift_skn = {}
        for ispin in range(self.nspins):
            self.enoshift_skn[ispin] = self.e_skn[ispin].copy()
        if self.eshift is not None:
            self.add_discontinuity(self.eshift)
            self.printtxt('Shift unoccupied bands by %f eV' % (self.eshift))

        # k + q init
        if self.q_c is not None:
            self.qq_v = np.dot(self.q_c, self.bcell_cv)  # summation over c

            if self.optical_limit:
                kq_k = np.arange(kd.nbzkpts)
                self.expqr_g = 1.
            else:
                r_vg = gd.get_grid_point_coordinates()  # (3, nG)
                qr_g = gemmdot(self.qq_v, r_vg, beta=0.0)
                self.expqr_g = np.exp(-1j * qr_g)
                del r_vg, qr_g
                kq_k = kd.find_k_plus_q(self.q_c)
            self.kq_k = kq_k

        # Plane wave init
        if self.G_plus_q:
            self.npw, self.Gvec_Gc, self.Gindex_G = set_Gvectors(self.acell_cv,
                                                                 self.bcell_cv,
                                                                 self.gd.N_c,
                                                                 self.ecut,
                                                                 q=self.q_c)
        else:
            self.npw, self.Gvec_Gc, self.Gindex_G = set_Gvectors(self.acell_cv,
                                                                 self.bcell_cv,
                                                                 self.gd.N_c,
                                                                 self.ecut)

        # band init
        if self.nbands is None:
            self.nbands = calc.wfs.bd.nbands
        self.nvalence = calc.wfs.nvalence

        # Projectors init
        setups = calc.wfs.setups
        self.spos_ac = calc.atoms.get_scaled_positions()
        if self.pwmode:
            self.pt = PWLFC([setup.pt_j for setup in setups], self.calc.wfs.pd)
            self.pt.set_positions(self.spos_ac)
        else:
            self.pt = LFC(gd, [setup.pt_j for setup in setups],
                          KPointDescriptor(self.kd.bzk_kc),
                          dtype=complex, forces=True)
            self.pt.set_positions(self.spos_ac)

        # Printing calculation information
        self.print_stuff()

        return

    def output_init(self):
        """Attach the output stream (stdout on rank 0, a file, or devnull)."""
        if self.txtname is None:
            if rank == 0:
                self.txt = sys.stdout
            else:
                # Silence all non-root ranks.
                sys.stdout = devnull
                self.txt = devnull
        elif self.txtname == devnull:
            self.txt = devnull
        else:
            assert type(self.txtname) is str
            from ase.parallel import paropen
            self.txt = paropen(self.txtname, 'w')

    def printtxt(self, text):
        # Python 2 file-redirection print.
        print >> self.txt, text

    def print_stuff(self):
        """Print a summary of the calculation parameters to the output stream."""
        printtxt = self.printtxt
        printtxt('')
        printtxt('Parameters used:')
        printtxt('')
        printtxt('Unit cell (a.u.):')
        printtxt(self.acell_cv)
        printtxt('Volume of cell (a.u.**3) : %f' % self.vol)
        printtxt('Reciprocal cell (1/a.u.)')
        printtxt(self.bcell_cv)
        printtxt('BZ volume (1/a.u.**3) : %f' % self.BZvol)
        printtxt('Number of G-vectors / Grid : %d %s'
                 % (self.nG0, tuple(self.gd.N_c)))
        printtxt('')
        # NOTE(review): self.vcut is not set anywhere in this class;
        # presumably a subclass defines it before print_stuff() runs — confirm.
        printtxt('Coulomb interaction cutoff : %s' % self.vcut)
        printtxt('')
        printtxt('Number of bands : %d' % self.nbands)
        printtxt('Number of kpoints : %d' % self.kd.nbzkpts)
        if self.ecut[0] == self.ecut[1] and self.ecut[0] == self.ecut[2]:
            printtxt('Planewave ecut (eV) : %4.1f' % (self.ecut[0] * Hartree))
        else:
            printtxt('Planewave ecut (eV) : (%f, %f, %f)' % tuple(self.ecut * Hartree))
        printtxt('Number of planewave used : %d' % self.npw)
        printtxt('Broadening (eta) : %f' % (self.eta * Hartree))
        printtxt('')
        if self.q_c is not None:
            if self.optical_limit:
                printtxt('Optical limit calculation ! (q=1e-5)')
            else:
                printtxt('q in reduced coordinate : (%f %f %f)' % tuple(self.q_c))
                printtxt('q in cartesian coordinate (1/A): (%f %f %f)' % tuple(self.qq_v / Bohr))
                printtxt('|q| (1/A) : %f' % np.linalg.norm(self.qq_v / Bohr))

    def timing(self, i, t0, n_local, txt):
        """Print progress and a time estimate for a loop over n_local items.

        i is the current iteration, t0 the loop start time (from time()),
        txt a label for the messages.  Reports at i == 0 and then roughly
        every fifth of the loop on rank 0.
        """
        if i == 0:
            dt = time() - t0
            self.totaltime = dt * n_local
            self.printtxt(' Finished %s 0 in %s, estimate %s left.'
                          % (txt, timedelta(seconds=round(dt)),
                             timedelta(seconds=round(self.totaltime))))
        if rank == 0 and n_local // 5 > 0:
            if i > 0 and i % (n_local // 5) == 0:
                dt = time() - t0
                self.printtxt(' Finished %s %d in %s, estimate %s left.'
                              % (txt, i, timedelta(seconds=round(dt)),
                                 timedelta(seconds=round(self.totaltime
                                                         - dt))))

    def get_phi_aGp(self, q_c=None, parallel=True, alldir=False):
        """Calculate PAW correction matrix elements for each atom.

        Returns a dict phi_aGp (atom index -> array over G and projector
        pairs); with alldir=True also returns phiG0_avp with the three
        Cartesian q->0 replacements of the G=0 element.
        """
        if q_c is None:
            q_c = self.q_c
            qq_v = self.qq_v
            optical_limit = self.optical_limit
        else:
            optical_limit = False
            if np.abs(q_c).sum() < 1e-8:
                # q is (numerically) zero: use a small finite q instead.
                q_c = np.array([0.0001, 0, 0])
                optical_limit = True
            qq_v = np.dot(q_c, self.bcell_cv)

        setups = self.calc.wfs.setups
        spos_ac = self.calc.atoms.get_scaled_positions()

        kk_Gv = gemmdot(q_c + self.Gvec_Gc, self.bcell_cv.copy(), beta=0.0)
        phi_aGp = {}
        phiG0_avp = {}

        if parallel:
            from gpaw.response.parallel import parallel_partition

            npw, npw_local, Gstart, Gend = parallel_partition(
                self.npw, self.comm.rank, self.comm.size, reshape=False)
        else:
            Gstart = 0
            Gend = self.npw

        for a, id in enumerate(setups.id_a):
            phi_aGp[a] = two_phi_planewave_integrals(kk_Gv, setups[a], Gstart, Gend)
            for iG in range(Gstart, Gend):
                # Structure-factor phase for the atom position.
                phi_aGp[a][iG] *= np.exp(-1j * 2. * pi *
                                         np.dot(q_c + self.Gvec_Gc[iG], spos_ac[a]))
            if parallel:
                self.comm.sum(phi_aGp[a])

        # For optical limit, G == 0 part should change
        if optical_limit:
            for a, id in enumerate(setups.id_a):
                nabla_iiv = setups[a].nabla_iiv
                phi_aGp[a][0] = -1j * (np.dot(nabla_iiv, qq_v)).ravel()

                phiG0_avp[a] = np.zeros((3, len(phi_aGp[a][0])), complex)
                for dir in range(3):  # 3 dimension
                    q2_c = np.diag((1, 1, 1))[dir] * self.qopt
                    qq2_v = np.dot(q2_c, self.bcell_cv)  # summation over c
                    phiG0_avp[a][dir] = -1j * (np.dot(nabla_iiv, qq2_v)).ravel()

        if alldir:
            return phi_aGp, phiG0_avp
        else:
            return phi_aGp

    def get_wavefunction(self, ibzk, n, check_focc=True, spin=0):
        """Return the pseudo wavefunction of band n at IBZ k-point ibzk.

        Handles three parallelization layouts; the serial / LCAO / domain-
        decomposed path broadcasts from rank 0, the k-point/band-parallel
        path exchanges arrays explicitly via world.send/receive.
        NOTE(review): self.kcomm and self.wScomm are not set in this class;
        presumably a subclass defines them — confirm.
        """
        if (self.calc.wfs.world.size == 1 or self.calc.wfs.gd.comm.size != 1
            or self.calc.input_parameters['mode'] == 'lcao'):
            if not check_focc:
                return
            else:
                psit_G = self.calc.wfs.get_wave_function_array(n, ibzk, spin)

                if self.calc.wfs.world.size == 1:
                    return np.complex128(psit_G)

                if self.calc.wfs.world.rank != 0:
                    psit_G = self.calc.wfs.gd.empty(dtype=self.calc.wfs.dtype,
                                                    global_array=True)
                self.calc.wfs.world.broadcast(psit_G, 0)

                return np.complex128(psit_G)
        else:
            # support ground state calculation with kpoint and band parallelization
            # but domain decomposition must = 1
            kpt_rank, u = self.calc.wfs.kd.get_rank_and_index(0, ibzk)
            bzkpt_rank = self.kcomm.rank
            band_rank, myn = self.calc.wfs.bd.who_has(n)
            assert self.calc.wfs.gd.comm.size == 1
            world_rank = (kpt_rank * self.calc.wfs.band_comm.size + band_rank)

            # in the following, kpt_rank is assigned to world_rank
            klist = np.array([world_rank, u, bzkpt_rank, myn])
            klist_kcomm = np.zeros((self.kcomm.size, 4), dtype=int)
            self.kcomm.all_gather(klist, klist_kcomm)

            check_focc_global = np.zeros(self.kcomm.size, dtype=bool)
            self.kcomm.all_gather(np.array([check_focc]), check_focc_global)

            psit_G = self.calc.wfs.gd.empty(dtype=self.calc.wfs.dtype)

            for i in range(self.kcomm.size):
                if check_focc_global[i]:
                    kpt_rank, u, bzkpt_rank, nlocal = klist_kcomm[i]
                    if kpt_rank == bzkpt_rank:
                        if rank == kpt_rank:
                            psit_G = self.calc.wfs.kpt_u[u].psit_nG[nlocal]
                    else:
                        if rank == kpt_rank:
                            world.send(self.calc.wfs.kpt_u[u].psit_nG[nlocal],
                                       bzkpt_rank, 1300 + bzkpt_rank)
                        if rank == bzkpt_rank:
                            psit_G = self.calc.wfs.gd.empty(dtype=self.calc.wfs.dtype)
                            world.receive(psit_G, kpt_rank, 1300 + bzkpt_rank)

            self.wScomm.broadcast(psit_G, 0)

            return psit_G

    def add_discontinuity(self, shift):
        """Shift all eigenvalues above the Fermi level upward by shift (eV)."""
        for ispin in range(self.nspins):
            for k in range(self.kd.nibzkpts):
                for i in range(self.e_skn[0].shape[1]):
                    if self.e_skn[ispin][k, i] > self.eFermi:
                        self.e_skn[ispin][k, i] += shift / Hartree

    def density_matrix(self, n, m, k, kq=None,
                       spin1=0, spin2=0, phi_aGp=None, Gspace=True):
        """Calculate the pair density between band n at k and band m at k+q.

        With Gspace=False the real-space product is returned; otherwise the
        plane-wave coefficients rho_G (including PAW corrections) are
        returned, with special handling of the G=0 element in the optical
        limit.
        """
        gd = self.gd
        kd = self.kd
        optical_limit = False

        if kq is None:
            kq = self.kq_k[k]
            expqr_g = self.expqr_g
            q_v = self.qq_v
            optical_limit = self.optical_limit
            q_c = self.q_c
        else:
            q_c = kd.bzk_kc[kq] - kd.bzk_kc[k]
            # Fold the k-point difference back into the first BZ.
            q_c[np.where(q_c > 0.501)] -= 1
            q_c[np.where(q_c < -0.499)] += 1

            if (np.abs(q_c) < self.ftol).all():
                optical_limit = True
                q_c = self.q_c
            q_v = np.dot(q_c, self.bcell_cv)
            r_vg = gd.get_grid_point_coordinates()  # (3, nG)
            qr_g = gemmdot(q_v, r_vg, beta=0.0)
            expqr_g = np.exp(-1j * qr_g)
            if optical_limit:
                expqr_g = 1

        ibzkpt1 = kd.bz2ibz_k[k]
        ibzkpt2 = kd.bz2ibz_k[kq]

        psitold_g = self.get_wavefunction(ibzkpt1, n, True, spin=spin1)
        psit1_g = kd.transform_wave_function(psitold_g, k)

        psitold_g = self.get_wavefunction(ibzkpt2, m, True, spin=spin2)
        psit2_g = kd.transform_wave_function(psitold_g, kq)

        if Gspace is False:
            # Real-space pair density.
            return psit1_g.conj() * psit2_g * expqr_g
        else:
            tmp_g = psit1_g.conj() * psit2_g * expqr_g
            # zero padding is included through the FFT
            rho_g = np.fft.fftn(tmp_g, s=self.nGrpad) * self.vol / self.nG0rpad
            # Here, planewave cutoff is applied
            rho_G = rho_g.ravel()[self.Gindex_G]

            if optical_limit:
                # G = 0 element from the gradient (velocity) matrix element.
                dpsit_g = gd.empty(dtype=complex)
                tmp = np.zeros((3), dtype=complex)
                phase_cd = np.exp(2j * pi * gd.sdisp_cd * kd.bzk_kc[kq, :, np.newaxis])
                for ix in range(3):
                    self.d_c[ix](psit2_g, dpsit_g, phase_cd)
                    tmp[ix] = gd.integrate(psit1_g.conj() * dpsit_g)
                rho_G[0] = -1j * np.dot(q_v, tmp)

            calc = self.calc
            pt = self.pt

            if not self.pwmode:
                if calc.wfs.world.size > 1 or kd.nbzkpts == 1:
                    P1_ai = pt.dict()
                    pt.integrate(psit1_g, P1_ai, k)
                    P2_ai = pt.dict()
                    pt.integrate(psit2_g, P2_ai, kq)
                else:
                    P1_ai = self.get_P_ai(k, n, spin1)
                    P2_ai = self.get_P_ai(kq, m, spin2)
            else:
                # first calculate P_ai at ibzkpt, then rotate to k
                u = self.kd.get_rank_and_index(spin1, ibzkpt1)[1]
                Ptmp_ai = pt.dict()
                kpt = calc.wfs.kpt_u[u]
                pt.integrate(kpt.psit_nG[n], Ptmp_ai, ibzkpt1)
                P1_ai = self.get_P_ai(k, n, spin1, Ptmp_ai)

                u = self.kd.get_rank_and_index(spin2, ibzkpt2)[1]
                Ptmp_ai = pt.dict()
                kpt = calc.wfs.kpt_u[u]
                pt.integrate(kpt.psit_nG[m], Ptmp_ai, ibzkpt2)
                P2_ai = self.get_P_ai(kq, m, spin2, Ptmp_ai)

            if phi_aGp is None:
                try:
                    # NOTE(review): self.mode / self.bzq_qc / self.reader are
                    # provided by subclasses; AttributeError falls back to
                    # the precomputed self.phi_aGp.
                    if not self.mode == 'RPA':
                        if optical_limit:
                            iq = kd.where_is_q(np.zeros(3), self.bzq_qc)
                        else:
                            iq = kd.where_is_q(q_c, self.bzq_qc)

                        assert np.abs(self.bzq_qc[iq] - q_c).sum() < 1e-8
                        phi_aGp = self.load_phi_aGp(self.reader, iq)  # phi_qaGp[iq]
                except AttributeError:
                    phi_aGp = self.phi_aGp

            for a, id in enumerate(self.calc.wfs.setups.id_a):
                # Add PAW corrections: rho_G += phi_Gp . P_p
                P_p = np.outer(P1_ai[a].conj(), P2_ai[a]).ravel()
                phi_Gp = np.ascontiguousarray(phi_aGp[a], complex)
                gemv(1.0, phi_Gp, P_p, 1.0, rho_G)

            if optical_limit:
                if n == m:
                    rho_G[0] = 1.
                elif np.abs(self.e_skn[spin2][ibzkpt2, m] - self.e_skn[spin1][ibzkpt1, n]) < 1e-5:
                    # Degenerate pair: avoid dividing by a (near-)zero gap.
                    rho_G[0] = 0.
                else:
                    rho_G[0] /= (self.enoshift_skn[spin2][ibzkpt2, m] - self.enoshift_skn[spin1][ibzkpt1, n])

            return rho_G

    def get_P_ai(self, k, n, spin=0, Ptmp_ai=None):
        """Get PAW projector coefficients at BZ k-point k.

        The coefficients are obtained from the corresponding IBZ k-point by
        applying the symmetry rotation R_sii, a phase for the fractional
        translation, and complex conjugation for time reversal.
        """
        calc = self.calc
        kd = self.calc.wfs.kd
        spos_ac = self.spos_ac
        ibzkpt = kd.bz2ibz_k[k]
        u = ibzkpt + kd.nibzkpts * spin
        kpt = calc.wfs.kpt_u[u]
        s = kd.sym_k[k]
        time_reversal = kd.time_reversal_k[k]
        P_ai = {}
        for a, id in enumerate(calc.wfs.setups.id_a):
            b = kd.symmetry.a_sa[s, a]

            # Lattice-vector offset produced by mapping atom a to atom b.
            S_c = (np.dot(spos_ac[a], kd.symmetry.op_scc[s]) - kd.symmetry.ft_sc[s] - spos_ac[b])

            #print abs(S_c.round() - S_c).max()
            #print 'S_c', abs(S_c).max()
            assert abs(S_c.round() - S_c).max() < 1e-8  ##############
            k_c = kd.ibzk_kc[kpt.k]
            x = np.exp(2j * pi * np.dot(k_c, S_c))

            if Ptmp_ai is None:
                P_i = np.dot(calc.wfs.setups[a].R_sii[s], kpt.P_ani[b][n]) * x
            else:
                P_i = np.dot(calc.wfs.setups[a].R_sii[s], Ptmp_ai[b]) * x
            if time_reversal:
                P_i = P_i.conj()
            P_ai[a] = P_i

        return P_ai
|
robwarm/gpaw-symm
|
gpaw/response/base.py
|
Python
|
gpl-3.0
| 21,011
|
[
"ASE",
"GPAW"
] |
5a4fd6dcc255deecadcf36e370f07d1fd7941bb2132e340aba3783d88059767c
|
### This code came from the google sample code ###
### No value added here other than turning on caching. ###
import httplib2
import os
import sys
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the {{ Google Cloud Console }} at
# {{ https://cloud.google.com/console }}.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = "client_secrets.json"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the {{ Cloud Console }}
{{ https://cloud.google.com/console }}
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
# This OAuth 2.0 access scope allows for full read/write access to the
# authenticated user's account.
YOUTUBE_READ_WRITE_SCOPE = "https://www.googleapis.com/auth/youtube"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
def get_youtube_connection(args):
    """Run the OAuth2 flow (or reuse stored credentials) and return a
    YouTube Data API client.

    args are the parsed oauth2client argparser options passed to run_flow.
    """
    oauth_flow = flow_from_clientsecrets(
        CLIENT_SECRETS_FILE,
        message=MISSING_CLIENT_SECRETS_MESSAGE,
        scope=YOUTUBE_READ_WRITE_SCOPE)

    # Credentials are cached in a file named after the running script.
    credential_store = Storage("%s-oauth2.json" % sys.argv[0])
    credentials = credential_store.get()
    if credentials is None or credentials.invalid:
        credentials = run_flow(oauth_flow, credential_store, args)

    # HTTP responses are cached on disk (".playlister-cache") to cut down
    # on repeated API traffic.
    authorized_http = credentials.authorize(httplib2.Http(cache=".playlister-cache"))
    return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                 http=authorized_http)
|
fmoody/playlister
|
google_auth_code.py
|
Python
|
gpl-2.0
| 2,599
|
[
"VisIt"
] |
ec27785058753e307379b6f008d6c1f6def88aecddd251016245db6c020915ae
|
"""
Records who we trust to sign feeds.
Trust is divided up into domains, so that it is possible to trust a key
in some cases and not others.
@var trust_db: Singleton trust database instance.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, SafeException, logger
import os
from zeroinstall import support
from zeroinstall.support import basedir, tasks
from .namespaces import config_site, config_prog, XMLNS_TRUST
KEY_INFO_TIMEOUT = 10 # Maximum time to wait for response from key-info-server
class TrustDB(object):
	"""A database of trusted keys.
	@ivar keys: maps trusted key fingerprints to a set of domains for which it is trusted
	@type keys: {str: set(str)}
	@ivar watchers: callbacks invoked by L{notify}
	@see: L{trust_db} - the singleton instance of this class"""
	__slots__ = ['keys', 'watchers', '_dry_run']

	def __init__(self):
		# self.keys is populated lazily by ensure_uptodate().
		self.keys = None
		self.watchers = []
		self._dry_run = False

	def is_trusted(self, fingerprint, domain = None):
		"""Check whether the key is trusted (in the given domain, if any).
		@type fingerprint: str
		@type domain: str | None
		@rtype: bool"""
		self.ensure_uptodate()

		domains = self.keys.get(fingerprint, None)
		if not domains: return False	# Unknown key

		if domain is None:
			return True	# Deprecated

		return domain in domains or '*' in domains

	def get_trust_domains(self, fingerprint):
		"""Return the set of domains in which this key is trusted.
		If the list includes '*' then the key is trusted everywhere.
		@type fingerprint: str
		@rtype: {str}
		@since: 0.27"""
		self.ensure_uptodate()
		return self.keys.get(fingerprint, set())

	def get_keys_for_domain(self, domain):
		"""Return the set of keys trusted for this domain.
		@type domain: str
		@rtype: {str}
		@since: 0.27"""
		self.ensure_uptodate()
		return set([fp for fp in self.keys
				   if domain in self.keys[fp]])

	def trust_key(self, fingerprint, domain = '*'):
		"""Add key to the list of trusted fingerprints.
		@param fingerprint: base 16 fingerprint without any spaces
		@type fingerprint: str
		@param domain: domain in which key is to be trusted
		@type domain: str
		@note: call L{notify} after trusting one or more new keys"""
		if self.is_trusted(fingerprint, domain): return

		if self._dry_run:
			print(_("[dry-run] would trust key {key} for {domain}").format(key = fingerprint, domain = domain))

		int(fingerprint, 16)	# Ensure fingerprint is valid

		if fingerprint not in self.keys:
			self.keys[fingerprint] = set()

		#if domain == '*':
		#	warn("Calling trust_key() without a domain is deprecated")

		# NOTE(review): in dry-run mode the in-memory set is still updated;
		# only the on-disk write (in save()) is skipped.
		self.keys[fingerprint].add(domain)
		self.save()

	def untrust_key(self, key, domain = '*'):
		"""Remove a domain from a key's trust list; drop the key when no
		domains remain.
		@type key: str
		@type domain: str"""
		if self._dry_run:
			print(_("[dry-run] would untrust key {key} for {domain}").format(key = key, domain = domain))
		self.ensure_uptodate()
		self.keys[key].remove(domain)

		if not self.keys[key]:
			# No more domains for this key
			del self.keys[key]

		self.save()

	def save(self):
		"""Write the in-memory database to trustdb.xml (atomic replace)."""
		d = basedir.save_config_path(config_site, config_prog)
		db_file = os.path.join(d, 'trustdb.xml')
		if self._dry_run:
			print(_("[dry-run] would update trust database {file}").format(file = db_file))
			return
		from xml.dom import minidom
		import tempfile

		doc = minidom.Document()

		root = doc.createElementNS(XMLNS_TRUST, 'trusted-keys')
		root.setAttribute('xmlns', XMLNS_TRUST)
		doc.appendChild(root)

		for fingerprint in self.keys:
			keyelem = doc.createElementNS(XMLNS_TRUST, 'key')
			root.appendChild(keyelem)
			keyelem.setAttribute('fingerprint', fingerprint)
			for domain in self.keys[fingerprint]:
				domainelem = doc.createElementNS(XMLNS_TRUST, 'domain')
				domainelem.setAttribute('value', domain)
				keyelem.appendChild(domainelem)

		# Write to a temporary file first, then rename over the database.
		with tempfile.NamedTemporaryFile(dir = d, prefix = 'trust-', delete = False, mode = 'wt') as tmp:
			doc.writexml(tmp, indent = "", addindent = " ", newl = "\n", encoding = 'utf-8')
		support.portable_rename(tmp.name, db_file)

	def notify(self):
		"""Call all watcher callbacks.
		This should be called after trusting or untrusting one or more new keys.
		@since: 0.25"""
		for w in self.watchers: w()

	def ensure_uptodate(self):
		"""(Re)load self.keys from the on-disk database."""
		if self._dry_run:
			if self.keys is None: self.keys = {}
			return
		from xml.dom import minidom

		# This is a bit inefficient... (could cache things)
		self.keys = {}

		trust = basedir.load_first_config(config_site, config_prog, 'trustdb.xml')
		if trust:
			keys = minidom.parse(trust).documentElement
			for key in keys.getElementsByTagNameNS(XMLNS_TRUST, 'key'):
				domains = set()
				self.keys[key.getAttribute('fingerprint')] = domains
				for domain in key.getElementsByTagNameNS(XMLNS_TRUST, 'domain'):
					domains.add(domain.getAttribute('value'))
		else:
			# Convert old database to XML format
			trust = basedir.load_first_config(config_site, config_prog, 'trust')
			if trust:
				#print "Loading trust from", trust_db
				with open(trust, 'rt') as stream:
					for key in stream:
						if key:
							# NOTE(review): lines keep their trailing
							# newline here, so legacy fingerprints include
							# '\n' — looks like a latent bug; confirm.
							self.keys[key] = set(['*'])
self.keys[key] = set(['*'])
def domain_from_url(url):
	"""Extract the trust domain for a URL.
	@param url: the feed's URL
	@type url: str
	@return: the trust domain
	@rtype: str
	@since: 0.27
	@raise SafeException: the URL can't be parsed"""
	try:
		import urlparse
	except ImportError:
		from urllib import parse as urlparse	# Python 3

	# Local paths have no domain at all.
	if os.path.isabs(url):
		raise SafeException(_("Can't get domain from a local path: '%s'") % url)

	netloc = urlparse.urlparse(url)[1]
	if not netloc or netloc == '*':
		raise SafeException(_("Can't extract domain from URL '%s'") % url)
	return netloc
trust_db = TrustDB()
class TrustMgr(object):
	"""A TrustMgr handles the process of deciding whether to trust new keys
	(contacting the key information server, prompting the user, accepting automatically, etc)
	@since: 0.53"""

	__slots__ = ['config', '_current_confirm']

	def __init__(self, config):
		"""@type config: L{zeroinstall.injector.config.Config}"""
		self.config = config
		self._current_confirm = None	# (a lock to prevent asking the user multiple questions at once)

	@tasks.async
	def confirm_keys(self, pending):
		"""We don't trust any of the signatures yet. Collect information about them and add the keys to the
		trusted list, possibly after confirming with the user (via config.handler).
		Updates the L{trust} database, and then calls L{trust.TrustDB.notify}.
		@param pending: an object holding details of the updated feed
		@type pending: L{PendingFeed}
		@return: A blocker that triggers when the user has chosen, or None if already done.
		@rtype: None | L{Blocker}
		@since: 0.53"""
		assert pending.sigs

		from zeroinstall.injector import gpg
		valid_sigs = [s for s in pending.sigs if isinstance(s, gpg.ValidSig)]
		if not valid_sigs:
			def format_sig(sig):
				# Include any GPG diagnostics to show why validation failed.
				msg = str(sig)
				if sig.messages:
					msg += "\nMessages from GPG:\n" + sig.messages
				return msg
			raise SafeException(_('No valid signatures found on "%(url)s". Signatures:%(signatures)s') %
					{'url': pending.url, 'signatures': ''.join(['\n- ' + format_sig(s) for s in pending.sigs])})

		# Start downloading information about the keys...
		fetcher = self.config.fetcher
		kfs = {}
		for sig in valid_sigs:
			kfs[sig] = fetcher.fetch_key_info(sig.fingerprint)

		# Wait up to KEY_INFO_TIMEOUT seconds for key information to arrive. Avoids having the dialog
		# box update while the user is looking at it, and may allow it to be skipped completely in some
		# cases.
		timeout = tasks.TimeoutBlocker(KEY_INFO_TIMEOUT, "key info timeout")
		while True:
			key_info_blockers = [sig_info.blocker for sig_info in kfs.values() if sig_info.blocker is not None]
			if not key_info_blockers:
				break
			logger.info("Waiting for response from key-info server: %s", key_info_blockers)
			yield [timeout] + key_info_blockers
			if timeout.happened:
				logger.info("Timeout waiting for key info response")
				break

		# If we're already confirming something else, wait for that to finish...
		while self._current_confirm is not None:
			logger.info("Waiting for previous key confirmations to finish")
			yield self._current_confirm

		domain = domain_from_url(pending.url)

		if self.config.auto_approve_keys:
			# Auto-approval only applies to feeds we have never seen before.
			existing_feed = self.config.iface_cache.get_feed(pending.url)
			if not existing_feed:
				changes = False
				trust_db._dry_run = self.config.handler.dry_run
				for sig, kf in kfs.items():
					for key_info in kf.info:
						if key_info.getAttribute("vote") == "good":
							logger.info(_("Automatically approving key for new feed %s based on response from key info server"), pending.url)
							trust_db.trust_key(sig.fingerprint, domain)
							changes = True
				if changes:
					trust_db.notify()

		# Check whether we still need to confirm. The user may have
		# already approved one of the keys while dealing with another
		# feed, or we may have just auto-approved it.
		for sig in kfs:
			is_trusted = trust_db.is_trusted(sig.fingerprint, domain)
			if is_trusted:
				return

		# Take the lock and confirm this feed
		self._current_confirm = lock = tasks.Blocker('confirm key lock')
		try:
			done = self.config.handler.confirm_import_feed(pending, kfs)
			if done is not None:
				yield done
				tasks.check(done)
		finally:
			self._current_confirm = None
			lock.trigger()
|
rammstein/0install
|
zeroinstall/injector/trust.py
|
Python
|
lgpl-2.1
| 9,330
|
[
"VisIt"
] |
40556d4c9b31439205eb2fa599c52c642c79259f69ffcd8375eeed8ff63f3122
|
from proteus import *
from proteus.default_p import *
from math import *
"""
2D, Linear advection of a guassian
"""
## \page Tests Test Problems
# \ref la_gauss_2d_p.py "Linear advection of a Gaussian"
# \addtogroup test
#
# \file la_gauss_2d_p.py
# @{
#
##\ingroup test
# \brief Conservative linear advection of a cone in a rotating
# velocity field.
#
# \f{eqnarray*}
# \phi_t + \nabla \cdot (\vec u \phi) &=& 0 \\
# \Omega &=& [0,1] \times [0,1] \\
# u^{x} &=& 2\pi(y-1/2)\\
# u^{y} &=& 2\pi(1/2-x)\\
# \phi^{ex}(x,y,t) &=& \exp\left(-\frac{\|\vec x - \vec x_c\|^2}{2\sigma^2}\right)
# \f}
#
# where
# \f$\bar{x} = x - x_c\f$, \f$\bar{y} = y - y_c\f$, and
# \f$ x_c = \sin(2\pi t)/4 + 1/2\f$, \f$\; y_c = \cos(2\pi t)/4 + 1/2 \f$
#
# \image html save_la_gauss_2d_exact.jpg "exact solution, T=0.75"
# \image latex save_la_gauss_2d_exact.eps "exact solution"
# \image html save_la_gauss_2d_dgp2_soln.jpg "RKDG P^2 solution, Cr=0.15, L^2 error= 8.3e-4"
# \image latex save_la_gauss_2d_dgp2_soln.eps "RKDG $P^2$ solution, Cr=0.15, $L^2$ error= 8.3e-4"
#
nd = 2
name = 'la_gauss_2d_np1'
class RotatingGaussian2D:
    """Gaussian bump whose center rotates on a circle of radius 1/4
    around (1/2, 1/2) with period 1 in time."""

    def __init__(self, sigma=1./8.):
        self.sigma = sigma
        self.xc = 0.65
        self.yc = 0.65

    def uOfXT(self, x, t):
        """Evaluate the exact solution at point x = (x0, x1) and time t."""
        cx = 0.25 * sin(2 * pi * t) + 0.5
        cy = 0.25 * cos(2 * pi * t) + 0.5
        dx, dy = x[0] - cx, x[1] - cy
        return exp(-0.5 * (dx * dx + dy * dy) / self.sigma ** 2)
class ConstantVelocityGaussian2D:
    """Gaussian bump translated with a constant velocity b from the
    initial center (1/4, 1/2)."""

    def __init__(self, sigma=1./8., b=[1.0, 0.0]):
        self.sigma = sigma
        self.xc = 0.25
        self.yc = 0.5
        self.b = b

    def uOfXT(self, x, t):
        """Evaluate the exact solution at point x = (x0, x1) and time t."""
        cx = self.xc + self.b[0] * t
        cy = self.yc + self.b[1] * t
        dx, dy = x[0] - cx, x[1] - cy
        return exp(-0.5 * (dx * dx + dy * dy) / self.sigma ** 2)
#rotating
analyticalSolution = {0:RotatingGaussian2D(1.0/16.0)}
class UnitSquareRotation(TransportCoefficients.TC_base):
    """Coefficients for linear advection in a rotating velocity field on
    the unit square (single linear-mass, linear-advection component)."""
    # C-implemented evaluation kernel for this coefficient set.
    from proteus.ctransportCoefficients import unitSquareRotationEvaluate

    def __init__(self):
        # One component (index 0) with linear mass and linear advection;
        # no diffusion, potential, reaction or Hamiltonian terms.
        mass = {0: {0: 'linear'}}
        advection = {0: {0: 'linear'}}
        diffusion = {}
        potential = {}
        reaction = {}
        hamiltonian = {}
        TransportCoefficients.TC_base.__init__(self,
                                               1,
                                               mass,
                                               advection,
                                               diffusion,
                                               potential,
                                               reaction,
                                               hamiltonian)

    def evaluate(self, t, c):
        # Fill m, dm, f, df in the quadrature dictionary c in place.
        self.unitSquareRotationEvaluate(c['x'],
                                        c[('u', 0)],
                                        c[('m', 0)], c[('dm', 0, 0)],
                                        c[('f', 0)], c[('df', 0, 0)])
coefficients = UnitSquareRotation()
coefficients.variableNames=['u']
#now define the Dirichlet boundary conditions
def getDBC(x, tag):
    """Homogeneous Dirichlet condition on the boundary of [0,L[0]]x[0,L[1]];
    interior points get no condition (None)."""
    inside = 0.0 < x[0] < L[0] and 0.0 < x[1] < L[1]
    if inside:
        return None
    return lambda x, t: 0.0
dirichletConditions = {0:getDBC}
initialConditions = {0:analyticalSolution[0]}
advectiveFluxBoundaryConditions = {}
diffusiveFluxBoundaryConditions = {0:{}}
#rotation
T = 1.0
## @}
|
erdc/proteus
|
proteus/tests/dg/la_gauss_2d_p.py
|
Python
|
mit
| 3,312
|
[
"Gaussian"
] |
25cf8bbf7ba984da0e2701b2275325641733d1d27404806e18b9c4eaf9e92736
|
"""
Set of utilities to retrieve Information from proxy
"""
import base64
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Security.X509Chain import X509Chain # pylint: disable=import-error
from DIRAC.Core.Security.VOMS import VOMS
from DIRAC.Core.Security import Locations
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
__RCSID__ = "$Id$"
def getProxyInfo(proxy=False, disableVOMS=False):
  """
  Extract information from a proxy.

  :param proxy: an X509Chain instance, a path to a proxy file, or False to
                use the default proxy location
  :param disableVOMS: if True, skip querying the VOMS attributes

  :Returns: a dict with all the proxy info:

  * values that will be there always
      * 'chain' : chain object containing the proxy
      * 'subject' : subject of the proxy
      * 'issuer' : issuer of the proxy
      * 'isProxy' : bool
      * 'isLimitedProxy' : bool
      * 'validDN' : Valid DN in DIRAC
      * 'validGroup' : Valid Group in DIRAC
      * 'secondsLeft' : Seconds left
  * values that can be there
      * 'path' : path to the file,
      * 'group' : DIRAC group
      * 'groupProperties' : Properties that apply to the DIRAC Group
      * 'username' : DIRAC username
      * 'identity' : DN that generated the proxy
      * 'hostname' : DIRAC host nickname
      * 'VOMS'
  """
  # Discover proxy location
  proxyLocation = False
  if isinstance(proxy, X509Chain):
    chain = proxy
  else:
    if not proxy:
      proxyLocation = Locations.getProxyLocation()
    elif isinstance(proxy, basestring):  # NOTE: Python 2 only (basestring)
      proxyLocation = proxy
    if not proxyLocation:
      return S_ERROR(DErrno.EPROXYFIND)
    chain = X509Chain()
    retVal = chain.loadProxyFromFile(proxyLocation)
    if not retVal['OK']:
      return S_ERROR(DErrno.EPROXYREAD, "%s: %s " % (proxyLocation, retVal['Message']))

  retVal = chain.getCredentials()
  if not retVal['OK']:
    return retVal
  infoDict = retVal['Value']
  infoDict['chain'] = chain
  if proxyLocation:
    infoDict['path'] = proxyLocation
  if not disableVOMS and chain.isVOMS()['Value']:
    infoDict['hasVOMS'] = True
    retVal = VOMS().getVOMSAttributes(chain)
    if retVal['OK']:
      infoDict['VOMS'] = retVal['Value']
    else:
      # Keep the failure reason so callers can display it.
      infoDict['VOMSError'] = retVal['Message'].strip()
  return S_OK(infoDict)
def getProxyInfoAsString(proxyLoc=False, disableVOMS=False):
  """
  Return the proxy info as a printable string.

  :param proxyLoc: optional proxy file path (default location when False)
  :param disableVOMS: if True, skip the VOMS attribute lookup
  """
  result = getProxyInfo(proxyLoc, disableVOMS)
  if not result['OK']:
    return result
  return S_OK(formatProxyInfoAsString(result['Value']))
def formatProxyInfoAsString(infoDict):
  """
  Convert a proxy infoDict (as returned by getProxyInfo) into a
  human-readable, multi-line string.
  """
  leftAlign = 13
  contentList = []
  # Each entry is either a plain key, or a (key, display-label) pair.
  for field in ('subject', 'issuer', 'identity', 'subproxyUser', ('secondsLeft', 'timeleft'),
                ('group', 'DIRAC group'), 'rfc', 'path', 'username', ('groupProperties', "properties"),
                ('hasVOMS', 'VOMS'), ('VOMS', 'VOMS fqan'), ('VOMSError', 'VOMS Error')):
    if isinstance(field, basestring):  # NOTE: Python 2 only (basestring)
      dispField = field
    else:
      dispField = field[1]
      field = field[0]
    if field not in infoDict:
      continue
    if field == 'secondsLeft':
      # Render the remaining lifetime as HH:MM:SS.
      secs = infoDict[field]
      hours = int(secs / 3600)
      secs -= hours * 3600
      mins = int(secs / 60)
      secs -= mins * 60
      value = "%02d:%02d:%02d" % (hours, mins, secs)
    elif field == "groupProperties":
      value = ", ".join(infoDict[field])
    else:
      value = infoDict[field]
    contentList.append("%s: %s" % (dispField.ljust(leftAlign), value))
  return "\n".join(contentList)
def getProxyStepsInfo(chain):
  """
  Extended information of all Steps in the ProxyChain
  Returns a list of dictionary with Info for each Step.

  :param chain: X509Chain whose certificates are inspected
  """
  infoList = []
  nC = chain.getNumCertsInChain()['Value']
  for i in range(nC):
    cert = chain.getCertInChain(i)['Value']
    stepInfo = {}
    stepInfo['subject'] = cert.getSubjectDN()['Value']
    stepInfo['issuer'] = cert.getIssuerDN()['Value']
    stepInfo['serial'] = cert.getSerialNumber()['Value']
    stepInfo['not before'] = cert.getNotBeforeDate()['Value']
    stepInfo['not after'] = cert.getNotAfterDate()['Value']
    stepInfo['lifetime'] = cert.getRemainingSecs()['Value']
    stepInfo['extensions'] = cert.getExtensions()['Value']
    # Group and VOMS extension entries are only added when present.
    dG = cert.getDIRACGroup(ignoreDefault=True)['Value']
    if dG:
      stepInfo['group'] = dG
    if cert.hasVOMSExtensions()['Value']:
      stepInfo['VOMS ext'] = True
    infoList.append(stepInfo)
  return S_OK(infoList)
def formatProxyStepsInfoAsString(infoList):
  """
  Render the list of proxy step dictionaries as a printable string.
  """
  contentsList = []
  for stepNumber, stepInfo in enumerate(infoList):
    contentsList.append(" + Step %s" % stepNumber)
    for key in ('subject', 'issuer', 'serial', 'not after', 'not before',
                'group', 'VOMS ext', 'lifetime', 'extensions'):
      if key not in stepInfo:
        continue
      value = stepInfo[key]
      if key == 'serial':
        value = base64.b16encode(value)
      if key == 'lifetime':
        # Render remaining seconds as HH:MM:SS.
        secs = value
        hours = int(secs / 3600)
        secs -= hours * 3600
        mins = int(secs / 60)
        secs -= mins * 60
        value = "%02d:%02d:%02d" % (hours, mins, secs)
      if key == "extensions":
        value = "\n %s" % "\n ".join(["%s = %s" % (extName.strip().rjust(20), extValue.strip())
                                      for extName, extValue in value])
      contentsList.append(" %s : %s" % (key.ljust(10).capitalize(), value))
  return "\n".join(contentsList)
def getVOfromProxyGroup():
  """
  Return the VO associated to the group in the proxy
  """
  # Start from the VO that an unknown group maps to, and upgrade it if the
  # proxy carries a real DIRAC group.
  defaultVO = Registry.getVOForGroup('NoneExistingGroup')
  result = getProxyInfo(disableVOMS=True)
  if not result['OK']:
    return S_OK(defaultVO)
  proxyDict = result['Value']
  if 'group' in proxyDict:
    return S_OK(Registry.getVOForGroup(proxyDict['group']))
  return S_OK(defaultVO)
|
fstagni/DIRAC
|
Core/Security/ProxyInfo.py
|
Python
|
gpl-3.0
| 5,830
|
[
"DIRAC"
] |
b4249ef6018e51fc6f2f26ca33fca30ceaf50fdf80f4f8021df24862548bf182
|
import cPickle as pickle
import contextlib
import os.path
import tempfile
from cStringIO import StringIO
import numpy as np
import pandas as pd
from xray import Dataset, open_dataset, backends
from . import TestCase, requires_scipy, requires_netCDF4, requires_pydap
from test_dataset import create_test_data
try:
import netCDF4 as nc4
except ImportError:
pass
def open_example_dataset(name):
    """Open dataset *name* from the package's local ``data`` directory."""
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    return open_dataset(os.path.join(data_dir, name))
def create_masked_and_scaled_data():
    """Decoded form of the mask-and-scale fixture (NaN where masked)."""
    values = np.array([np.nan, np.nan, 10, 10.1, 10.2])
    encoding = {'_FillValue': -1,
                'add_offset': 10,
                'scale_factor': np.float32(0.1),
                'dtype': np.dtype(np.int16)}
    return Dataset({'x': ('t', values, {}, encoding)})
def create_encoded_masked_and_scaled_data():
    """Encoded (raw integer) form of the mask-and-scale fixture."""
    attributes = {'_FillValue': -1,
                  'add_offset': 10,
                  'scale_factor': np.float32(0.1)}
    raw_values = [-1, -1, 0, 1, 2]
    return Dataset({'x': ('t', raw_values, attributes)})
class DatasetIOTestCases(object):
    # Backend-agnostic round-trip tests. This class is not itself a
    # TestCase; concrete subclasses (e.g. NetCDF4DataTest) combine it with
    # TestCase and implement create_store()/roundtrip() for one backend.

    def create_store(self):
        # Subclasses: return a context manager yielding a writable store.
        raise NotImplementedError

    def roundtrip(self, data, **kwargs):
        # Subclasses: write `data` out and read it back; `kwargs` are
        # forwarded to open_dataset (e.g. decode_cf=False).
        raise NotImplementedError

    def test_zero_dimensional_variable(self):
        expected = create_test_data()
        # 0-d variable: empty dimension tuple plus a scalar array.
        expected['xray_awesomeness'] = ([], np.array(1.e9),
                                        {'units': 'units of awesome'})
        with self.create_store() as store:
            expected.dump_to_store(store)
            actual = Dataset.load_store(store)
        self.assertDatasetAllClose(expected, actual)

    def test_write_store(self):
        expected = create_test_data()
        with self.create_store() as store:
            expected.dump_to_store(store)
            actual = Dataset.load_store(store)
        self.assertDatasetAllClose(expected, actual)

    def test_roundtrip_test_data(self):
        expected = create_test_data()
        actual = self.roundtrip(expected)
        self.assertDatasetAllClose(expected, actual)

    def test_roundtrip_string_data(self):
        expected = Dataset({'x': ('t', ['abc', 'def'])})
        actual = self.roundtrip(expected)
        self.assertDatasetIdentical(expected, actual)

    def test_roundtrip_mask_and_scale(self):
        decoded = create_masked_and_scaled_data()
        encoded = create_encoded_masked_and_scaled_data()
        # All four combinations: decoded/encoded input x CF decoding on/off.
        self.assertDatasetAllClose(decoded, self.roundtrip(decoded))
        self.assertDatasetAllClose(encoded,
                                   self.roundtrip(decoded, decode_cf=False))
        self.assertDatasetAllClose(decoded, self.roundtrip(encoded))
        self.assertDatasetAllClose(encoded,
                                   self.roundtrip(encoded, decode_cf=False))

    def test_roundtrip_example_1_netcdf(self):
        expected = open_example_dataset('example_1.nc')
        actual = self.roundtrip(expected)
        self.assertDatasetIdentical(expected, actual)

    def test_orthogonal_indexing(self):
        in_memory = create_test_data()
        on_disk = self.roundtrip(in_memory)
        indexers = {'dim1': range(3), 'dim2': range(4), 'dim3': range(5)}
        expected = in_memory.indexed(**indexers)
        actual = on_disk.indexed(**indexers)
        self.assertDatasetAllClose(expected, actual)
        # do it twice, to make sure we're switched from orthogonal -> numpy
        # when we cached the values
        actual = on_disk.indexed(**indexers)
        self.assertDatasetAllClose(expected, actual)

    def test_pickle(self):
        # Round-trip through pickle must preserve the whole dataset.
        on_disk = open_example_dataset('bears.nc')
        unpickled = pickle.loads(pickle.dumps(on_disk))
        self.assertDatasetIdentical(on_disk, unpickled)
@contextlib.contextmanager
def create_tmp_file(suffix='.nc'):
    """Context manager yielding the path of a fresh, empty temporary file.

    The file is created via ``tempfile.mkstemp`` (so the name is unique and
    the file exists on entry) and removed on exit.

    :param suffix: filename suffix for the temporary file.
    """
    handle, path = tempfile.mkstemp(suffix=suffix)
    os.close(handle)
    try:
        yield path
    finally:
        # Robustness fix: tolerate the consumer having already removed the
        # file; the original unconditional os.remove would raise OSError.
        if os.path.exists(path):
            os.remove(path)
@requires_netCDF4
class NetCDF4DataTest(DatasetIOTestCases, TestCase):
    # Round-trip tests against the netCDF4-python backend, plus
    # netCDF4-specific behaviour (encodings, compression, vlen strings).

    @contextlib.contextmanager
    def create_store(self):
        with create_tmp_file() as tmp_file:
            yield backends.NetCDF4DataStore(tmp_file, mode='w')

    def roundtrip(self, data, **kwargs):
        # Dump to a temporary netCDF file and re-open it through xray.
        with create_tmp_file() as tmp_file:
            data.dump(tmp_file)
            roundtrip_data = open_dataset(tmp_file, **kwargs)
            return roundtrip_data

    def test_open_encodings(self):
        # Create a netCDF file with explicit time units
        # and make sure it makes it into the encodings
        # and survives a round trip
        with create_tmp_file() as tmp_file:
            ds = nc4.Dataset(tmp_file, 'w')
            ds.createDimension('time', size=10)
            ds.createVariable('time', np.int32, dimensions=('time',))
            units = 'days since 1999-01-01'
            ds.variables['time'].setncattr('units', units)
            ds.variables['time'][:] = np.arange(10) + 4
            ds.close()

            expected = Dataset()
            # days 4..13 after 1999-01-01 -> dates starting 1999-01-05
            time = pd.date_range('1999-01-05', periods=10)
            encoding = {'units': units, 'dtype': np.dtype('int32')}
            expected['time'] = ('time', time, {}, encoding)

            actual = open_dataset(tmp_file)
            self.assertVariableEqual(actual['time'], expected['time'])
            # only compare the encoding keys we set explicitly above
            actual_encoding = {k: v for k, v in actual['time'].encoding.iteritems()
                               if k in expected['time'].encoding}
            self.assertDictEqual(actual_encoding, expected['time'].encoding)

    def test_dump_and_open_encodings(self):
        # Create a netCDF file with explicit time units
        # and make sure it makes it into the encodings
        # and survives a round trip
        with create_tmp_file() as tmp_file:
            ds = nc4.Dataset(tmp_file, 'w')
            ds.createDimension('time', size=10)
            ds.createVariable('time', np.int32, dimensions=('time',))
            units = 'days since 1999-01-01'
            ds.variables['time'].setncattr('units', units)
            ds.variables['time'][:] = np.arange(10) + 4
            ds.close()

            xray_dataset = open_dataset(tmp_file)
            # Re-dump through xray and verify attrs/data with raw netCDF4.
            with create_tmp_file() as tmp_file:
                xray_dataset.dump(tmp_file)
                ds = nc4.Dataset(tmp_file, 'r')
                self.assertEqual(ds.variables['time'].getncattr('units'), units)
                self.assertArrayEqual(ds.variables['time'], np.arange(10) + 4)
                ds.close()

    def test_compression_encoding(self):
        data = create_test_data()
        # Compression settings live in .encoding and must survive round-trip.
        data['var2'].encoding.update({'zlib': True,
                                      'chunksizes': (10, 10),
                                      'least_significant_digit': 2})
        actual = self.roundtrip(data)
        for k, v in data['var2'].encoding.iteritems():
            self.assertEqual(v, actual['var2'].encoding[k])

    def test_mask_and_scale(self):
        with create_tmp_file() as tmp_file:
            nc = nc4.Dataset(tmp_file, mode='w')
            nc.createDimension('t', 5)
            nc.createVariable('x', 'int16', ('t',), fill_value=-1)
            v = nc.variables['x']
            # write the raw integer values, no automatic scaling on write
            v.set_auto_maskandscale(False)
            v.add_offset = 10
            v.scale_factor = 0.1
            v[:] = np.array([-1, -1, 0, 1, 2])
            nc.close()

            # first make sure netCDF4 reads the masked and scaled data correctly
            nc = nc4.Dataset(tmp_file, mode='r')
            expected = np.ma.array([-1, -1, 10, 10.1, 10.2],
                                   mask=[True, True, False, False, False])
            actual = nc.variables['x'][:]
            self.assertArrayEqual(expected, actual)

            # now check xray
            ds = open_dataset(tmp_file)
            expected = create_masked_and_scaled_data()
            self.assertDatasetIdentical(expected, ds)

    def test_0dimensional_variable(self):
        # This fix verifies our work-around to this netCDF4-python bug:
        # https://github.com/Unidata/netcdf4-python/pull/220
        with create_tmp_file() as tmp_file:
            nc = nc4.Dataset(tmp_file, mode='w')
            v = nc.createVariable('x', 'int16')
            v[...] = 123
            nc.close()

            ds = open_dataset(tmp_file)
            expected = Dataset({'x': ((), 123)})
            self.assertDatasetIdentical(expected, ds)

    def test_variable_len_strings(self):
        with create_tmp_file() as tmp_file:
            values = np.array(['foo', 'bar', 'baz'], dtype=object)
            nc = nc4.Dataset(tmp_file, mode='w')
            nc.createDimension('x', 3)
            # dtype=str creates a netCDF variable-length string variable
            v = nc.createVariable('x', str, ('x',))
            v[:] = values
            nc.close()

            expected = Dataset({'x': ('x', values)})
            # vlen strings must load the same with or without CF decoding
            for kwargs in [{}, {'decode_cf': True}]:
                actual = open_dataset(tmp_file, **kwargs)
                self.assertDatasetIdentical(expected, actual)
@requires_netCDF4
@requires_scipy
class ScipyDataTest(DatasetIOTestCases, TestCase):
    """Round-trip tests against the scipy.io.netcdf backend, in memory."""

    @contextlib.contextmanager
    def create_store(self):
        memfile = StringIO()
        yield backends.ScipyDataStore(memfile, 'w')

    def roundtrip(self, data, **kwargs):
        # Serialize to an in-memory string and re-open it as a dataset.
        return open_dataset(StringIO(data.dumps()), **kwargs)
def clear_attributes(ds):
    """Strip all attributes from *ds* and from each of its variables."""
    ds.attrs.clear()
    for variable in ds.itervalues():
        variable.attrs.clear()
@requires_netCDF4
@requires_pydap
class PydapTest(TestCase):
    """Compare a dataset fetched over OPeNDAP with its local copy."""

    def test_cmp_local_file(self):
        url = 'http://test.opendap.org/opendap/hyrax/data/nc/bears.nc'
        remote = Dataset.load_store(backends.PydapDataStore(url))
        local = open_example_dataset('bears.nc')
        # don't check attributes since pydap doesn't serialize them correctly
        self.assertDatasetEqual(remote, local)
|
takluyver/xray
|
test/test_backends.py
|
Python
|
apache-2.0
| 9,719
|
[
"NetCDF"
] |
bdc5686d212c4c54dd913aa689563f93e4eb943d3f73ece5b7ba1e3069d6e8bd
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to test fitting routines
"""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import os.path
import numpy as np
from numpy import linalg
from numpy.testing.utils import assert_raises
from numpy.testing.utils import assert_allclose, assert_almost_equal
from . import irafutil
from .. import models
from ..core import Fittable2DModel, Parameter
from ..fitting import *
from ...utils import NumpyRNGContext
from ...utils.data import get_pkg_data_filename
from ...tests.helper import pytest
from .utils import ignore_non_integer_warning
from ...stats import sigma_clip
from ...utils.exceptions import AstropyUserWarning
from ..fitting import populate_entry_points
import warnings
# Optional-dependency probing: tests below are skipped via the
# pytest.mark.skipif markers when these packages are unavailable.
try:
    from scipy import optimize
    HAS_SCIPY = True
except ImportError:
    HAS_SCIPY = False

HAS_MOCK = True
try:
    # Python 3: mock ships in the standard library.
    from unittest import mock
except ImportError:
    try:
        # Python 2 fallback: the standalone ``mock`` package.
        import mock
    except ImportError:
        HAS_MOCK = False

try:
    from pkg_resources import EntryPoint
    HAS_PKG = True
except ImportError:
    HAS_PKG = False

# Non-linear fitters exercised parametrically against LevMarLSQFitter.
fitters = [SimplexLSQFitter, SLSQPLSQFitter]

# Fixed seed so all randomized fixtures are reproducible.
_RANDOM_SEED = 0x1337
class TestPolynomial2D(object):
    """Linear and non-linear fitting of a 2D polynomial."""

    def setup_class(self):
        self.model = models.Polynomial2D(2)
        self.y, self.x = np.mgrid[:5, :5]

        def poly2(x, y):
            return 1 + 2 * x + 3 * x ** 2 + 4 * y + 5 * y ** 2 + 6 * x * y

        self.z = poly2(self.x, self.y)
        self.fitter = LinearLSQFitter()

    def test_poly2D_fitting(self):
        # Solve the least-squares problem directly and compare coefficients.
        design = self.model.fit_deriv(x=self.x, y=self.y)
        direct_solution = linalg.lstsq(design, self.z.flatten())[0]
        fitted = self.fitter(self.model, self.x, self.y, self.z)
        assert_allclose(fitted.parameters, direct_solution)

    def test_eval(self):
        fitted = self.fitter(self.model, self.x, self.y, self.z)
        assert_allclose(fitted(self.x, self.y), self.z)

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_polynomial2D_nonlinear_fitting(self):
        # Perturb the coefficients, then recover them with LevMar.
        self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]
        nonlinear_fitter = LevMarLSQFitter()
        refitted = nonlinear_fitter(self.model, self.x, self.y, self.z)
        assert_allclose(refitted.parameters, [1, 2, 3, 4, 5, 6])
class TestICheb2D(object):
    """
    2D Chebyshev polynomial fitting checks.

    A reference surface is built from Polynomial2D with default
    coefficients; it is then fit with a Chebyshev2D model, and the fitted
    model is evaluated and compared against the input surface.
    """

    def setup_class(self):
        self.pmodel = models.Polynomial2D(2)
        self.y, self.x = np.mgrid[:5, :5]
        self.z = self.pmodel(self.x, self.y)
        self.cheb2 = models.Chebyshev2D(2, 2)
        self.fitter = LinearLSQFitter()

    def test_default_params(self):
        self.cheb2.parameters = np.arange(9)
        expected = np.array([1344., 1772., 400., 1860., 2448., 552., 432., 568.,
                             128.])
        surface = self.cheb2(self.x, self.y)
        fitted = self.fitter(self.cheb2, self.x, self.y, surface)
        assert_almost_equal(fitted.parameters, expected)

    def test_poly2D_cheb2D(self):
        fitted = self.fitter(self.cheb2, self.x, self.y, self.z)
        assert_almost_equal(self.z, fitted(self.x, self.y))

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_chebyshev2D_nonlinear_fitting(self):
        cheb2d = models.Chebyshev2D(2, 2)
        cheb2d.parameters = np.arange(9)
        target = cheb2d(self.x, self.y)
        # Start the fit from perturbed coefficients.
        cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
        nonlinear_fitter = LevMarLSQFitter()
        fitted = nonlinear_fitter(cheb2d, self.x, self.y, target)
        assert_allclose(fitted.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
                        atol=10**-9)

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_chebyshev2D_nonlinear_fitting_with_weights(self):
        cheb2d = models.Chebyshev2D(2, 2)
        cheb2d.parameters = np.arange(9)
        target = cheb2d(self.x, self.y)
        cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
        nonlinear_fitter = LevMarLSQFitter()
        # Uniform weights must not change the recovered coefficients.
        unit_weights = np.ones_like(self.y)
        fitted = nonlinear_fitter(cheb2d, self.x, self.y, target,
                                  weights=unit_weights)
        assert_allclose(fitted.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
                        atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
class TestJointFitter(object):
    """
    Tests the joint fitting routine using 2 gaussian models
    """
    def setup_class(self):
        """
        Create 2 gaussian models and some data with noise.
        Create a fitter for the two models keeping the amplitude parameter
        common for the two models.
        """
        self.g1 = models.Gaussian1D(10, mean=14.9, stddev=.3)
        self.g2 = models.Gaussian1D(10, mean=13, stddev=.4)
        # 'amplitude' is shared between g1 and g2; [9.8] is the initial
        # value of the joint parameter.
        self.jf = JointFitter([self.g1, self.g2],
                              {self.g1: ['amplitude'],
                               self.g2: ['amplitude']}, [9.8])
        self.x = np.arange(10, 20, .1)
        y1 = self.g1(self.x)
        y2 = self.g2(self.x)
        # Seeded noise keeps the fit deterministic; note the SAME noise
        # vector is applied to both curves.
        with NumpyRNGContext(_RANDOM_SEED):
            n = np.random.randn(100)
        self.ny1 = y1 + 2 * n
        self.ny2 = y2 + 2 * n
        self.jf(self.x, self.ny1, self.x, self.ny2)

    def test_joint_parameter(self):
        """
        Tests that the amplitude of the two models is the same
        """
        assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
        assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])

    def test_joint_fitter(self):
        """
        Tests the fitting routine with similar procedure.
        Compares the fitted parameters.
        """
        p1 = [14.9, .3]
        p2 = [13, .4]
        A = 9.8
        p = np.r_[A, p1, p2]

        # Reference implementation: one leastsq over both curves with the
        # amplitude shared through p[0].
        def model(A, p, x):
            return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)

        def errfunc(p, x1, y1, x2, y2):
            return np.ravel(np.r_[model(p[0], p[1:3], x1) - y1,
                                  model(p[0], p[3:], x2) - y2])

        coeff, _ = optimize.leastsq(errfunc, p,
                                    args=(self.x, self.ny1, self.x, self.ny2))
        assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))
class TestLinearLSQFitter(object):
    def test_chebyshev1D(self):
        """Tests fitting a 1D Chebyshev polynomial to some real world data."""
        test_file = get_pkg_data_filename(os.path.join('data',
                                                       'idcompspec.fits'))
        with open(test_file) as f:
            lines = f.read()
        # The IRAF identify database keeps one record per 'begin' block.
        reclist = lines.split('begin')
        record = irafutil.IdentifyRecord(reclist[1])
        coeffs = record.coeff
        order = int(record.fields['order'])
        # IRAF 'order' counts coefficients; the model degree is order - 1.
        initial_model = models.Chebyshev1D(order - 1,
                                           domain=record.get_range())
        fitter = LinearLSQFitter()
        fitted_model = fitter(initial_model, record.x, record.z)
        assert_allclose(fitted_model.parameters, np.array(coeffs),
                        rtol=10e-2)

    def test_linear_fit_model_set(self):
        """Tests fitting multiple models simultaneously."""
        init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
        x = np.arange(10)
        y_expected = init_model(x, model_set_axis=False)
        assert y_expected.shape == (2, 10)

        # Add a bit of random noise
        with NumpyRNGContext(_RANDOM_SEED):
            y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)

        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, y)
        assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
                        rtol=1e-1)

    def test_linear_fit_2d_model_set(self):
        """Tests fitted multiple 2-D models simultaneously."""
        init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
        x = np.arange(10)
        y = np.arange(10)
        z_expected = init_model(x, y, model_set_axis=False)
        assert z_expected.shape == (2, 10)

        # Add a bit of random noise
        with NumpyRNGContext(_RANDOM_SEED):
            z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)

        fitter = LinearLSQFitter()
        fitted_model = fitter(init_model, x, y, z)
        assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected,
                        rtol=1e-1)
@pytest.mark.skipif('not HAS_SCIPY')
class TestNonLinearFitters(object):
    """Tests non-linear least squares fitting and the SLSQP algorithm."""

    def setup_class(self):
        # True (amplitude, mean, stddev) of the synthetic Gaussian.
        self.initial_values = [100, 5, 1]
        self.xdata = np.arange(0, 10, 0.1)
        sigma = 4. * np.ones_like(self.xdata)
        # Seeded noise so every test sees the same fixture.
        with NumpyRNGContext(_RANDOM_SEED):
            yerror = np.random.normal(0, sigma)

        def func(p, x):
            return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)

        self.ydata = func(self.initial_values, self.xdata) + yerror
        self.gauss = models.Gaussian1D(100, 5, stddev=1)

    def test_estimated_vs_analytic_deriv(self):
        """
        Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
        `Gaussian1D`.
        """
        fitter = LevMarLSQFitter()
        model = fitter(self.gauss, self.xdata, self.ydata)
        g1e = models.Gaussian1D(100, 5.0, stddev=1)
        efitter = LevMarLSQFitter()
        # estimate_jacobian=True forces finite-difference derivatives.
        emodel = efitter(g1e, self.xdata, self.ydata, estimate_jacobian=True)
        assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))

    def test_estimated_vs_analytic_deriv_with_weights(self):
        """
        Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
        `Gaussian1D`.
        """
        weights = 1.0 / (self.ydata / 10.)
        fitter = LevMarLSQFitter()
        model = fitter(self.gauss, self.xdata, self.ydata, weights=weights)
        g1e = models.Gaussian1D(100, 5.0, stddev=1)
        efitter = LevMarLSQFitter()
        emodel = efitter(g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True)
        assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))

    def test_with_optimize(self):
        """
        Tests results from `LevMarLSQFitter` against `scipy.optimize.leastsq`.
        """
        fitter = LevMarLSQFitter()
        model = fitter(self.gauss, self.xdata, self.ydata,
                       estimate_jacobian=True)

        def func(p, x):
            return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)

        def errfunc(p, x, y):
            return func(p, x) - y

        result = optimize.leastsq(errfunc, self.initial_values,
                                  args=(self.xdata, self.ydata))
        assert_allclose(model.parameters, result[0], rtol=10 ** (-3))

    def test_with_weights(self):
        """
        Tests results from `LevMarLSQFitter` with weights.
        """
        # part 1: weights are equal to 1
        fitter = LevMarLSQFitter()
        model = fitter(self.gauss, self.xdata, self.ydata,
                       estimate_jacobian=True)
        withw = fitter(self.gauss, self.xdata, self.ydata,
                       estimate_jacobian=True, weights=np.ones_like(self.xdata))
        assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))

        # part 2: weights are 0 or 1 (effectively, they are a mask)
        weights = np.zeros_like(self.xdata)
        weights[::2] = 1.
        mask = weights >= 1.
        # Fitting only the unmasked points must match fitting all points
        # with 0/1 weights.
        model = fitter(self.gauss, self.xdata[mask], self.ydata[mask],
                       estimate_jacobian=True)
        withw = fitter(self.gauss, self.xdata, self.ydata,
                       estimate_jacobian=True, weights=weights)
        assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))

    @pytest.mark.parametrize('fitter_class', fitters)
    def test_fitter_against_LevMar(self, fitter_class):
        """Tests results from non-linear fitters against `LevMarLSQFitter`."""
        levmar = LevMarLSQFitter()
        fitter = fitter_class()
        # Suppress the non-integer warning these optimizers can emit
        # (see the .utils helper).
        with ignore_non_integer_warning():
            new_model = fitter(self.gauss, self.xdata, self.ydata)
        model = levmar(self.gauss, self.xdata, self.ydata)
        assert_allclose(model.parameters, new_model.parameters,
                        rtol=10 ** (-4))

    def test_LSQ_SLSQP_with_constraints(self):
        """
        Runs `LevMarLSQFitter` and `SLSQPLSQFitter` on a model with
        constraints.
        """
        g1 = models.Gaussian1D(100, 5, stddev=1)
        # Fix the mean: both fitters must honor the constraint identically.
        g1.mean.fixed = True
        fitter = LevMarLSQFitter()
        fslsqp = SLSQPLSQFitter()
        with ignore_non_integer_warning():
            slsqp_model = fslsqp(g1, self.xdata, self.ydata)
        model = fitter(g1, self.xdata, self.ydata)
        assert_allclose(model.parameters, slsqp_model.parameters,
                        rtol=10 ** (-4))

    def test_simplex_lsq_fitter(self):
        """A basic test for the `SimplexLSQ` fitter."""

        class Rosenbrock(Fittable2DModel):
            a = Parameter()
            b = Parameter()

            @staticmethod
            def evaluate(x, y, a, b):
                return (a - x) ** 2 + b * (y - x ** 2) ** 2

        x = y = np.linspace(-3.0, 3.0, 100)
        with NumpyRNGContext(_RANDOM_SEED):
            z = Rosenbrock.evaluate(x, y, 1.0, 100.0)
            z += np.random.normal(0., 0.1, size=z.shape)

        fitter = SimplexLSQFitter()
        r_i = Rosenbrock(1, 100)
        r_f = fitter(r_i, x, y, z)
        assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)

    def test_param_cov(self):
        """
        Tests that the 'param_cov' fit_info entry gets the right answer for
        *linear* least squares, where the answer is exact
        """
        a = 2
        b = 100
        with NumpyRNGContext(_RANDOM_SEED):
            x = np.linspace(0, 1, 100)
            # y scatter is amplitude ~1 to make sure covarience is
            # non-negligible
            y = x*a + b + np.random.randn(len(x))

        #first compute the ordinary least squares covariance matrix
        X = np.matrix(np.vstack([x, np.ones(len(x))]).T)
        beta = np.linalg.inv(X.T * X) * X.T * np.matrix(y).T
        s2 = np.sum((y - (X * beta).A.ravel())**2) / (len(y) - len(beta))
        olscov = np.linalg.inv(X.T * X) * s2

        #now do the non-linear least squares fit
        mod = models.Linear1D(a, b)
        fitter = LevMarLSQFitter()
        fmod = fitter(mod, x, y)
        assert_allclose(fmod.parameters, beta.A.ravel())
        assert_allclose(olscov, fitter.fit_info['param_cov'])
@pytest.mark.skipif('not HAS_MOCK')
@pytest.mark.skipif('not HAS_PKG')
class TestEntryPoint(object):
    """Tests population of fitting with entry point fitters"""

    def setup_class(self):
        self.exception_not_thrown = Exception("The test should not have gotten here. There was no exception thrown")

    def successfulimport(self):
        # A loader returning a proper Fitter subclass: must be accepted.
        class goodclass(Fitter):
            __name__ = "GoodClass"
        return goodclass

    def raiseimporterror(self):
        # A loader that fails at import time.
        raise ImportError

    def returnbadfunc(self):
        # A loader returning a function: imports fine, fails the type check.
        def badfunc():
            pass
        return badfunc

    def returnbadclass(self):
        # A loader returning a non-Fitter class: fails the subclass check.
        class badclass(object):
            pass
        return badclass

    def _populate(self, name, loader):
        """Build a mocked EntryPoint and feed it to populate_entry_points."""
        entry = mock.create_autospec(EntryPoint)
        entry.name = name
        entry.load = loader
        populate_entry_points([entry])

    def _expect_warning(self, name, loader, fragment):
        """populate_entry_points must warn, with *fragment* in the message."""
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                self._populate(name, loader)
            except AstropyUserWarning as w:
                # any error for this case should have `fragment` in it
                if fragment not in w.args[0]:
                    raise w
            else:
                raise self.exception_not_thrown

    def test_working(self):
        """This should work fine"""
        self._populate("Working", self.successfulimport)

    def test_import_error(self):
        """This raises an import error on load to test that it is handled correctly"""
        self._expect_warning("IErr", self.raiseimporterror, "ImportError")

    def test_bad_func(self):
        """This returns a function which fails the type check"""
        self._expect_warning("BadFunc", self.returnbadfunc, "Class")

    def test_bad_class(self):
        """This returns a class which doesn't inherit from Fitter"""
        self._expect_warning("BadClass", self.returnbadclass, 'modeling.Fitter')
@pytest.mark.skipif('not HAS_SCIPY')
class Test1DFittingWithOutlierRemoval(object):
    def setup_class(self):
        self.x = np.linspace(-5., 5., 200)
        # True (amplitude, mean, stddev) of the Gaussian.
        self.model_params = (3.0, 1.3, 0.8)

        def func(p, x):
            return p[0]*np.exp(-0.5*(x - p[1])**2/p[2]**2)

        self.y = func(self.model_params, self.x)

    def test_with_fitters_and_sigma_clip(self):
        import scipy.stats as stats

        np.random.seed(0)
        # About a quarter of the points get large-amplitude outliers added.
        c = stats.bernoulli.rvs(0.25, size=self.x.shape)
        self.y += (np.random.normal(0., 0.2, self.x.shape) +
                   c*np.random.normal(3.0, 5.0, self.x.shape))

        g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
        # test with Levenberg-Marquardt Least Squares fitter
        fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.0)
        _, fitted_model = fit(g_init, self.x, self.y)
        assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
        # test with Sequential Least Squares Programming fitter
        fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.0)
        _, fitted_model = fit(g_init, self.x, self.y)
        assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
        # test with Simplex LSQ fitter
        fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.0)
        _, fitted_model = fit(g_init, self.x, self.y)
        assert_allclose(fitted_model.parameters, self.model_params, atol=1e-1)
@pytest.mark.skipif('not HAS_SCIPY')
class Test2DFittingWithOutlierRemoval(object):
    def setup_class(self):
        self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]
        # (amplitude, x_mean, y_mean, x_stddev, y_stddev) — matches the
        # parameters[0:5] ordering asserted against below.
        self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)

        def Gaussian_2D(p, pos):
            # pos is [y, x]; p[2]/p[4] pair with y, p[1]/p[3] with x.
            return p[0]*np.exp(-0.5*(pos[0] - p[2])**2 / p[4]**2 -
                               0.5*(pos[1] - p[1])**2 / p[3]**2)

        self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))

    def initial_guess(self, data, pos):
        y = pos[0]
        x = pos[1]
        """computes the centroid of the data as the initial guess for the
        center position"""
        wx = x * data
        wy = y * data
        total_intensity = np.sum(data)
        x_mean = np.sum(wx) / total_intensity
        y_mean = np.sum(wy) / total_intensity

        # Convert the centroid coordinates into grid indices to read off
        # an amplitude estimate from the data itself.
        x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])
        y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0])
        x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.).astype(int)
        y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.).astype(int)

        amplitude = data[y_pos][x_pos]
        return amplitude, x_mean, y_mean

    def test_with_fitters_and_sigma_clip(self):
        import scipy.stats as stats

        np.random.seed(0)
        # About a quarter of the pixels are contaminated with outliers.
        c = stats.bernoulli.rvs(0.25, size=self.z.shape)
        self.z += (np.random.normal(0., 0.2, self.z.shape) +
                   c*np.random.normal(self.z, 2.0, self.z.shape))

        guess = self.initial_guess(self.z, np.array([self.y, self.x]))
        g2_init = models.Gaussian2D(amplitude=guess[0], x_mean=guess[1],
                                    y_mean=guess[2], x_stddev=0.75,
                                    y_stddev=1.25)
        # test with Levenberg-Marquardt Least Squares fitter
        fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.)
        _, fitted_model = fit(g2_init, self.x, self.y, self.z)
        assert_allclose(fitted_model.parameters[0:5], self.model_params,
                        atol=1e-1)
        # test with Sequential Least Squares Programming fitter
        fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip, niter=3,
                                        sigma=3.)
        _, fitted_model = fit(g2_init, self.x, self.y, self.z)
        assert_allclose(fitted_model.parameters[0:5], self.model_params,
                        atol=1e-1)
        # test with Simplex LSQ fitter
        fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
                                        niter=3, sigma=3.)
        _, fitted_model = fit(g2_init, self.x, self.y, self.z)
        assert_allclose(fitted_model.parameters[0:5], self.model_params,
                        atol=1e-1)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitters_with_weights():
    """Issue #5737 """
    # NOTE(review): despite the function name and the issue reference, no
    # ``weights=`` argument is ever passed to the fitter below — only the
    # unweighted path is exercised. Confirm whether weights were meant to
    # be supplied here.
    Xin, Yin = np.mgrid[0:21, 0:21]
    fitter = LevMarLSQFitter()

    # Seeded noise added to both model surfaces.
    with NumpyRNGContext(_RANDOM_SEED):
        zsig = np.random.normal(0, 0.01, size=Xin.shape)

    # Non-linear model
    g2 = models.Gaussian2D(10, 10, 9, 2, 3)
    z = g2(Xin, Yin)
    gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
    assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))

    # Linear model
    p2=models.Polynomial2D(3)
    p2.parameters=np.arange(10)/1.2
    z = p2(Xin, Yin)
    pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
    assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitters_interface():
    """
    Smoke test: every optimizer accepts its documented **kwargs.
    """
    model = models.Gaussian1D(10, 4, .3)
    x = np.arange(21)
    y = model(x)

    common_kwargs = {'maxiter': 77, 'verblevel': 1, 'epsilon': 1e-2,
                     'acc': 1e-6}
    simplex_kwargs = {'maxiter': 77, 'verblevel': 1, 'acc': 1e-6}

    slsqp_model = SLSQPLSQFitter()(model, x, y, **common_kwargs)
    simplex_model = SimplexLSQFitter()(model, x, y, **simplex_kwargs)

    # LevMar does not take 'verblevel'; drop it before the call.
    common_kwargs.pop('verblevel')
    lm_model = LevMarLSQFitter()(model, x, y, **common_kwargs)
|
tbabej/astropy
|
astropy/modeling/tests/test_fitters.py
|
Python
|
bsd-3-clause
| 24,041
|
[
"Gaussian"
] |
bfa6e594c186fba4f810d7e27c3b46522ef7b1d8c6af4358a1ca9bf44578e3b4
|
#!/bin/env python
#
# Copyright 2007-2009 Fedora Unity Project (http://fedoraunity.org)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from optparse import OptionParser
import os, sys
# Add a hack to allow running from a git clone.
# This needs to be removed before any public release.
#sys.path.append(os.getcwd())
import pyJigdo.base
from pyJigdo.constants import *
"""
pyJigdo interface - Jigdo at its finest
"""
from pyJigdo.translate import _, N_
class PyJigdo:
""" The main interface to configuring pyJigdo.
Providing runtime options and defaults for population
of a PyJigdoBase() object that will further setup objects
in preparation of creating a PyJigdoReactor() for downloading
all of the requested data. """
def __init__(self):
    """ Parse runtime options and setup the PyJigdoBase(). """
    # Options are parsed first; this instance is then handed to
    # PyJigdoBase, which presumably reads the parsed settings from it —
    # confirm against PyJigdoBase's constructor.
    self.parse_options()
    self.base = pyJigdo.base.PyJigdoBase(self)
def parse_options(self):
epilog = """pyJigdo is a Fedora Unity product. """ + \
"""For more information about pyJigdo, visit http://pyjigdo.org/ """
description = "Python Interface to Jigdo."
usage = "Usage: %prog [options] jigdofile [jigdofile]"
try:
parser = OptionParser( epilog = epilog,
description = description,
version = "%prog " + PYJIGDO_VERSION,
usage = usage)
except TypeError:
parser = OptionParser()
##
## Generated Defaults
##
default_base_path = os.getcwd()
default_dest = default_base_path
default_work = os.path.join(default_base_path, 'pyjigdo-data')
default_logfile = os.path.join(default_base_path, 'pyjigdo.log')
default_fallback = 3
default_max_attempts = 6
default_timeout = 30
default_threads = 8
default_stuff_bits = default_threads*10
default_stuff_then_remove = False
default_jigdo_file_location = "/usr/bin/jigdo-file"
##
## Runtime Options
##
runtime_group = parser.add_option_group(_("Runtime Options"))
runtime_group.add_option( "--jigdo-file-bin",
dest = "jigdo_file_bin",
action = "store",
default = default_jigdo_file_location,
help = _("Use given jigdo-file binary. (Default: %s)" % default_jigdo_file_location))
runtime_group.add_option( "--list-images",
dest = "list_images",
action = "store_true",
default = False,
help = _("List available images for given Jigdo files and exit."))
##
## Logging Options
##
runtime_group.add_option( "-d", "--debug",
dest = "debug",
action = "store_true",
default = False,
help = _("Set debugging to on."))
runtime_group.add_option( "-v", "--verbose",
dest = "verbose",
action = "count",
default = 0,
help = _("Increase verbosity."))
runtime_group.add_option( "--logfile",
dest = "log_file",
action = "store",
default = default_logfile,
help = _("Logfile location. (Default: %s)" % default_logfile))
## Information Options
general_group = parser.add_option_group(_("General Options"))
general_group.add_option( "--info",
dest = "jigdo_info",
action = "store_true",
default = False,
help = _("Print information about the jigdo image and exit."))
general_group.add_option( "--fallback",
dest = "fallback_number",
action = "store",
default = default_fallback,
type = 'int',
help = _("Number of public mirrors to try before using a fallback mirror. (Default: %s)" % default_fallback),
metavar = _("[number of tries]"))
general_group.add_option( "--servers-only",
dest = "servers_only",
action = "store_true",
default = False,
help = _("Don't use mirrorlists, if present. (Default: False)"))
general_group.add_option( "--max-attempts",
dest = "max_download_attempts",
action = "store",
default = default_max_attempts,
type = 'int',
help = _("Max number of tries to get a file before giving up. (Default: %s)" % default_max_attempts),
metavar = _("[number of tries]"))
general_group.add_option( "-t", "--timeout",
dest = "download_timeout",
action = "store",
default = default_timeout,
type = 'float',
help = _("Number of seconds to wait before switching to different slice source. (Default: %s)" % default_timeout),
metavar = _("[number of seconds]"))
## Downloading Options
## Purpose: Allow a user to non-interactively download a defined image or images.
## This should include being able to download all images with one command.
## This is also for download options, like how many threads to use, to cache or not, etc.
download_group = parser.add_option_group(_("Download Options"))
download_group.add_option( "-n", "--image-numbers",
dest = "image_numbers",
default = [],
action = "append",
type = "str",
help = _("Download a given comma-separated list of image number(s) or range(s). e.g.: \"7,15,23,8-13\""),
metavar = _("[image numbers]"))
download_group.add_option( "-f", "--image-filenames",
dest = "image_filenames",
default = [],
action = "append",
type = "str",
help = _("Download a given comma-separated list of image filenames or file glob patterns. e.g.: \"*i386*CD*,foo.iso\""),
metavar = _("[image filenames]"))
download_group.add_option( "-a", "--all",
dest = "image_all",
action = "store_true",
default = False,
help = _("Download all images defined in jigdo. Same as -f \"*\""))
download_group.add_option( "--threads",
dest = "download_threads",
action = "store",
default = default_threads,
help = _("Number of threads to use when downloading. (Default: %s)" % default_threads),
type = "int",
metavar = _("[number]"))
download_group.add_option( "--stuff-bits",
dest = "download_stuff_bits",
action = "store",
default = default_stuff_bits,
help = _("Number of files to download before stuffing into ISO. (Default: %s)" % default_stuff_bits),
type = "int",
metavar = _("[number]"))
download_group.add_option( "--stuff-then-remove",
dest = "download_stuff_then_remove",
action = "store_true",
default = default_stuff_then_remove,
help = _("Remove downloaded data after stuffing into ISO. (Default: %s)" % default_stuff_then_remove))
download_group.add_option( "--download-storage",
dest = "download_storage",
action = "store",
default = default_work,
help = _("Directory to store any temporary data for downloads. (Default: %s)" % default_work),
metavar = _("[directory]"))
download_group.add_option( "--download-target",
dest = "download_target",
action = "store",
default = default_dest,
help = _("Directory to store final download data. (Default: %s)" % default_dest),
metavar = _("[directory]"))
# FIXME: We need to figure out a way to take a list of mirror sources to try for a given
# Jigdo key (as defined in the jigdo) and add then as slice sources (and allow them to be
# used exclusively/priority, as in the case of a local mirror.)
# Possible solution is to use an append action option and ask for something like:
# --local-mirror Updates-i386-key,http://ourserver/path/to/updates/
# We would then inject all the members specified into our pool for slice data.
#
## Scan Options
## Purpose: Allow a user to specify directories to scan for files, including pointing
## to existing ISO image(s)
#
scan_group = parser.add_option_group(_("Scan Options"))
scan_group.add_option( "-s", "--scan-dir",
dest = "scan_dirs",
action = "append",
type = "str",
default = [],
help = _("Scan given directory for files needed by selected image(s)."),
metavar = _("[directory]"))
scan_group.add_option( "--scan-iso",
dest = "scan_isos",
action = "append",
type = "str",
default = [],
help = _("Mount and then scan existing ISO images for files needed by selected image(s)."),
metavar = _("[iso image]"))
# Parse Options, preserve the object for later use
self.parser = parser
(self.cli_options, self.jigdo_files) = parser.parse_args()
if not self.check_options():
self.show_help()
print _("\nMissing required option!\n")
sys.exit(1)
    def check_options(self):
        """ Check if we have the bare minimum needed options. """
        # At least one jigdo file argument must have been given on the CLI.
        if not self.jigdo_files: return False
        # FIXME: Don't restrict to just one source
        # jigdo file. This is needed because we don't
        # have a lockable UI yet.
        if len(self.jigdo_files) > 1:
            print _("Sorry, multiple jigdo files are not supported yet.")
            print _("Soon we will support this!")
            print _("Given files were:")
            for f in self.jigdo_files:
                print "\t%s" % f
            # If -f was also used, the extra "jigdo files" are likely a
            # shell-expanded glob; explain how to escape it before exiting.
            if self.cli_options.image_filenames:
                self.shell_escape_help()
            sys.exit(1)
        return True
    def show_help(self):
        """ Show the help, with a logo. """
        # PYJIGDO_LOGO is a module-level constant (defined outside this view).
        print PYJIGDO_LOGO+"\n"
        # self.parser is the OptionParser saved during option setup.
        self.parser.print_help()
    def shell_escape_help(self):
        """ Explain about shell escaping. """
        # Printed when the user's shell expanded a glob given to -f, so
        # pyjigdo received concrete file names instead of the pattern.
        print _("\nYou have used -f and/or some sort of shell glob. (*)")
        print _("Your shell expanded that option and did not correctly pass")
        print _("it into pyjigdo. For example, if you selected all the DVD")
        print _("images with '-f *DVD*' you need to select them with the")
        print _("following:")
        print _("\t-f \*DVD\*\n")
def run(self):
return self.base.run()
    def abort(self):
        """ Something has gone wrong. Try to shutdown and exit. """
        # FIXME: Add proper return codes and shutdown procedures for
        # the reactor and any other operations that might be running.
        # Non-zero exit status: signals failure to the calling shell.
        return 1
    def done(self):
        """ Make sure we are done and then exit. """
        # FIXME: Add checks for any last minute things and exit.
        # FIXME: Issue a report about what was done and what worked.
        # Zero exit status: signals success to the calling shell.
        return 0
# If we are being run interactively,
# it's time to start up.
if __name__ == "__main__":
    return_code = 0
    pyJigdo_interface = PyJigdo()
    pyJigdo_interface.base.run()
    # NOTE(review): 'async' became a reserved keyword in Python 3.7; this
    # attribute access only parses on Python 2 (consistent with the print
    # statements above).
    if pyJigdo_interface.base.async.pending_downloads:
        pyJigdo_interface.base.async.checkpoint(None)
        try:
            # The reactor run blocks until downloads finish or are interrupted.
            return_code = pyJigdo_interface.base.async.reactor.run()
        except KeyboardInterrupt:
            print "\n\n"
            pyJigdo_interface.base.log.status(_("Exiting on user request.\n"))
            return_code = pyJigdo_interface.abort()
    else:
        pyJigdo_interface.base.log.critical(_("pyJigdo started with nothing to do!"))
    sys.exit(return_code)
|
sanjayankur31/pyjigdo
|
pyjigdo_interface.py
|
Python
|
gpl-2.0
| 15,194
|
[
"VisIt"
] |
8ffbf54c917a9d22c0aae6c2fdc910d084dd148160036c52740217d3fe7d239d
|
""" This is a test of the chain
ReportsClient -> ReportsGeneratorHandler -> AccountingDB
It supposes that the DB is present, and that the service is running.
Also the service DataStore has to be up and running.
this is pytest!
"""
# pylint: disable=invalid-name,wrong-import-position
import datetime
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
gLogger.setLevel('DEBUG')
def createAccountingRecord():
    """Build a DataOperation accounting record pre-filled with dummy test values."""
    # Every field the DataOperation type expects, in one literal.
    accountingDict = {
        'OperationType': 'putAndRegister',
        'User': 'system',
        'Protocol': 'DataManager',
        'RegistrationTime': 0.0,
        'RegistrationOK': 0,
        'RegistrationTotal': 0,
        'Destination': 'se',
        'TransferTotal': 1,
        'TransferOK': 1,
        'TransferSize': 1,
        'TransferTime': 0.0,
        'FinalStatus': 'Successful',
        'Source': 'testSite',
    }
    record = DataOperation()
    record.setValuesFromDict(accountingDict)
    return record
def test_addAndRemove():
    """Round-trip: insert one DataOperation record, query it, remove it.

    Requires the AccountingDB, ReportsGeneratorHandler service and the
    DataStore service to be up (integration test, see module docstring).
    """
    # just inserting one record
    record = createAccountingRecord()
    record.setStartTime()
    record.setEndTime()
    res = gDataStoreClient.addRegister(record)
    assert res['OK']
    # commit flushes the register buffer to the DB
    res = gDataStoreClient.commit()
    assert res['OK']
    rc = ReportsClient()
    res = rc.listReports('DataOperation')
    assert res['OK']
    res = rc.listUniqueKeyValues('DataOperation')
    assert res['OK']
    # NOTE(review): start == end here (both utcnow()); presumably the service
    # widens or accepts an empty window — confirm against ReportsClient docs.
    res = rc.getReport('DataOperation', 'Successful transfers',
                       datetime.datetime.utcnow(), datetime.datetime.utcnow(),
                       {}, 'Destination')
    assert res['OK']
    # now removing that record
    res = gDataStoreClient.remove(record)
    assert res['OK']
|
andresailer/DIRAC
|
tests/Integration/AccountingSystem/Test_ReportsClient.py
|
Python
|
gpl-3.0
| 2,062
|
[
"DIRAC"
] |
7bdfeb18a37d946caceb48074828a762ad86672c4093f80e89a533086d99643f
|
## Description: utility functions used while loading NeuroML L1,2,3 files.
## Version 1.0 by Aditya Gilra, NCBS, Bangalore, India, 2011 for serial MOOSE
## Version 1.5 by Niraj Dudani, NCBS, Bangalore, India, 2012, modified for parallel MOOSE
## Version 1.6 by Aditya Gilra, NCBS, Bangalore, India, 2012, minor changes for parallel MOOSE
"""
Some useful constants like namespaces are defined.
And they can be set in ElementTree root element via set_neuroml_namespaces_attribs(neuromlroot).
Use tweak_model(root_element, params) to exclude certain populations and projections
while still including certain others.
indent(...) is an in-place prettyprint formatter copied from http://effbot.org/zone/element-lib.htm
"""
from __future__ import print_function
from xml.etree import cElementTree as ET
from xml.etree import ElementTree as slowET
import math
import os
neuroml_debug = False
# XML namespace URIs used by NeuroML level 1/2/3 documents.
neuroml_ns='http://morphml.org/neuroml/schema'
nml_ns='http://morphml.org/networkml/schema'
mml_ns='http://morphml.org/morphml/schema'
bio_ns='http://morphml.org/biophysics/schema'
cml_ns='http://morphml.org/channelml/schema'
meta_ns='http://morphml.org/metadata/schema'
xsi_ns='http://www.w3.org/2001/XMLSchema-instance'
### ElementTree parse works an order of magnitude or more faster than minidom
### BUT it doesn't keep the original namespaces,
## from http://effbot.org/zone/element-namespaces.htm , I got _namespace_map
## neuroml_ns, bio_ns, mml_ns, etc are defined above
# NOTE(review): _namespace_map is a private ElementTree attribute; verify it
# still exists on the Python versions this module targets.
slowET._namespace_map[neuroml_ns] = 'neuroml'
slowET._namespace_map[nml_ns] = 'nml'
slowET._namespace_map[mml_ns] = 'mml'
slowET._namespace_map[bio_ns] = 'bio'
slowET._namespace_map[cml_ns] = 'cml'
slowET._namespace_map[meta_ns] = 'meta'
slowET._namespace_map[xsi_ns] = 'xsi'
### cElementTree is much faster than ElementTree and is API compatible with the latter,
### but instead of _namespace_map above, use register_namespace below ...
### but this works only with python2.7 onwards, so stick to above,
### with import elementtree.ElementTree alongwith importing cElementTree as at
### http://dev.blogs.nuxeo.com/2006/02/elementtree-serialization-namespace-prefixes.html
#ET.register_namespace('neuroml',neuroml_ns)
#ET.register_namespace('nml',nml_ns)
#ET.register_namespace('mml',mml_ns)
#ET.register_namespace('bio',bio_ns)
#ET.register_namespace('cml',cml_ns)
#ET.register_namespace('meta',meta_ns)
#ET.register_namespace('xsi',xsi_ns)
CELSIUS_default = 32.0 # deg C # default temperature if meta:property tag for temperature is not present
ZeroCKelvin = 273.15 # zero dec C in Kelvin
VMIN = -0.1 # Volts
VMAX = 0.1 # Volts
NDIVS = 200 # number
dv = ( VMAX - VMIN ) / NDIVS # Volts # voltage step per division
def set_neuroml_namespaces_attribs(neuromlroot):
    """Attach the standard NeuroML namespace declarations to the root
    element, skipping any that are already present."""
    namespace_attribs = (
        ("xmlns", neuroml_ns),
        ("xmlns:nml", nml_ns),
        ("xmlns:mml", mml_ns),
        ("xmlns:bio", bio_ns),
        ("xmlns:cml", cml_ns),
        ("xmlns:meta", meta_ns),
    )
    for attrib_name, attrib_value in namespace_attribs:
        set_attrib_if_not_found(neuromlroot, attrib_name, attrib_value)
    ## later doc.write() assigns the xsi namespace a second time
    ## causing double definition and problem with xsltproc,
    ## hence xmlns:xsi is deliberately NOT set here.
def set_attrib_if_not_found(elem, name, value):
    """Set attribute `name` to `value` on `elem` only when it is absent;
    an existing value is never overwritten."""
    if elem.get(name) is not None:
        return
    elem.set(name, value)
def tweak_model(root_element, params):
    """Prune the model tree per the optional 'excludePopulations',
    'excludeProjections' and 'onlyInclude' entries of `params`."""
    if 'excludePopulations' in params:
        ## both excludePopulations and excludeProjections must be present together
        pruneExcludes(root_element,
                      params['excludePopulations'],
                      params['excludeProjections'])
    if 'onlyInclude' in params:
        keepOnlyInclude(root_element, params['onlyInclude'])
def pruneExcludes(network, excludepops, excludeprojs):
    """
    remove the populations in the excludepops list
    remove the projections in the excludeprojs list

    A population/projection is removed when ANY entry of the exclude list
    occurs as a SUBSTRING of its 'name' attribute.
    """
    def _prune(container_tag, element_tag, exclude_names):
        # Mark elements whose name contains any excluded name as a substring.
        container = network.find(".//{" + nml_ns + "}" + container_tag)
        doomed = [
            element
            for element in container.findall(".//{" + nml_ns + "}" + element_tag)
            if any(name in element.attrib["name"] for name in exclude_names)
        ]
        # De-duplicate so the same element is never removed twice.
        for element in set(doomed):
            container.remove(element)

    _prune("populations", "population", excludepops)
    _prune("projections", "projection", excludeprojs)
def keepOnlyInclude(network, onlyInclude):
    """
    Keep only the cells that are in onlyInclude['includePopulation']
    and also keep cells that are connected to cells in onlyInclude['includePopulation']
    and keep connections to any of the cells in onlyInclude['includePopulation'].
    Prune the extraneous connections
    but keep those connections in onlyInclude['includeProjections']
    on cells connected to those in onlyInclude['includePopulation']

    onlyInclude['includePopulation'] is a (population name, cell id list) pair;
    onlyInclude['includeProjections'] is an iterable of projection-name substrings.
    """
    ### Remove the connections that do not connect to cells in onlyInclude.
    ### Simultaneously build up a list of cells 'includeCellsDict' that connect to cells in onlyInclude.
    ### Of course this includeCellDict must have the originally included cells!
    ### At the end of this pruning, even if some population-s / projection-s have no elements,
    ### it doesn't matter, as this findall() returns an empty list and not None - so no error.
    ### Further I am not changing the 'size' attrib in <instances> and <connections>,
    ### as it's not used by this reader and I'm not saving the network after pruning.
    ### Do not prune 'includeProjections' immediately;
    ### prune them later avoiding second order cells in includeCellsDict.
    # NOTE(review): findall() returns a materialized list, so removing elements
    # from the tree while looping over that list is safe here.
    includepopname = onlyInclude['includePopulation'][0]
    includecellids = onlyInclude['includePopulation'][1]
    ## first of all, include those primary cells that the user instructs.
    includeCellsDict = {includepopname:includecellids}
    ## projections 'includeProjs' will be pruned later, keeping connections to second order cells.
    includeProjs = []
    print("removing obviously extra connections in ... ")
    for projection in network.findall(".//{"+nml_ns+"}projection"):
        projname = projection.attrib['name']
        includeProj = False
        ## check if any of the given includeprojname is a substring of this projname
        for includeprojname in onlyInclude['includeProjections']:
            if includeprojname in projname:
                includeProj = True
        ## if it is a substring, add this projection
        ## to the list of projections to be pruned later
        if includeProj:
            includeProjs.append(projection)
        source = projection.attrib["source"]
        target = projection.attrib["target"]
        print(projname, source, target)
        connections = projection.find(".//{"+nml_ns+"}connections")
        if connections is not None:
            for connection in connections.findall(".//{"+nml_ns+"}connection"):
                pre_cell_id = connection.attrib['pre_cell_id']
                ## is the user-included cell a source cell of the connection?
                includecellinsource = (pre_cell_id in includecellids and includepopname==source)
                post_cell_id = connection.attrib['post_cell_id']
                ## is the user-included cell a target cell of the connection?
                includecellintarget = (post_cell_id in includecellids and includepopname==target)
                ## the second-order cell connected to the user-included cell must also be kept
                if includecellinsource:
                    ## since source is included, include the target also
                    ## there can be self connections between the same population i.e. same source and target
                    try:
                        includeCellsDict[target].append(post_cell_id)
                    except KeyError: # create this population entry in the dictionary if not present
                        includeCellsDict[target] = [post_cell_id]
                elif includecellintarget:
                    ## since target is included, include the source also, except if source is a file
                    if 'file' not in source:
                        try:
                            includeCellsDict[source].append(pre_cell_id)
                        except KeyError: # create this population entry in the dictionary if not present
                            includeCellsDict[source] = [pre_cell_id]
                else:
                    ## this connection is extraneous
                    ## but remove this connection only if
                    ## it is not part of the projections to be pruned later
                    if not includeProj:
                        connections.remove(connection)
    ## convert includeCellsDict elements to set-s rather than lists
    ## to have only unique cell_ids and save time below.
    for key in includeCellsDict:
        includeCellsDict[key] = set(includeCellsDict[key])
    print("removing extra cells ... ")
    ### remove the cells that are not in includeCellsDict
    populations = network.find(".//{"+nml_ns+"}populations")
    for population in network.findall(".//{"+nml_ns+"}population"):
        popname = population.attrib["name"]
        if popname in includeCellsDict:
            includecellids = includeCellsDict[popname]
            instances = population.find(".//{"+nml_ns+"}instances")
            for instance in instances.findall(".//{"+nml_ns+"}instance"):
                ## not a connected cell, so remove
                if instance.attrib['id'] not in includecellids:
                    instances.remove(instance)
        else: ## this whole population is not required!
            populations.remove(population)
    ### Prune the 'includeProjections' that we skipped pruning before,
    ### while keeping connections to second order cells!
    for projection in includeProjs:
        print("removing projection",projection.attrib['name'],\
                "keeping second-order connections.")
        source = projection.attrib["source"]
        target = projection.attrib["target"]
        ## boolean: True if includeCellsDict has key source
        source_in_includeCellsDict = source in includeCellsDict
        ## boolean: True if the word 'file' occurs in str source
        file_in_source = 'file' in source
        ## boolean: True if includeCellsDict has key target
        target_in_includeCellsDict = target in includeCellsDict
        connections = projection.find(".//{"+nml_ns+"}connections")
        for connection in connections.findall(".//{"+nml_ns+"}connection"):
            ## is the included cell a source cell of the connection?
            ## keep 'file' as source also.
            if file_in_source:
                includecellinsource = True
            elif source_in_includeCellsDict and \
                    connection.attrib['pre_cell_id'] in includeCellsDict[source]:
                includecellinsource = True
            else: includecellinsource = False
            ## is the included cell a target cell of the connection?
            if target_in_includeCellsDict and \
                    connection.attrib['post_cell_id'] in includeCellsDict[target]:
                includecellintarget = True
            else: includecellintarget= False
            ## this connection is extraneous
            ## if either sourcecell or targetcell is not included.
            if not includecellinsource or not includecellintarget:
                ## remove is a very slow operation!
                connections.remove(connection)
def indent(elem, level=0):
""" in-place prettyprint formatter copied from http://effbot.org/zone/element-lib.htm
first call indent(root, level=0), and then doc.write(filename) ."""
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
## make a list of safe functions possible to be used safely in eval()
# (names resolved from the math module below; 'e' and 'pi' are constants)
safe_list = ('acos', 'asin', 'atan', 'atan2', 'ceil',
    'cos', 'cosh', 'degrees', 'e', 'exp', 'fabs', 'floor',
    'fmod', 'frexp', 'hypot', 'ldexp', 'log', 'log10', 'modf',
    'pi', 'pow', 'radians', 'sin', 'sinh', 'sqrt', 'tan', 'tanh')
## use the list to filter the local namespace
safe_dict = {k:getattr(math, k) for k in safe_list}
## add any needed builtins back in.
safe_dict['abs'] = abs
def find_first_file(name, path):
    """ Finds and returns the first occurence of the filename in the directory tree under a given path.
    Returns None when the file is not found anywhere under `path`. """
    for dirpath, _dirnames, filenames in os.walk(path):
        if name in filenames:
            return os.path.join(dirpath, name)
    # explicit for readability; a plain fall-through would also yield None
    return None
|
subhacom/moose-core
|
python/moose/neuroml/utils.py
|
Python
|
gpl-3.0
| 13,912
|
[
"MOOSE"
] |
7562cb362440e51e4de8abc1d0109bc6f870e9fb697a269f43e5a2c045f65a89
|
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""this module contains some utilities to navigate in the tree or to
extract information from it
"""
__docformat__ = "restructuredtext en"
from logilab.astng.exceptions import ASTNGBuildingException
from logilab.astng.builder import parse
class ASTWalker:
    """a walker visiting a tree in preorder, calling on the handler:

    * visit_<class name> on entering a node, where class name is the class of
    the node in lower case

    * leave_<class name> on leaving a node, where class name is the class of
    the node in lower case
    """

    def __init__(self, handler):
        self.handler = handler
        # cache: node class -> (enter callback, leave callback)
        self._cache = {}

    def walk(self, node, _done=None):
        """walk on the tree from <node>, getting callbacks from handler"""
        _done = set() if _done is None else _done
        # a node reached twice means the tree has a cycle: hard failure
        if node in _done:
            raise AssertionError((id(node), node, node.parent))
        _done.add(node)
        self.visit(node)
        for child in node.get_children():
            self.handler.set_context(node, child)
            assert child is not node
            self.walk(child, _done)
        self.leave(node)
        assert node.parent is not node

    def get_callbacks(self, node):
        """get callbacks from handler for the visited node"""
        klass = node.__class__
        try:
            return self._cache[klass]
        except KeyError:
            handler = self.handler
            kid = klass.__name__.lower()
            # fall back to visit_default / leave_default when no
            # class-specific callback exists on the handler
            callbacks = (
                getattr(handler, 'visit_%s' % kid,
                        getattr(handler, 'visit_default', None)),
                getattr(handler, 'leave_%s' % kid,
                        getattr(handler, 'leave_default', None)),
            )
            self._cache[klass] = callbacks
            return callbacks

    def visit(self, node):
        """call the enter callback registered for <node>'s class, if any"""
        enter_cb = self.get_callbacks(node)[0]
        if enter_cb is not None:
            enter_cb(node)

    def leave(self, node):
        """call the leave callback registered for <node>'s class, if any"""
        leave_cb = self.get_callbacks(node)[1]
        if leave_cb is not None:
            leave_cb(node)
class LocalsVisitor(ASTWalker):
    """visit a project by traversing the locals dictionary"""

    def __init__(self):
        # the visitor is its own handler
        ASTWalker.__init__(self, self)
        # nodes already handled; values are irrelevant, only membership matters
        self._visited = {}

    def visit(self, node):
        """launch the visit starting from the given node"""
        if node in self._visited:
            return None
        self._visited[node] = 1 # FIXME: use set ?
        enter_cb, leave_cb = self.get_callbacks(node)
        if enter_cb is not None:
            enter_cb(node)
        if 'locals' in node.__dict__: # skip Instance and other proxy
            for _name, local_node in list(node.items()):
                self.visit(local_node)
        if leave_cb is not None:
            return leave_cb(node)
def _check_children(node):
    """a helper function to check children - parent relations"""
    # Recursively walks the whole subtree, printing a diagnostic for every
    # broken parent link and raising ASTNGBuildingException on the first one.
    for child in node.get_children():
        ok = False
        if child is None:
            print("Hm, child of %s is None" % node)
            continue
        if not hasattr(child, 'parent'):
            print(" ERROR: %s has child %s %x with no parent" % (node, child, id(child)))
        elif not child.parent:
            print(" ERROR: %s has child %s %x with parent %r" % (node, child, id(child), child.parent))
        elif child.parent is not node:
            print(" ERROR: %s %x has child %s %x with wrong parent %s" % (node,
                                      id(node), child, id(child), child.parent))
        else:
            ok = True
        if not ok:
            print("lines;", node.lineno, child.lineno)
            print("of module", node.root(), node.root().name)
            raise ASTNGBuildingException
        _check_children(child)
class TreeTester(object):
    '''A helper class to see _ast tree and compare with astng tree

    indent: string for tree indent representation
    lineno: bool to tell if we should print the line numbers

    >>> tester = TreeTester('print')
    >>> print tester.native_tree_repr()

    <Module>
    .   body = [
    .   <Print>
    .   .   nl = True
    .   ]
    >>> print tester.astng_tree_repr()
    Module()
        body = [
        Print()
            dest = 
            values = [
            ]
        ]
    '''
    # string prepended once per nesting level in the native tree dump
    indent = '. '
    # set True to include lineno/col_offset attributes in the dump
    lineno = False
    def __init__(self, sourcecode):
        # accumulator for the textual representation built by the methods below
        self._string = ''
        self.sourcecode = sourcecode
        self._ast_node = None
        self.build_ast()
    def build_ast(self):
        """build the _ast tree from the source code"""
        # 'parse' comes from logilab.astng.builder (module-level import)
        self._ast_node = parse(self.sourcecode)
    def native_tree_repr(self, node=None, indent=''):
        """get a nice representation of the _ast tree"""
        self._string = ''
        if node is None:
            node = self._ast_node
        self._native_repr_tree(node, indent)
        return self._string
    def _native_repr_tree(self, node, indent, _done=None):
        """recursive method for the native tree representation"""
        from _ast import Load as _Load, Store as _Store, Del as _Del
        from _ast import AST as Node
        if _done is None:
            _done = set()
        # guard against cycles in the (normally acyclic) _ast tree
        if node in _done:
            self._string += '\nloop in tree: %r (%s)' % (node,
                getattr(node, 'lineno', None))
            return
        _done.add(node)
        self._string += '\n' + indent + '<%s>' % node.__class__.__name__
        indent += self.indent
        if not hasattr(node, '__dict__'):
            self._string += '\n' + self.indent + " ** node has no __dict__ " + str(node)
            return
        node_dict = node.__dict__
        if hasattr(node, '_attributes'):
            for a in node._attributes:
                attr = node_dict[a]
                if attr is None:
                    continue
                # line/column info is noise unless self.lineno is set
                if a in ("lineno", "col_offset") and not self.lineno:
                    continue
                self._string +='\n' + indent + a + " = " + repr(attr)
        for field in node._fields or ():
            attr = node_dict[field]
            if attr is None:
                continue
            if isinstance(attr, list):
                if not attr:
                    continue
                self._string += '\n' + indent + field + ' = ['
                for elt in attr:
                    self._native_repr_tree(elt, indent, _done)
                self._string += '\n' + indent + ']'
                continue
            # expression-context markers carry no information worth printing
            if isinstance(attr, (_Load, _Store, _Del)):
                continue
            if isinstance(attr, Node):
                self._string += '\n' + indent + field + " = "
                self._native_repr_tree(attr, indent, _done)
            else:
                self._string += '\n' + indent + field + " = " + repr(attr)
    def build_astng_tree(self):
        """build astng tree from the _ast tree
        """
        from logilab.astng.builder import ASTNGBuilder
        tree = ASTNGBuilder().string_build(self.sourcecode)
        return tree
    def astng_tree_repr(self, ids=False):
        """build the astng tree and return a nice tree representation"""
        mod = self.build_astng_tree()
        return mod.repr_tree(ids)
__all__ = ('LocalsVisitor', 'ASTWalker',)
|
tlksio/tlksio
|
env/lib/python3.4/site-packages/logilab/astng/utils.py
|
Python
|
mit
| 8,300
|
[
"VisIt"
] |
74f2b87163f42619d6eb1f6775c25199da9ed61e762a84ac3e107c813cb9fb4c
|
from ..utils import *
##
# Minions
# Dalaran Aspirant
class AT_006:
    # Inspire trigger: apply the AT_006e enchantment to this minion
    inspire = Buff(SELF, "AT_006e")

# AT_006e enchantment: +1 Spell Damage (spellpower)
AT_006e = buff(spellpower=1)
# Spellslinger
class AT_007:
    # On play: each player receives a random spell card
    play = Give(ALL_PLAYERS, RandomSpell())
# Coldarra Drake
class AT_008:
    # Aura: sets HEROPOWER_ADDITIONAL_ACTIVATIONS to -1 on the friendly hero
    # power (presumably meaning unlimited uses — confirm against the engine)
    update = Refresh(FRIENDLY_HERO_POWER, {GameTag.HEROPOWER_ADDITIONAL_ACTIVATIONS: SET(-1)})
# Rhonin
class AT_009:
    # Deathrattle: give the controller three copies of card "EX1_277"
    deathrattle = Give(CONTROLLER, "EX1_277") * 3
##
# Spells
# Flame Lance
class AT_001:
    # On play: deal 8 damage to the target
    play = Hit(TARGET, 8)
# Arcane Blast
class AT_004:
    # On play: deal 2 damage to the target
    play = Hit(TARGET, 2)
# Polymorph: Boar
class AT_005:
    # On play: transform the target into card "AT_005t"
    play = Morph(TARGET, "AT_005t")
##
# Secrets
# Effigy
class AT_002:
    # Secret, triggered by the death of a friendly minion: reveal and summon
    # a random minion with the same cost as the one that died.
    # NOTE(review): FULL_BOARD | (...) appears to gate the effect when the
    # board is full — confirm the evaluator's '|' semantics in the DSL.
    secret = Death(FRIENDLY + MINION).on(FULL_BOARD | (
        Reveal(SELF),
        Summon(CONTROLLER, RandomMinion(cost=COST(Death.ENTITY)))
    ))
|
smallnamespace/fireplace
|
fireplace/cards/tgt/mage.py
|
Python
|
agpl-3.0
| 762
|
[
"BLAST"
] |
2cf1613b6e3d89e99113281da8d6b0aefbcf62add4f8d8b30dd352f0a3ff9c83
|
"""Some test functions for bivariate interpolation.
Most of these have been yoinked from ACM TOMS 792.
http://netlib.org/toms/792
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import numpy as np
from .triangulate import Triangulation
class TestData(dict):
    """A dict whose entries are also readable/writable as attributes."""
    def __init__(self, *args, **kwds):
        super(TestData, self).__init__(*args, **kwds)
        # alias the attribute namespace to the dict itself, so item and
        # attribute access see the same data
        self.__dict__ = self
class TestDataSet(object):
    """A simple attribute bag initialized from keyword arguments."""
    def __init__(self, **kwds):
        for key, value in kwds.items():
            setattr(self, key, value)
data = TestData(
franke100=TestDataSet(
x=np.array([0.0227035, 0.0539888, 0.0217008, 0.0175129, 0.0019029,
-0.0509685, 0.0395408, -0.0487061, 0.0315828, -0.0418785,
0.1324189, 0.1090271, 0.1254439, 0.093454, 0.0767578,
0.1451874, 0.0626494, 0.1452734, 0.0958668, 0.0695559,
0.2645602, 0.2391645, 0.208899, 0.2767329, 0.1714726,
0.2266781, 0.1909212, 0.1867647, 0.2304634, 0.2426219,
0.3663168, 0.3857662, 0.3832392, 0.3179087, 0.3466321,
0.3776591, 0.3873159, 0.3812917, 0.3795364, 0.2803515,
0.4149771, 0.4277679, 0.420001, 0.4663631, 0.4855658,
0.4092026, 0.4792578, 0.4812279, 0.3977761, 0.4027321,
0.5848691, 0.5730076, 0.6063893, 0.5013894, 0.5741311,
0.6106955, 0.5990105, 0.5380621, 0.6096967, 0.5026188,
0.6616928, 0.6427836, 0.6396475, 0.6703963, 0.7001181,
0.633359, 0.6908947, 0.6895638, 0.6718889, 0.6837675,
0.7736939, 0.7635332, 0.7410424, 0.8258981, 0.7306034,
0.8086609, 0.8214531, 0.729064, 0.8076643, 0.8170951,
0.8424572, 0.8684053, 0.8366923, 0.9418461, 0.8478122,
0.8599583, 0.91757, 0.8596328, 0.9279871, 0.8512805,
1.044982, 0.9670631, 0.9857884, 0.9676313, 1.0129299,
0.965704, 1.0019855, 1.0359297, 1.0414677, 0.9471506]),
y=np.array([-0.0310206, 0.1586742, 0.2576924, 0.3414014, 0.4943596,
0.5782854, 0.6993418, 0.7470194, 0.9107649, 0.996289,
0.050133, 0.0918555, 0.2592973, 0.3381592, 0.4171125,
0.5615563, 0.6552235, 0.7524066, 0.9146523, 0.9632421,
0.0292939, 0.0602303, 0.2668783, 0.3696044, 0.4801738,
0.5940595, 0.6878797, 0.8185576, 0.9046507, 0.9805412,
0.0396955, 0.0684484, 0.2389548, 0.3124129, 0.4902989,
0.5199303, 0.6445227, 0.8203789, 0.8938079, 0.9711719,
-0.0284618, 0.1560965, 0.2262471, 0.3175094, 0.3891417,
0.5084949, 0.6324247, 0.7511007, 0.8489712, 0.9978728,
-0.0271948, 0.127243, 0.2709269, 0.3477728, 0.4259422,
0.6084711, 0.6733781, 0.7235242, 0.9242411, 1.0308762,
0.0255959, 0.0707835, 0.2008336, 0.3259843, 0.4890704,
0.5096324, 0.669788, 0.7759569, 0.9366096, 1.0064516,
0.0285374, 0.1021403, 0.1936581, 0.3235775, 0.4714228,
0.6091595, 0.6685053, 0.8022808, 0.847679, 1.0512371,
0.0380499, 0.0902048, 0.2083092, 0.3318491, 0.4335632,
0.5910139, 0.6307383, 0.8144841, 0.904231, 0.969603,
-0.01209, 0.1334114, 0.2695844, 0.3795281, 0.4396054,
0.5044425, 0.6941519, 0.7459923, 0.8682081, 0.9801409])),
franke33=TestDataSet(
x=np.array([5.00000000e-02, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 1.00000000e-01, 1.00000000e-01,
1.50000000e-01, 2.00000000e-01, 2.50000000e-01,
3.00000000e-01, 3.50000000e-01, 5.00000000e-01,
5.00000000e-01, 5.50000000e-01, 6.00000000e-01,
6.00000000e-01, 6.00000000e-01, 6.50000000e-01,
7.00000000e-01, 7.00000000e-01, 7.00000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
8.00000000e-01, 8.00000000e-01, 8.50000000e-01,
9.00000000e-01, 9.00000000e-01, 9.50000000e-01,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00]),
y=np.array([4.50000000e-01, 5.00000000e-01, 1.00000000e+00,
0.00000000e+00, 1.50000000e-01, 7.50000000e-01,
3.00000000e-01, 1.00000000e-01, 2.00000000e-01,
3.50000000e-01, 8.50000000e-01, 0.00000000e+00,
1.00000000e+00, 9.50000000e-01, 2.50000000e-01,
6.50000000e-01, 8.50000000e-01, 7.00000000e-01,
2.00000000e-01, 6.50000000e-01, 9.00000000e-01,
1.00000000e-01, 3.50000000e-01, 8.50000000e-01,
4.00000000e-01, 6.50000000e-01, 2.50000000e-01,
3.50000000e-01, 8.00000000e-01, 9.00000000e-01,
0.00000000e+00, 5.00000000e-01, 1.00000000e+00])),
lawson25=TestDataSet(
x=np.array([0.1375, 0.9125, 0.7125, 0.225, -0.05, 0.475, 0.05,
0.45, 1.0875, 0.5375, -0.0375, 0.1875, 0.7125, 0.85,
0.7, 0.275, 0.45, 0.8125, 0.45, 1., 0.5,
0.1875, 0.5875, 1.05, 0.1]),
y=np.array([0.975, 0.9875, 0.7625, 0.8375, 0.4125, 0.6375,
-0.05, 1.0375, 0.55, 0.8, 0.75, 0.575,
0.55, 0.4375, 0.3125, 0.425, 0.2875, 0.1875,
-0.0375, 0.2625, 0.4625, 0.2625, 0.125, -0.06125,
0.1125])),
random100=TestDataSet(
x=np.array([0.0096326, 0.0216348, 0.029836, 0.0417447, 0.0470462,
0.0562965, 0.0646857, 0.0740377, 0.0873907, 0.0934832,
0.1032216, 0.1110176, 0.1181193, 0.1251704, 0.132733,
0.1439536, 0.1564861, 0.1651043, 0.1786039, 0.1886405,
0.2016706, 0.2099886, 0.2147003, 0.2204141, 0.2343715,
0.240966, 0.252774, 0.2570839, 0.2733365, 0.2853833,
0.2901755, 0.2964854, 0.3019725, 0.3125695, 0.3307163,
0.3378504, 0.3439061, 0.3529922, 0.3635507, 0.3766172,
0.3822429, 0.3869838, 0.3973137, 0.4170708, 0.4255588,
0.4299218, 0.4372839, 0.4705033, 0.4736655, 0.4879299,
0.494026, 0.5055324, 0.5162593, 0.5219219, 0.5348529,
0.5483213, 0.5569571, 0.5638611, 0.5784908, 0.586395,
0.5929148, 0.5987839, 0.6117561, 0.6252296, 0.6331381,
0.6399048, 0.6488972, 0.6558537, 0.6677405, 0.6814074,
0.6887812, 0.6940896, 0.7061687, 0.7160957, 0.7317445,
0.7370798, 0.746203, 0.7566957, 0.7699998, 0.7879347,
0.7944014, 0.8164468, 0.8192794, 0.8368405, 0.8500993,
0.8588255, 0.8646496, 0.8792329, 0.8837536, 0.8900077,
0.8969894, 0.9044917, 0.9083947, 0.9203972, 0.9347906,
0.9434519, 0.9490328, 0.9569571, 0.9772067, 0.9983493]),
y=np.array([0.3083158, 0.2450434, 0.8613847, 0.0977864, 0.3648355,
0.7156339, 0.5311312, 0.9755672, 0.1781117, 0.5452797,
0.1603881, 0.7837139, 0.9982015, 0.6910589, 0.104958,
0.8184662, 0.7086405, 0.4456593, 0.1178342, 0.3189021,
0.9668446, 0.7571834, 0.2016598, 0.3232444, 0.4368583,
0.8907869, 0.064726, 0.5692618, 0.2947027, 0.4332426,
0.3347464, 0.7436284, 0.1066265, 0.8845357, 0.515873,
0.9425637, 0.4799701, 0.1783069, 0.114676, 0.8225797,
0.2270688, 0.4073598, 0.887508, 0.7631616, 0.9972804,
0.4959884, 0.3410421, 0.249812, 0.6409007, 0.105869,
0.5411969, 0.0089792, 0.8784268, 0.5515874, 0.4038952,
0.1654023, 0.2965158, 0.3660356, 0.0366554, 0.950242,
0.2638101, 0.9277386, 0.5377694, 0.7374676, 0.4674627,
0.9186109, 0.0416884, 0.1291029, 0.6763676, 0.8444238,
0.3273328, 0.1893879, 0.0645923, 0.0180147, 0.8904992,
0.4160648, 0.4688995, 0.2174508, 0.5734231, 0.8853319,
0.8018436, 0.6388941, 0.8931002, 0.1000558, 0.2789506,
0.9082948, 0.3259159, 0.8318747, 0.0508513, 0.970845,
0.5120548, 0.2859716, 0.9581641, 0.6183429, 0.3779934,
0.4010423, 0.9478657, 0.7425486, 0.8883287, 0.549675])),
uniform9=TestDataSet(
x=np.array([1.25000000e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 1.25000000e-01, 1.25000000e-01,
1.25000000e-01, 1.25000000e-01, 1.25000000e-01,
1.25000000e-01, 1.25000000e-01, 1.25000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00]),
y=np.array([0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00])),
)
def constant(x, y):
    """Flat test surface: z = 1 everywhere, matching x's shape and dtype
    (y is ignored)."""
    return np.ones_like(x)
constant.title = 'Constant'
def xramp(x, y):
    """Plane rising linearly with x; independent of y.

    Returns x itself (no copy is made).
    """
    return x
xramp.title = 'X Ramp'
def yramp(x, y):
    """Plane rising linearly with y; independent of x.

    Returns y itself (no copy is made).
    """
    return y
yramp.title = 'Y Ramp'
def exponential(x, y):
    """Franke's test function: three Gaussian peaks and one Gaussian dip.

    x and y are expected in [0, 1] and are internally rescaled to [0, 9].
    This is the "exponential" surface from the ACM TOMS 792 test suite.
    """
    x = x * 9
    y = y * 9
    x1 = x + 1.0
    x2 = x - 2.0
    x4 = x - 4.0
    x7 = x - 7.0
    # Bug fix: this previously read ``y1 = x + 1.0``, which deviates from
    # Franke's definition -- the second term's linear factor is (9y+1)/10,
    # so y1 must be derived from y, not x.
    y1 = y + 1.0
    y2 = y - 2.0
    y3 = y - 3.0
    y7 = y - 7.0
    f = (0.75 * np.exp(-(x2 * x2 + y2 * y2) / 4.0) +
         0.75 * np.exp(-x1 * x1 / 49.0 - y1 / 10.0) +
         0.5 * np.exp(-(x7 * x7 + y3 * y3) / 4.0) -
         0.2 * np.exp(-x4 * x4 - y7 * y7))
    return f
exponential.title = 'Exponential and Some Gaussians'
def cliff(x, y):
    """Sharp tanh step running along the diagonal y = x."""
    return np.tanh(9.0 * (y - x) + 1.0) / 9.0
cliff.title = 'Cliff'
def saddle(x, y):
    """Saddle-shaped surface: cosine ridge over a quadratic trough in x."""
    numerator = 1.25 + np.cos(5.4 * y)
    denominator = 6.0 + 6.0 * (3 * x - 1.0) ** 2
    return numerator / denominator
saddle.title = 'Saddle'
def gentle(x, y):
    """Broad Gaussian bump centered at (0.5, 0.5), peak value 1/3."""
    r2 = (x - 0.5) ** 2 + (y - 0.5) ** 2
    return np.exp(-5.0625 * r2) / 3.0
gentle.title = 'Gentle Peak'
def steep(x, y):
    """Narrow Gaussian spike centered at (0.5, 0.5), peak value 1/3."""
    r2 = (x - 0.5) ** 2 + (y - 0.5) ** 2
    return np.exp(-20.25 * r2) / 3.0
steep.title = 'Steep Peak'
def sphere(x, y):
    """Upper hemisphere of radius 8/9 centered at (0.5, 0.5); 0 outside."""
    # Squared-height term: positive inside the circle of radius 8/9.
    h = 64 - 81 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)
    inside = h >= 0
    # Clip before the sqrt so the masked-out branch never sees negatives.
    dome = np.sqrt(np.clip(h, 0, 100)) - 0.5
    return np.where(inside, dome, 0.0)
sphere.title = 'Sphere'
def trig(x, y):
    """Oscillatory surface: a cosine/sine product plus a mixed-term sine."""
    ripple = 2.0 * np.cos(10.0 * x) * np.sin(10.0 * y)
    cross = np.sin(10.0 * x * y)
    return ripple + cross
trig.title = 'Cosines and Sines'
def gauss(x, y):
    """A Gaussian peak riding on Gaussian ridges.

    The inputs are mapped from [0, 1] to [5, -5] so the peak sits at
    (0.5, 0.5), where the value is 2.5.
    """
    u = 5.0 - 10.0 * x
    v = 5.0 - 10.0 * y
    gu = np.exp(-u * u / 2)
    gv = np.exp(-v * v / 2)
    return gu + 0.75 * gv * (1 + gu)
gauss.title = 'Gaussian Peak and Gaussian Ridges'
def cloverleaf(x, y):
    """Four-lobed "cloverleaf" surface built from logistic factors."""
    ax = np.exp((10.0 - 20.0 * x) / 3.0)
    ay = np.exp((10.0 - 20.0 * y) / 3.0)
    # Logistic (sigmoid) factors in each coordinate.
    sx = 1.0 / (1.0 + ax)
    sy = 1.0 / (1.0 + ay)
    scale = ((20.0 / 3.0) ** 3 * ax * ay) ** 2
    return (scale * (sx * sy) ** 5 *
            (ax - 2.0 * sx) * (ay - 2.0 * sy))
cloverleaf.title = 'Cloverleaf'
def cosine_peak(x, y):
    """Exponentially damped cosine ripple radiating from (0.5, 0.5)."""
    r = np.hypot(80 * x - 40.0, 90 * y - 45.)
    return np.exp(-0.04 * r) * np.cos(0.15 * r)
cosine_peak.title = 'Cosine Peak'
# Every test surface exercised by the plotting and quality drivers below.
allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss,
            cloverleaf, cosine_peak]
class LinearTester(object):
    """Compare analytic test functions against their linear interpolant.

    Samples ``npoints`` random sites in ``xrange`` x ``yrange``,
    triangulates them, and can plot either the interpolated surface or
    the analytic reference on an ``nrange`` x ``nrange`` grid.
    """
    name = 'Linear'
    def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0),
                 nrange=101, npoints=250):
        self.xrange = xrange
        self.yrange = yrange
        self.nrange = nrange
        self.npoints = npoints
        # Fixed seed so every tester instance sees the same random sites.
        rng = np.random.RandomState(1234567890)
        self.x = rng.uniform(xrange[0], xrange[1], size=npoints)
        self.y = rng.uniform(yrange[0], yrange[1], size=npoints)
        self.tri = Triangulation(self.x, self.y)
    def replace_data(self, dataset):
        """Swap in a fixed point set (object with .x/.y) and retriangulate."""
        self.x = dataset.x
        self.y = dataset.y
        self.tri = Triangulation(self.x, self.y)
    def interpolator(self, func):
        """Return a linear extrapolator for func sampled at the sites."""
        z = func(self.x, self.y)
        return self.tri.linear_extrapolator(z, bbox=self.xrange + self.yrange)
    def plot(self, func, interp=True, plotter='imshow'):
        """Plot func over the grid.

        interp -- if true, plot the interpolant; else the analytic reference.
        plotter -- 'imshow' for an image, 'contour' for contour lines.
        """
        import matplotlib as mpl
        from matplotlib import pylab as pl
        if interp:
            # Grid-evaluate the interpolant via its slice interface.
            lpi = self.interpolator(func)
            z = lpi[self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
                    self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
        else:
            y, x = np.mgrid[
                self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
                self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
            z = func(x, y)
            # Zero out infinities so the color scaling stays sane.
            z = np.where(np.isinf(z), 0.0, z)
        extent = (self.xrange[0], self.xrange[1],
                  self.yrange[0], self.yrange[1])
        pl.ioff()
        pl.clf()
        pl.hot()  # Some like it hot
        if plotter == 'imshow':
            pl.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent,
                      origin='lower')
        elif plotter == 'contour':
            Y, X = np.ogrid[
                self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
                self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
            pl.contour(np.ravel(X), np.ravel(Y), z, 20)
        # Overlay the triangulation's edges on top of the surface.
        x = self.x
        y = self.y
        lc = mpl.collections.LineCollection(
            np.array([((x[i], y[i]), (x[j], y[j]))
                      for i, j in self.tri.edge_db]),
            colors=[(0, 0, 0, 0.2)])
        ax = pl.gca()
        ax.add_collection(lc)
        if interp:
            title = '%s Interpolant' % self.name
        else:
            title = 'Reference'
        if hasattr(func, 'title'):
            pl.title('%s: %s' % (func.title, title))
        else:
            pl.title(title)
        pl.show()
        pl.ion()
class NNTester(LinearTester):
    """LinearTester variant using natural-neighbor interpolation."""
    name = 'Natural Neighbors'
    def interpolator(self, func):
        """Return an nn extrapolator for func sampled at the sites."""
        z = func(self.x, self.y)
        return self.tri.nn_extrapolator(z, bbox=self.xrange + self.yrange)
def plotallfuncs(allfuncs=allfuncs):
    """For each test function, save six PNGs in the working directory:
    reference / natural-neighbor / linear, each as image and as contours.
    """
    from matplotlib import pylab as pl
    pl.ioff()
    nnt = NNTester(npoints=1000)
    lpt = LinearTester(npoints=1000)
    for func in allfuncs:
        print(func.title)
        # Image renderings: reference, then the two interpolants.
        nnt.plot(func, interp=False, plotter='imshow')
        pl.savefig('%s-ref-img.png' % func.__name__)
        nnt.plot(func, interp=True, plotter='imshow')
        pl.savefig('%s-nn-img.png' % func.__name__)
        lpt.plot(func, interp=True, plotter='imshow')
        pl.savefig('%s-lin-img.png' % func.__name__)
        # Same three, rendered as contour plots.
        nnt.plot(func, interp=False, plotter='contour')
        pl.savefig('%s-ref-con.png' % func.__name__)
        nnt.plot(func, interp=True, plotter='contour')
        pl.savefig('%s-nn-con.png' % func.__name__)
        lpt.plot(func, interp=True, plotter='contour')
        pl.savefig('%s-lin-con.png' % func.__name__)
    pl.ion()
def plot_dt(tri, colors=None):
    """Draw the triangulation's edges (from tri.edge_db) on the current axes."""
    import matplotlib as mpl
    from matplotlib import pylab as pl
    if colors is None:
        colors = [(0, 0, 0, 0.2)]  # translucent black
    lc = mpl.collections.LineCollection(
        np.array([((tri.x[i], tri.y[i]), (tri.x[j], tri.y[j]))
                  for i, j in tri.edge_db]),
        colors=colors)
    ax = pl.gca()
    ax.add_collection(lc)
    pl.draw_if_interactive()
def plot_vo(tri, colors=None):
    """Draw Voronoi edges (segments joining circumcenters of neighboring
    triangles) on the current axes."""
    import matplotlib as mpl
    from matplotlib import pylab as pl
    if colors is None:
        colors = [(0, 1, 0, 0.2)]  # translucent green
    # One segment per circumcenter/neighbor pair; -1 marks "no neighbor".
    lc = mpl.collections.LineCollection(np.array(
        [(tri.circumcenters[i], tri.circumcenters[j])
         for i in xrange(len(tri.circumcenters))
         for j in tri.triangle_neighbors[i] if j != -1]),
        colors=colors)
    ax = pl.gca()
    ax.add_collection(lc)
    pl.draw_if_interactive()
def plot_cc(tri, edgecolor=None):
    """Draw each triangle's circumcircle on the current axes."""
    import matplotlib as mpl
    from matplotlib import pylab as pl
    if edgecolor is None:
        edgecolor = (0, 0, 1, 0.2)  # translucent blue
    # Circumradius = distance from a triangle's first vertex to its
    # circumcenter.
    dxy = (np.array([(tri.x[i], tri.y[i]) for i, j, k in tri.triangle_nodes])
           - tri.circumcenters)
    r = np.hypot(dxy[:, 0], dxy[:, 1])
    ax = pl.gca()
    for i in xrange(len(r)):
        # NOTE(review): modern matplotlib.patches.Circle has no
        # `resolution` kwarg (CirclePolygon does) — confirm against the
        # matplotlib version this module targets.
        p = mpl.patches.Circle(tri.circumcenters[i], r[i],
                               resolution=100, edgecolor=edgecolor,
                               facecolor=(1, 1, 1, 0), linewidth=0.2)
        ax.add_patch(p)
    pl.draw_if_interactive()
def quality(func, mesh, interpolator='nn', n=33):
    """Compute a quality factor (the quantity r**2 from TOMS792).

    interpolator must be in ('linear', 'nn').

    func -- analytic test function of (x, y).
    mesh -- object exposing .x and .y point arrays (e.g. a TestDataSet).
    n -- resolution of the evaluation grid over the unit square.
    """
    fz = func(mesh.x, mesh.y)
    tri = Triangulation(mesh.x, mesh.y)
    intp = getattr(tri,
                   interpolator + '_extrapolator')(fz, bbox=(0., 1., 0., 1.))
    Y, X = np.mgrid[0:1:complex(0, n), 0:1:complex(0, n)]
    Z = func(X, Y)
    iz = intp[0:1:complex(0, n), 0:1:complex(0, n)]
    #nans = np.isnan(iz)
    #numgood = n*n - np.sum(np.array(nans.flat, np.int32))
    numgood = n * n  # NaN filtering above is disabled; all points count
    # r**2 = 1 - SSE/SSM: fraction of the reference variance explained.
    SE = (Z - iz) ** 2
    SSE = np.sum(SE.flat)
    meanZ = np.sum(Z.flat) / numgood
    SM = (Z - meanZ) ** 2
    SSM = np.sum(SM.flat)
    r2 = 1.0 - SSE / SSM
    print(func.__name__, r2, SSE, SSM, numgood)
    return r2
def allquality(interpolator='nn', allfuncs=allfuncs, data=data, n=33):
    """Tabulate quality() scores as {mesh name: [r**2 per test function]},
    visiting the meshes in sorted-name order."""
    results = {}
    for name in sorted(data):
        mesh = data[name]
        scores = results.setdefault(name, [])
        for func in allfuncs:
            scores.append(quality(func, mesh, interpolator, n))
    return results
def funky():
    """Return two Triangulations: t0 over five fixed points, and t1 over
    the same five plus one extra point at (0.46, 0.23)."""
    x0 = np.array([0.25, 0.3, 0.5, 0.6, 0.6])
    y0 = np.array([0.2, 0.35, 0.0, 0.25, 0.65])
    tx = 0.46
    ty = 0.23
    t0 = Triangulation(x0, y0)
    t1 = Triangulation(np.hstack((x0, [tx])), np.hstack((y0, [ty])))
    return t0, t1
|
yuanagain/seniorthesis
|
venv/lib/python2.7/site-packages/matplotlib/delaunay/testfuncs.py
|
Python
|
mit
| 21,215
|
[
"Gaussian"
] |
47a9c92cd2d67a970eeca6c953f477f72a6349d6eb4eef95c2c0c9f801615577
|
# -*- coding: utf-8 -*-
#
# INDRA documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 23 16:42:17 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import mock
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('ext'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinxarg.ext',
'sphinx.ext.autosummary',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'citations'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'INDRA'
copyright = u'2018, B. M. Gyori, J. A. Bachman'
author = u'B. M. Gyori, J. A. Bachman'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.12'
# The full version, including alpha/beta/rc tags.
release = '1.12.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'indradoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'indra.tex', u'INDRA Documentation',
u'B. M. Gyori, J. A. Bachman', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'INDRA', u'INDRA Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for numpydoc ------------------------------------------------------
numpydoc_show_class_members = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'INDRA', u'INDRA Documentation',
author, 'INDRA', 'Integrated Network and Dynamical Reasoning Assembler.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Mock out some problematic modules-------------------------------------
# Note that for sub-modules, all parent modules must be listed explicitly.
# Optional dependencies to stub out. Each name below is replaced by a
# MagicMock in sys.modules, so autodoc can import modules that depend on
# these packages even when they are not installed at doc-build time.
MOCK_MODULES = [
    'pybel', 'pybel.constants', 'pybel.struct', 'pybel.canonicalize',
    'pybel.language', 'pybel.dsl', 'pybel.resources',
    'pybel.resources.definitions',
    'pybel.resources.definitions.definitions',
    'pygraphviz', 'jnius', 'jnius_config', 'flask',
    'networkx.drawing.nx_agraph',
    'objectpath', 'lxml', 'lxml.etree', 'lxml.builder',
    'networkx', 'networkx.algorithms', 'networkx.algorithms.dag',
    'networkx.drawing', 'networkx.algorithms.isomorphism',
    'networkx.algorithms.isomorphism.vf2userfunc',
    'functools32', 'ndex2', 'ndex2.client', 'ndex2.nice_cx_network',
    'nltk', 'kappy', 'openpyxl', 'reportlab', 'reportlab.lib', 'reportlab.lib.enums',
    'reportlab.lib.pagesizes', 'reportlab.platypus', 'reportlab.lib.styles',
    'reportlab.lib.units'
]
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.MagicMock()
# jnius_config needs a little extra hacking to avoid producing a warning
# from our code that uses it.
# A plain MagicMock attribute would be truthy; explicitly set
# vm_running = False so code checking it takes the "JVM not started" path.
jnius_config = sys.modules['jnius_config']
jnius_config.vm_running = False
|
pvtodorov/indra
|
doc/conf.py
|
Python
|
bsd-2-clause
| 10,872
|
[
"Pybel"
] |
986f3c6fa1aa6540efc5d8192135c321f26910c830a12ab100f0537fdb5aadb8
|
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there a no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimated correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readibility of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
# Sparse symmetric positive-definite precision matrix; alpha controls
# sparsity, and the coefficient bounds keep the problem recoverable.
prec = make_sparse_spd_matrix(n_features, alpha=.98,
                              smallest_coef=.4,
                              largest_coef=.7,
                              random_state=prng)
cov = linalg.inv(prec)
# Rescale the covariance to a correlation matrix (unit diagonal) and apply
# the inverse scaling to prec so it remains cov's inverse.
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
# Standardize each feature: with few samples, correlations are easier to
# recover than covariances (see module docstring).
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
# Graphical lasso with the sparsity parameter chosen by cross-validation.
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
# Ledoit-Wolf l2 shrinkage as a baseline.
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
        ('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
    plt.subplot(2, 4, i + 1)
    plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
               cmap=plt.cm.RdBu_r)
    plt.xticks(())
    plt.yticks(())
    plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
         ('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
    ax = plt.subplot(2, 4, i + 5)
    # Mask exact zeros so the sparsity pattern shows as background color.
    plt.imshow(np.ma.masked_equal(this_prec, 0),
               interpolation='nearest', vmin=-vmax, vmax=vmax,
               cmap=plt.cm.RdBu_r)
    plt.xticks(())
    plt.yticks(())
    plt.title('%s precision' % name)
    # NOTE(review): set_axis_bgcolor was removed in matplotlib >= 2.2
    # (use set_facecolor) — confirm against the pinned matplotlib version.
    ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
# NOTE(review): newer scikit-learn names this attribute `grid_scores_`
# (later `cv_results_`) — confirm against the pinned sklearn version.
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
|
thilbern/scikit-learn
|
examples/covariance/plot_sparse_cov.py
|
Python
|
bsd-3-clause
| 5,078
|
[
"Gaussian"
] |
9281d9897a4020a86fd21c27a692b5eb9407e483b468995c3f4032a2ed708d47
|
"""Guess the MIME type of a file.
This module defines two useful functions:
guess_type(url, strict=1) -- guess the MIME type and encoding of a URL.
guess_extension(type, strict=1) -- guess the extension for a given MIME type.
It also contains the following, for tuning the behavior:
Data:
knownfiles -- list of files to parse
inited -- flag set when init() has been called
suffix_map -- dictionary mapping suffixes to suffixes
encodings_map -- dictionary mapping suffixes to encodings
types_map -- dictionary mapping suffixes to types
Functions:
init([files]) -- parse a list of files, default knownfiles (on Windows, the
default values are taken from the registry)
read_mime_types(file) -- parse one file, return a dictionary or None
"""
import os
import sys
import posixpath
import urllib
try:
import _winreg
except ImportError:
_winreg = None
__all__ = [
"guess_type","guess_extension","guess_all_extensions",
"add_type","read_mime_types","init"
]
knownfiles = [
"/etc/mime.types",
"/etc/httpd/mime.types", # Mac OS X
"/etc/httpd/conf/mime.types", # Apache
"/etc/apache/mime.types", # Apache 1
"/etc/apache2/mime.types", # Apache 2
"/usr/local/etc/httpd/conf/mime.types",
"/usr/local/lib/netscape/mime.types",
"/usr/local/etc/httpd/conf/mime.types", # Apache 1.2
"/usr/local/etc/mime.types", # Apache 1.3
]
inited = False
_db = None
class MimeTypes:
    """MIME-types datastore.
    This datastore can handle information from mime.types-style files
    and supports basic determination of MIME type from a filename or
    URL, and can guess a reasonable extension given a MIME type.
    """

    def __init__(self, filenames=(), strict=True):
        # Bootstrap the module-level default tables on first use.
        if not inited:
            init()
        self.encodings_map = encodings_map.copy()
        self.suffix_map = suffix_map.copy()
        # The boolean `strict` flag indexes directly into these pairs:
        # [False] -> common (non-standard) types, [True] -> standard types.
        self.types_map = ({}, {}) # dict for (non-strict, strict)
        self.types_map_inv = ({}, {})
        for (ext, type) in types_map.items():
            self.add_type(type, ext, True)
        for (ext, type) in common_types.items():
            self.add_type(type, ext, False)
        for name in filenames:
            self.read(name, strict)

    def add_type(self, type, ext, strict=True):
        """Add a mapping between a type and an extension.
        When the extension is already known, the new
        type will replace the old one. When the type
        is already known the extension will be added
        to the list of known extensions.
        If strict is true, information will be added to
        list of standard types, else to the list of non-standard
        types.
        """
        self.types_map[strict][ext] = type
        # Maintain the inverse map (type -> extensions) for guess_extension().
        exts = self.types_map_inv[strict].setdefault(type, [])
        if ext not in exts:
            exts.append(ext)

    def guess_type(self, url, strict=True):
        """Guess the type of a file based on its URL.
        Return value is a tuple (type, encoding) where type is None if
        the type can't be guessed (no or unknown suffix) or a string
        of the form type/subtype, usable for a MIME Content-type
        header; and encoding is None for no encoding or the name of
        the program used to encode (e.g. compress or gzip). The
        mappings are table driven. Encoding suffixes are case
        sensitive; type suffixes are first tried case sensitive, then
        case insensitive.
        The suffixes .tgz, .taz and .tz (case sensitive!) are all
        mapped to '.tar.gz'. (This is table-driven too, using the
        dictionary suffix_map.)
        Optional `strict' argument when False adds a bunch of commonly found,
        but non-standard types.
        """
        # Python 2 API: urllib.splittype separates "scheme:rest".
        scheme, url = urllib.splittype(url)
        if scheme == 'data':
            # syntax of data URLs:
            # dataurl   := "data:" [ mediatype ] [ ";base64" ] "," data
            # mediatype := [ type "/" subtype ] *( ";" parameter )
            # data      := *urlchar
            # parameter := attribute "=" value
            # type/subtype defaults to "text/plain"
            comma = url.find(',')
            if comma < 0:
                # bad data URL
                return None, None
            semi = url.find(';', 0, comma)
            if semi >= 0:
                type = url[:semi]
            else:
                type = url[:comma]
            if '=' in type or '/' not in type:
                type = 'text/plain'
            return type, None           # never compressed, so encoding is None
        # Expand compound suffixes (.tgz -> .tar.gz) until stable.
        base, ext = posixpath.splitext(url)
        while ext in self.suffix_map:
            base, ext = posixpath.splitext(base + self.suffix_map[ext])
        # Peel off an encoding suffix (.gz, .bz2, ...), case sensitively.
        if ext in self.encodings_map:
            encoding = self.encodings_map[ext]
            base, ext = posixpath.splitext(base)
        else:
            encoding = None
        # Strict table first (exact case, then lowercase), then — only when
        # strict is False — fall through to the common (non-standard) table.
        types_map = self.types_map[True]
        if ext in types_map:
            return types_map[ext], encoding
        elif ext.lower() in types_map:
            return types_map[ext.lower()], encoding
        elif strict:
            return None, encoding
        types_map = self.types_map[False]
        if ext in types_map:
            return types_map[ext], encoding
        elif ext.lower() in types_map:
            return types_map[ext.lower()], encoding
        else:
            return None, encoding

    def guess_all_extensions(self, type, strict=True):
        """Guess the extensions for a file based on its MIME type.
        Return value is a list of strings giving the possible filename
        extensions, including the leading dot ('.').  The extension is not
        guaranteed to have been associated with any particular data stream,
        but would be mapped to the MIME type `type' by guess_type().
        Optional `strict' argument when false adds a bunch of commonly found,
        but non-standard types.
        """
        type = type.lower()
        extensions = self.types_map_inv[True].get(type, [])
        if not strict:
            # Append common-table extensions, preserving order, no dupes.
            for ext in self.types_map_inv[False].get(type, []):
                if ext not in extensions:
                    extensions.append(ext)
        return extensions

    def guess_extension(self, type, strict=True):
        """Guess the extension for a file based on its MIME type.
        Return value is a string giving a filename extension,
        including the leading dot ('.').  The extension is not
        guaranteed to have been associated with any particular data
        stream, but would be mapped to the MIME type `type' by
        guess_type().  If no extension can be guessed for `type', None
        is returned.
        Optional `strict' argument when false adds a bunch of commonly found,
        but non-standard types.
        """
        extensions = self.guess_all_extensions(type, strict)
        if not extensions:
            return None
        # First registered extension wins.
        return extensions[0]

    def read(self, filename, strict=True):
        """
        Read a single mime.types-format file, specified by pathname.
        If strict is true, information will be added to
        list of standard types, else to the list of non-standard
        types.
        """
        with open(filename) as fp:
            self.readfp(fp, strict)

    def readfp(self, fp, strict=True):
        """
        Read a single mime.types-format file.
        If strict is true, information will be added to
        list of standard types, else to the list of non-standard
        types.
        """
        while 1:
            line = fp.readline()
            if not line:
                break
            words = line.split()
            # Drop everything from the first '#' comment word onward.
            for i in range(len(words)):
                if words[i][0] == '#':
                    del words[i:]
                    break
            if not words:
                continue
            # Format: "type/subtype  ext1 ext2 ..." (extensions without dots).
            type, suffixes = words[0], words[1:]
            for suff in suffixes:
                self.add_type(type, '.' + suff, strict)

    def read_windows_registry(self, strict=True):
        """
        Load the MIME types database from Windows registry.
        If strict is true, information will be added to
        list of standard types, else to the list of non-standard
        types.
        """
        # Windows only
        if not _winreg:
            return

        def enum_types(mimedb):
            # Yield every subkey name of HKCR that survives a round-trip
            # through the default encoding (Python 2 str/unicode dance).
            i = 0
            while True:
                try:
                    ctype = _winreg.EnumKey(mimedb, i)
                except EnvironmentError:
                    break
                try:
                    ctype = ctype.encode(default_encoding) # omit in 3.x!
                except UnicodeEncodeError:
                    pass
                except UnicodeDecodeError:
                    pass
                else:
                    yield ctype
                i += 1

        default_encoding = sys.getdefaultencoding()
        with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, '') as hkcr:
            for subkeyname in enum_types(hkcr):
                try:
                    with _winreg.OpenKey(hkcr, subkeyname) as subkey:
                        # Only check file extensions
                        if not subkeyname.startswith("."):
                            continue
                        # raises EnvironmentError if no 'Content Type' value
                        mimetype, datatype = _winreg.QueryValueEx(
                            subkey, 'Content Type')
                        if datatype != _winreg.REG_SZ:
                            continue
                        try:
                            mimetype = mimetype.encode(default_encoding)
                            subkeyname = subkeyname.encode(default_encoding)
                        except UnicodeEncodeError:
                            continue
                        self.add_type(mimetype, subkeyname, strict)
                except EnvironmentError:
                    continue
def guess_type(url, strict=True):
    """Guess the MIME type and encoding of the file named by *url*.

    Returns a ``(type, encoding)`` tuple; either element may be None
    when it cannot be determined.  With ``strict=False`` a set of
    commonly-found but non-standard types is also consulted.

    Delegates to the shared module-level database, creating it lazily.
    """
    db = _db
    if db is None:
        init()
        db = _db
    return db.guess_type(url, strict)
def guess_all_extensions(type, strict=True):
    """Return all filename extensions (with leading dot) mapped to *type*.

    The list may be empty.  With ``strict=False`` extensions from the
    non-standard table are included as well.

    Delegates to the shared module-level database, creating it lazily.
    """
    db = _db
    if db is None:
        init()
        db = _db
    return db.guess_all_extensions(type, strict)
def guess_extension(type, strict=True):
    """Return one filename extension (with leading dot) for *type*.

    Returns None when no extension is known.  With ``strict=False``
    the non-standard table is also consulted.

    Delegates to the shared module-level database, creating it lazily.
    """
    db = _db
    if db is None:
        init()
        db = _db
    return db.guess_extension(type, strict)
def add_type(type, ext, strict=True):
    """Register a mapping between MIME *type* and extension *ext*.

    A known extension gets its type replaced; a known type gains an
    additional extension.  ``strict`` selects the standard (True) or
    non-standard (False) table.

    Delegates to the shared module-level database, creating it lazily.
    """
    db = _db
    if db is None:
        init()
        db = _db
    return db.add_type(type, ext, strict)
def init(files=None):
    """(Re)build the module-level MIME database and its global tables.

    When *files* is None the Windows registry (if available) and the
    default ``knownfiles`` list are consulted; otherwise only the given
    files are parsed.
    """
    global suffix_map, types_map, encodings_map, common_types
    global inited, _db
    inited = True    # so that MimeTypes.__init__() doesn't call us again
    db = MimeTypes()
    if files is None:
        if _winreg:
            db.read_windows_registry()
        files = knownfiles
    # Silently skip entries that don't exist on this machine.
    for path in filter(os.path.isfile, files):
        db.read(path)
    encodings_map = db.encodings_map
    suffix_map = db.suffix_map
    types_map = db.types_map[True]
    common_types = db.types_map[False]
    # Publish the database last, once it is fully initialized.
    _db = db
def read_mime_types(file):
    """Parse one mime.types-style file.

    Returns the resulting strict mapping of extension -> MIME type,
    or None if the file cannot be opened.
    """
    try:
        f = open(file)
    except IOError:
        return None
    # Fix: the original never closed the file handle; close it even if
    # parsing raises.
    with f:
        db = MimeTypes()
        db.readfp(f, True)
    return db.types_map[True]
def _default_mime_types():
global suffix_map
global encodings_map
global types_map
global common_types
suffix_map = {
'.tgz': '.tar.gz',
'.taz': '.tar.gz',
'.tz': '.tar.gz',
'.tbz2': '.tar.bz2',
'.txz': '.tar.xz',
}
encodings_map = {
'.gz': 'gzip',
'.Z': 'compress',
'.bz2': 'bzip2',
'.xz': 'xz',
}
# Before adding new types, make sure they are either registered with IANA,
# at http://www.isi.edu/in-notes/iana/assignments/media-types
# or extensions, i.e. using the x- prefix
# If you add to these, please keep them sorted!
types_map = {
'.a' : 'application/octet-stream',
'.ai' : 'application/postscript',
'.aif' : 'audio/x-aiff',
'.aifc' : 'audio/x-aiff',
'.aiff' : 'audio/x-aiff',
'.au' : 'audio/basic',
'.avi' : 'video/x-msvideo',
'.bat' : 'text/plain',
'.bcpio' : 'application/x-bcpio',
'.bin' : 'application/octet-stream',
'.bmp' : 'image/x-ms-bmp',
'.c' : 'text/plain',
# Duplicates :(
'.cdf' : 'application/x-cdf',
'.cdf' : 'application/x-netcdf',
'.cpio' : 'application/x-cpio',
'.csh' : 'application/x-csh',
'.css' : 'text/css',
'.dll' : 'application/octet-stream',
'.doc' : 'application/msword',
'.dot' : 'application/msword',
'.dvi' : 'application/x-dvi',
'.eml' : 'message/rfc822',
'.eps' : 'application/postscript',
'.etx' : 'text/x-setext',
'.exe' : 'application/octet-stream',
'.gif' : 'image/gif',
'.gtar' : 'application/x-gtar',
'.h' : 'text/plain',
'.hdf' : 'application/x-hdf',
'.htm' : 'text/html',
'.html' : 'text/html',
'.ico' : 'image/vnd.microsoft.icon',
'.ief' : 'image/ief',
'.jpe' : 'image/jpeg',
'.jpeg' : 'image/jpeg',
'.jpg' : 'image/jpeg',
'.js' : 'application/javascript',
'.ksh' : 'text/plain',
'.latex' : 'application/x-latex',
'.m1v' : 'video/mpeg',
'.man' : 'application/x-troff-man',
'.me' : 'application/x-troff-me',
'.mht' : 'message/rfc822',
'.mhtml' : 'message/rfc822',
'.mif' : 'application/x-mif',
'.mov' : 'video/quicktime',
'.movie' : 'video/x-sgi-movie',
'.mp2' : 'audio/mpeg',
'.mp3' : 'audio/mpeg',
'.mp4' : 'video/mp4',
'.mpa' : 'video/mpeg',
'.mpe' : 'video/mpeg',
'.mpeg' : 'video/mpeg',
'.mpg' : 'video/mpeg',
'.ms' : 'application/x-troff-ms',
'.nc' : 'application/x-netcdf',
'.nws' : 'message/rfc822',
'.o' : 'application/octet-stream',
'.obj' : 'application/octet-stream',
'.oda' : 'application/oda',
'.p12' : 'application/x-pkcs12',
'.p7c' : 'application/pkcs7-mime',
'.pbm' : 'image/x-portable-bitmap',
'.pdf' : 'application/pdf',
'.pfx' : 'application/x-pkcs12',
'.pgm' : 'image/x-portable-graymap',
'.pl' : 'text/plain',
'.png' : 'image/png',
'.pnm' : 'image/x-portable-anymap',
'.pot' : 'application/vnd.ms-powerpoint',
'.ppa' : 'application/vnd.ms-powerpoint',
'.ppm' : 'image/x-portable-pixmap',
'.pps' : 'application/vnd.ms-powerpoint',
'.ppt' : 'application/vnd.ms-powerpoint',
'.ps' : 'application/postscript',
'.pwz' : 'application/vnd.ms-powerpoint',
'.py' : 'text/x-python',
'.pyc' : 'application/x-python-code',
'.pyo' : 'application/x-python-code',
'.qt' : 'video/quicktime',
'.ra' : 'audio/x-pn-realaudio',
'.ram' : 'application/x-pn-realaudio',
'.ras' : 'image/x-cmu-raster',
'.rdf' : 'application/xml',
'.rgb' : 'image/x-rgb',
'.roff' : 'application/x-troff',
'.rtx' : 'text/richtext',
'.sgm' : 'text/x-sgml',
'.sgml' : 'text/x-sgml',
'.sh' : 'application/x-sh',
'.shar' : 'application/x-shar',
'.snd' : 'audio/basic',
'.so' : 'application/octet-stream',
'.src' : 'application/x-wais-source',
'.sv4cpio': 'application/x-sv4cpio',
'.sv4crc' : 'application/x-sv4crc',
'.swf' : 'application/x-shockwave-flash',
'.t' : 'application/x-troff',
'.tar' : 'application/x-tar',
'.tcl' : 'application/x-tcl',
'.tex' : 'application/x-tex',
'.texi' : 'application/x-texinfo',
'.texinfo': 'application/x-texinfo',
'.tif' : 'image/tiff',
'.tiff' : 'image/tiff',
'.tr' : 'application/x-troff',
'.tsv' : 'text/tab-separated-values',
'.txt' : 'text/plain',
'.ustar' : 'application/x-ustar',
'.vcf' : 'text/x-vcard',
'.wav' : 'audio/x-wav',
'.wiz' : 'application/msword',
'.wsdl' : 'application/xml',
'.xbm' : 'image/x-xbitmap',
'.xlb' : 'application/vnd.ms-excel',
# Duplicates :(
'.xls' : 'application/excel',
'.xls' : 'application/vnd.ms-excel',
'.xml' : 'text/xml',
'.xpdl' : 'application/xml',
'.xpm' : 'image/x-xpixmap',
'.xsl' : 'application/xml',
'.xwd' : 'image/x-xwindowdump',
'.zip' : 'application/zip',
}
# These are non-standard types, commonly found in the wild. They will
# only match if strict=0 flag is given to the API methods.
# Please sort these too
common_types = {
'.jpg' : 'image/jpg',
'.mid' : 'audio/midi',
'.midi': 'audio/midi',
'.pct' : 'image/pict',
'.pic' : 'image/pict',
'.pict': 'image/pict',
'.rtf' : 'application/rtf',
'.xul' : 'text/xul'
}
_default_mime_types()
# Command-line interface (Python 2 syntax: bare `print` statements and
# `except err, msg`).  Guesses the MIME type — or, with -e, the
# extension — for each type argument given.
if __name__ == '__main__':
    import getopt

    USAGE = """\
Usage: mimetypes.py [options] type
Options:
    --help / -h -- print this message and exit
    --lenient / -l -- additionally search of some common, but non-standard
                      types.
    --extension / -e -- guess extension instead of type
More than one type argument may be given.
"""

    def usage(code, msg=''):
        # Print usage (and an optional error message), then exit with `code`.
        print USAGE
        if msg: print msg
        sys.exit(code)

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hle',
                                   ['help', 'lenient', 'extension'])
    except getopt.error, msg:
        usage(1, msg)

    strict = 1       # -l / --lenient clears this
    extension = 0    # -e / --extension sets this
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-l', '--lenient'):
            strict = 0
        elif opt in ('-e', '--extension'):
            extension = 1
    for gtype in args:
        if extension:
            guess = guess_extension(gtype, strict)
            if not guess: print "I don't know anything about type", gtype
            else: print guess
        else:
            guess, encoding = guess_type(gtype, strict)
            if not guess: print "I don't know anything about type", gtype
            else: print 'type:', guess, 'encoding:', encoding
|
xymz/meron
|
mimetypes.py
|
Python
|
mit
| 21,050
|
[
"NetCDF"
] |
5494f375a4ce02e57ccf170bfea36f0ea01a0f48a9854a71fed3503a806f2335
|
#ImportModules
import ShareYourSystem as SYS
import operator
#Definition of a brian structure
# NOTE(review): builds a small network of Networker ('A','B') and
# Connecter ('1','2') nodes via array(), wires A's connecters to the
# '2' connecter via the update() spec, then resolves the whole graph
# with network().  The semantics of the '<...>' path strings are
# defined by the ShareYourSystem framework — confirm against its docs.
MyNetworker=SYS.NetworkerClass(
    ).array(
        ["Networkers","Connecters"],
        [
            ['A','B'],['1','2']
        ],
        [SYS.NetworkerClass,SYS.ConnecterClass],
    ).update(
        [
            (
                '<Networkers>ANetworker/Dis_<Connecters>',
                [
                    [
                        ('ConnectingGraspClueVariablesList',
                            [
                                '/NodePointDeriveNoder/<Connecters>2Connecter'
                            ]
                        ),
                        ('TagStr','Networked')
                    ],
                    [
                        ('TagStr','Networked')
                    ]
                ]
            ),
        ]
    ).network(
        **{
            'VisitingCollectionStrsList':["Networkers","Connecters"],
            'RecruitingConcludeConditionVariable':[
                ('NameStr',operator.eq,'Connecter')
            ]
        }
    )

#Definition the AttestedStr
# Render the resulting object to a string (without base keys or alinea
# indentation) and record it for the framework's doc/attest machinery.
SYS._attest(
    [
        'MyNetworker is '+SYS._str(
            MyNetworker,
            **{
                'RepresentingBaseKeyStrsList':False,
                'RepresentingAlineaIsBool':False
            }
        ),
    ]
)

#Print
|
Ledoux/ShareYourSystem
|
Pythonlogy/draft/Teamers_Networker/01_ExampleDoc.py
|
Python
|
mit
| 925
|
[
"Brian"
] |
8b111191312c51471aedadd0ae96c2488c5bd0fa246b1b81e490e5ad193ccf62
|
# ASE regression test for the Gaussian calculator command-line task
# (Python 2: bare `print` statement).  Skips when no Gaussian binary
# is configured.
from ase.test import NotAvailable
from ase.calculators.gaussian import Gaussian
if Gaussian().get_command() is None:
    raise NotAvailable('Gaussian required')
from ase.tasks.main import run
# Run the O2/O molecule task, then re-run with -s to read results back.
atoms, task = run('gaussian molecule O2 O')
atoms, task = run('gaussian molecule O2 O -s')
# Atomization energy of O2: 2*E(O) - E(O2).
ae = 2 * task.data['O']['energy'] - task.data['O2']['energy']
print ae
# NOTE(review): 5.664 is the reference value for this setup — units are
# presumably eV (ASE convention); verify if the threshold ever trips.
assert abs(ae - 5.664) < 1e-3
|
conwayje/ase-python
|
ase/test/gaussian/gaussian_cmdline.py
|
Python
|
gpl-2.0
| 388
|
[
"ASE",
"Gaussian"
] |
16a0ce63f4e75cd3b8d797a9e09e835cf94f3bf8ecf2f3c8e26b8b6f6a3564de
|
#!/usr/bin/env python
"""
DataCollectingParser subclasses ctypesparser.CtypesParser and builds Description
objects from the CtypesType objects and other information from CtypesParser.
After parsing is complete, a DescriptionCollection object can be retrieved by
calling DataCollectingParser.data().
"""
import ctypesparser
from ctypesgencore.descriptions import *
from ctypesgencore.ctypedescs import *
from ctypesgencore.expressions import *
from ctypesgencore.messages import *
from tempfile import mkstemp
import os
class DataCollectingParser(ctypesparser.CtypesParser,
                           ctypesparser.CtypesTypeVisitor):
    """Main class for the Parser component. Steps for use:
    p=DataCollectingParser(names_of_header_files,options)
    p.parse()
    data=p.data() #A dictionary of constants, enums, structs, functions, etc.
    """

    def __init__(self, headers, options):
        ctypesparser.CtypesParser.__init__(self, options)
        self.headers = headers
        self.options = options
        # One list per description category, plus `all` and a global
        # `output_order` that records the sequence things were seen in.
        self.constants = []
        self.typedefs = []
        self.structs = []
        self.enums = []
        self.functions = []
        self.variables = []
        self.macros = []
        self.all = []
        self.output_order = []
        # NULL is a useful macro to have defined
        null = ConstantExpressionNode(None)
        nullmacro = ConstantDescription("NULL", null, ("<built-in>", 1))
        self.constants.append(nullmacro)
        self.all.append(nullmacro)
        self.output_order.append(("constant", nullmacro))
        # A list of tuples describing macros; saved to be processed after
        # everything else has been parsed
        self.saved_macros = []
        # A set of structs that are already known
        self.already_seen_structs = set()
        # A dict of structs that have only been seen in opaque form
        self.already_seen_opaque_structs = {}
        # A set of enums that are already known
        self.already_seen_enums = set()
        # A dict of enums that have only been seen in opaque form
        self.already_seen_opaque_enums = {}

    def parse(self):
        # Build a temporary umbrella header that #includes every requested
        # header, parse that, then process the macros deferred during the
        # first pass.  (Python 2 syntax: `print >>f`.)
        fd, fname = mkstemp(suffix=".h")
        f = os.fdopen(fd, 'w')
        for header in self.options.other_headers:
            print >>f, '#include <%s>' % header
        for header in self.headers:
            print >>f, '#include "%s"' % os.path.abspath(header)
        f.flush()
        f.close()
        ctypesparser.CtypesParser.parse(self, fname, None)
        os.unlink(fname)
        for name, params, expr, (filename, lineno) in self.saved_macros:
            self.handle_macro(name, params, expr, filename, lineno)

    def handle_define_constant(self, name, expr, filename, lineno):
        # Called by CParser
        # Save to handle later
        self.saved_macros.append((name, None, expr, (filename, lineno)))

    def handle_define_unparseable(self, name, params, value, filename, lineno):
        # Called by CParser
        # Record the macro with an attached error so downstream consumers
        # know its original text even though it couldn't be parsed.
        if params:
            original_string = "#define %s(%s) %s" % \
                (name, ",".join(params), " ".join(value))
        else:
            original_string = "#define %s %s" % \
                (name, " ".join(value))
        macro = MacroDescription(name, params, None,
                                 src=(filename, lineno))
        macro.error("Could not parse macro \"%s\"" % original_string,
                    cls='macro')
        macro.original_string = original_string
        self.macros.append(macro)
        self.all.append(macro)
        self.output_order.append(('macro', macro))

    def handle_define_macro(self, name, params, expr, filename, lineno):
        # Called by CParser
        # Save to handle later
        self.saved_macros.append((name, params, expr, (filename, lineno)))

    def handle_ctypes_typedef(self, name, ctype, filename, lineno):
        # Called by CtypesParser
        # Visiting the ctype first ensures any struct/enum it references
        # gets registered before the typedef that uses it.
        ctype.visit(self)
        typedef = TypedefDescription(name,
                                     ctype,
                                     src=(filename, repr(lineno)))
        self.typedefs.append(typedef)
        self.all.append(typedef)
        self.output_order.append(('typedef', typedef))

    def handle_ctypes_new_type(self, ctype, filename, lineno):
        # Called by CtypesParser
        # Dispatch to the enum or struct handler.
        if isinstance(ctype, ctypesparser.CtypesEnum):
            self.handle_enum(ctype, filename, lineno)
        else:
            self.handle_struct(ctype, filename, lineno)

    def handle_ctypes_function(self, name, restype, argtypes, variadic,
                               filename, lineno):
        # Called by CtypesParser
        restype.visit(self)
        for argtype in argtypes:
            argtype.visit(self)
        function = FunctionDescription(name,
                                       restype,
                                       argtypes,
                                       variadic=variadic,
                                       src=(filename, repr(lineno)))
        self.functions.append(function)
        self.all.append(function)
        self.output_order.append(('function', function))

    def handle_ctypes_variable(self, name, ctype, filename, lineno):
        # Called by CtypesParser
        ctype.visit(self)
        variable = VariableDescription(name,
                                       ctype,
                                       src=(filename, repr(lineno)))
        self.variables.append(variable)
        self.all.append(variable)
        self.output_order.append(('variable', variable))

    def handle_struct(self, ctypestruct, filename, lineno):
        # Called from within DataCollectingParser
        # When we find an opaque struct, we make a StructDescription for it
        # and record it in self.already_seen_opaque_structs. If we later
        # find a transparent struct with the same tag, we fill in the
        # opaque struct with the information from the transparent struct and
        # move the opaque struct to the end of the struct list.
        name = "%s %s" % (ctypestruct.variety, ctypestruct.tag)
        if name in self.already_seen_structs:
            return

        if ctypestruct.opaque:
            if name not in self.already_seen_opaque_structs:
                struct = StructDescription(ctypestruct.tag,
                                           ctypestruct.variety,
                                           None,  # No members
                                           True,  # Opaque
                                           ctypestruct,
                                           src=(filename, str(lineno)))
                self.already_seen_opaque_structs[name] = struct
                self.structs.append(struct)
                self.all.append(struct)
                self.output_order.append(('struct', struct))
        else:
            for (membername, ctype) in ctypestruct.members:
                ctype.visit(self)
            if name in self.already_seen_opaque_structs:
                # Fill in older version
                struct = self.already_seen_opaque_structs[name]
                struct.opaque = False
                struct.members = ctypestruct.members
                struct.ctype = ctypestruct
                struct.src = ctypestruct.src
                self.output_order.append(('struct-body', struct))
                del self.already_seen_opaque_structs[name]
            else:
                struct = StructDescription(ctypestruct.tag,
                                           ctypestruct.variety,
                                           ctypestruct.members,
                                           False,  # Not opaque
                                           src=(filename, str(lineno)),
                                           ctype=ctypestruct)
                self.structs.append(struct)
                self.all.append(struct)
                self.output_order.append(('struct', struct))
                self.output_order.append(('struct-body', struct))
            # Only a transparent sighting makes the struct fully "seen";
            # an opaque one may still be filled in later.
            self.already_seen_structs.add(name)

    def handle_enum(self, ctypeenum, filename, lineno):
        # Called from within DataCollectingParser.
        # Process for handling opaque enums is the same as process for opaque
        # structs. See handle_struct() for more details.
        tag = ctypeenum.tag
        if tag in self.already_seen_enums:
            return

        if ctypeenum.opaque:
            if tag not in self.already_seen_opaque_enums:
                enum = EnumDescription(ctypeenum.tag,
                                       None,
                                       ctypeenum,
                                       src=(filename, str(lineno)))
                enum.opaque = True
                self.already_seen_opaque_enums[tag] = enum
                self.enums.append(enum)
                self.all.append(enum)
                self.output_order.append(('enum', enum))
        else:
            if tag in self.already_seen_opaque_enums:
                # Fill in older opaque version
                enum = self.already_seen_opaque_enums[tag]
                enum.opaque = False
                enum.ctype = ctypeenum
                enum.src = ctypeenum.src
                enum.members = ctypeenum.enumerators
                del self.already_seen_opaque_enums[tag]
            else:
                enum = EnumDescription(ctypeenum.tag,
                                       ctypeenum.enumerators,
                                       src=(filename, str(lineno)),
                                       ctype=ctypeenum)
                enum.opaque = False
                self.enums.append(enum)
                self.all.append(enum)
                self.output_order.append(('enum', enum))
            self.already_seen_enums.add(tag)
            # Each enumerator also becomes a module-level constant.
            for (enumname, expr) in ctypeenum.enumerators:
                constant = ConstantDescription(enumname, expr,
                                               src=(filename, lineno))
                self.constants.append(constant)
                self.all.append(constant)
                self.output_order.append(('constant', constant))

    def handle_macro(self, name, params, expr, filename, lineno):
        # Called from within DataCollectingParser
        src = (filename, lineno)

        # NOTE(review): `==None` would be more idiomatic as `is None`; kept
        # as-is since ExpressionNode equality semantics aren't visible here.
        if expr == None:
            # A parameterless #define with no value is treated as the
            # constant True.
            expr = ConstantExpressionNode(True)
            constant = ConstantDescription(name, expr, src)
            self.constants.append(constant)
            self.all.append(constant)
            return

        expr.visit(self)

        if isinstance(expr, CtypesType):
            if params:
                # NOTE(review): params is passed as "" here while other
                # MacroDescription calls pass the real params list — confirm
                # against MacroDescription's signature.
                macro = MacroDescription(name, "", src)
                macro.error("%s has parameters but evaluates to a type. " \
                    "Ctypesgen does not support it." % macro.casual_name(),
                    cls='macro')
                self.macros.append(macro)
                self.all.append(macro)
                self.output_order.append(('macro', macro))
            else:
                # A macro that evaluates to a type is recorded as a typedef.
                typedef = TypedefDescription(name, expr, src)
                self.typedefs.append(typedef)
                self.all.append(typedef)
                self.output_order.append(('typedef', typedef))
        else:
            macro = MacroDescription(name, params, expr, src)
            self.macros.append(macro)
            self.all.append(macro)
            self.output_order.append(('macro', macro))

        # Macros could possibly contain things like __FILE__, __LINE__, etc...
        # This could be supported, but it would be a lot of work. It would
        # probably also bloat the Preamble considerably.

    def handle_error(self, message, filename, lineno):
        # Called by CParser
        error_message("%s:%d: %s" % (filename, lineno, message), cls='cparser')

    def handle_pp_error(self, message):
        # Called by PreprocessorParser
        error_message("%s: %s" % (self.options.cpp, message), cls='cparser')

    def handle_status(self, message):
        # Called by CParser
        status_message(message)

    def visit_struct(self, struct):
        # CtypesTypeVisitor hook: register structs referenced by other types.
        self.handle_struct(struct, struct.src[0], struct.src[1])

    def visit_enum(self, enum):
        # CtypesTypeVisitor hook: register enums referenced by other types.
        self.handle_enum(enum, enum.src[0], enum.src[1])

    def data(self):
        # Bundle everything collected during parse() for the printer stage.
        return DescriptionCollection(self.constants,
                                     self.typedefs,
                                     self.structs,
                                     self.enums,
                                     self.functions,
                                     self.variables,
                                     self.macros,
                                     self.all,
                                     self.output_order)
|
pombredanne/ctypesgen
|
ctypesgencore/parser/datacollectingparser.py
|
Python
|
bsd-3-clause
| 12,623
|
[
"VisIt"
] |
8118793ee68d4a80aa280de0c18ad31b23668ef481f563d839008897bd0c34a1
|
r"""wamp is a module that provide classes that extend any
WAMP related class for the purpose of vtkWeb.
"""
import inspect, types, string, random, logging, six, json, re, base64, time
from threading import Timer
from twisted.web import resource
from twisted.python import log
from twisted.internet import reactor
from twisted.internet import defer
from twisted.internet.defer import Deferred, returnValue
from autobahn import wamp
from autobahn import util
from autobahn.wamp import types
from autobahn.wamp import auth
from autobahn.wamp import register as exportRpc
from autobahn.twisted.wamp import ApplicationSession, RouterSession
from autobahn.twisted.websocket import WampWebSocketServerFactory
from autobahn.twisted.websocket import WampWebSocketServerProtocol
from autobahn.twisted.websocket import WebSocketServerProtocol
from vtk.web import protocols
try:
from vtk.vtkWebCore import vtkWebApplication
except ImportError:
from vtkWebCore import vtkWebApplication
# =============================================================================
# Random 32-character session salt, regenerated on each process start.
# NOTE(review): generated with `random`, not a cryptographic source —
# confirm whether this salt is security-sensitive (py2-era code predates
# the `secrets` module).
salt = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(32))
# Process-wide singletons: the shared vtkWebApplication and the binary
# WebSocket image-delivery protocol, both set up lazily by ServerProtocol.
application = None
imageCapture = None
# =============================================================================
#
# Base class for vtkWeb WampServerProtocol
#
# =============================================================================
class ServerProtocol(ApplicationSession):
    """
    Defines the core server protocol for vtkWeb. Adds support to
    marshall/unmarshall RPC callbacks that involve ServerManager proxies as
    arguments or return values.
    Applications typically don't use this class directly, since it doesn't
    register any RPC callbacks that are required for basic web-applications with
    interactive visualizations. For that, use vtkWebServerProtocol.
    """

    def __init__(self, config):
        ApplicationSession.__init__(self, config)
        self.vtkWebProtocols = []   # sub-protocols registered for RPC export
        self.authdb = None          # optional AuthDb, set via setAuthDB()
        self.secret = None          # pending secret, applied once authdb exists
        self.Application = self.initApplication()
        self.initialize()           # subclass hook

        # Init Binary WebSocket image renderer
        # (stored in the module-level singleton so the websocket layer can
        # reach it without a session reference)
        global imageCapture
        imageCapture = protocols.vtkWebViewPortImageDelivery()
        imageCapture.setApplication(self.Application)

    def setAuthDB(self, db):
        # Attach the credential store; replay any secret set before it existed.
        self.authdb = db
        if self.secret:
            self.authdb.updateKey('vtkweb', self.secret)

    def initialize(self):
        """
        Let the sub class define what they need to do to properly initialize
        themselves.
        """
        pass

    def initApplication(self):
        """
        Let subclass optionally initialize a custom application in lieu
        of the default vtkWebApplication.
        """
        # Reuses the module-level singleton so every session shares one app.
        global application
        if not application:
            application = vtkWebApplication()
        return application

    def onJoin(self, details):
        # Export this session's RPC endpoints plus those of every
        # registered vtkWeb sub-protocol.
        ApplicationSession.onJoin(self, details)
        self.register(self)
        for protocol in self.vtkWebProtocols:
            self.register(protocol)

    def setApplication(self, application):
        self.Application = application
        # Init Binary WebSocket image renderer
        global imageCapture
        imageCapture.setApplication(self.Application)

    def registerVtkWebProtocol(self, protocol):
        # Wire the sub-protocol back to this server and the shared app.
        protocol.coreServer = self
        protocol.setApplication(self.Application)
        self.vtkWebProtocols.append(protocol)

    def getVtkWebProtocols(self):
        return self.vtkWebProtocols

    def updateSecret(self, newSecret):
        # Remember the secret; push it to the auth DB if one is attached.
        self.secret = newSecret
        if self.authdb:
            self.authdb.updateKey('vtkweb', self.secret)

    @exportRpc("application.exit")
    def exit(self):
        """RPC callback to exit"""
        reactor.stop()

    @exportRpc("application.exit.later")
    def exitLater(self, secondsLater=60):
        """RPC callback to exit after a short delay"""
        reactor.callLater(secondsLater, reactor.stop)
# =============================================================================
#
# Base class for vtkWeb WampServerFactory
#
# =============================================================================
class TimeoutWampWebSocketServerFactory(WampWebSocketServerFactory):
    """
    TimeoutWampWebSocketServerFactory is WampWebSocketServerFactory subclass
    that adds support to close the web-server after a timeout when the last
    connected client drops.
    Currently, the protocol must call connectionMade() and connectionLost() methods
    to notify the factory that the connection was started/closed.
    If the connection count drops to zero, then the reap timer
    is started which will end the process if no other connections are made in
    the timeout interval.
    """

    def __init__(self, factory, *args, **kwargs):
        self._connection_count = 0
        # 'timeout' is a required keyword (KeyError if absent); it is
        # removed before delegating since the base class doesn't know it.
        self._timeout = kwargs['timeout']
        # Arm the reaper immediately so the process also dies when no
        # client ever connects.
        self._reaper = reactor.callLater(self._timeout, lambda: reactor.stop())

        del kwargs['timeout']
        WampWebSocketServerFactory.__init__(self, factory, *args, **kwargs)
        # NOTE(review): this assigns `protocol` on the *base* class, so it
        # affects every WampWebSocketServerFactory in the process, not just
        # this instance — confirm that is intended.
        WampWebSocketServerFactory.protocol = TimeoutWampWebSocketServerProtocol

    def connectionMade(self):
        # Any (re)connection cancels a pending shutdown.
        if self._reaper:
            log.msg("Client has reconnected, cancelling reaper", logLevel=logging.DEBUG)
            self._reaper.cancel()
            self._reaper = None

        self._connection_count += 1
        log.msg("on_connect: connection count = %s" % self._connection_count, logLevel=logging.DEBUG)

    def connectionLost(self, reason):
        if self._connection_count > 0:
            self._connection_count -= 1
        log.msg("connection_lost: connection count = %s" % self._connection_count, logLevel=logging.DEBUG)

        # Last client gone: start the shutdown timer.
        if self._connection_count == 0 and not self._reaper:
            log.msg("Starting timer, process will terminate in: %ssec" % self._timeout, logLevel=logging.DEBUG)
            self._reaper = reactor.callLater(self._timeout, lambda: reactor.stop())
# =============================================================================
class TimeoutWampWebSocketServerProtocol(WampWebSocketServerProtocol):
    """Protocol that notifies its factory of connection open/close so the
    factory can maintain its connection count and reap timer."""

    def connectionMade(self):
        WampWebSocketServerProtocol.connectionMade(self)
        self.factory.connectionMade()

    def connectionLost(self, reason):
        WampWebSocketServerProtocol.connectionLost(self, reason)
        self.factory.connectionLost(reason)
# =============================================================================
class AuthDb:
    """
    An in-memory-only user database of a single user.
    """
    # PBKDF2 parameters shared with the challenge sent to clients.
    AUTHEXTRA = {'salt': 'salt123', 'keylen': 32, 'iterations': 1000}
    def __init__(self):
        # Single built-in account; the stored value is the derived key,
        # never the plain secret.
        salt = self.AUTHEXTRA['salt']
        self._creds = {'vtkweb': auth.derive_key("vtkweb-secret", salt)}
    def get(self, authid):
        ## we return a deferred to simulate an asynchronous lookup
        return defer.succeed(self._creds.get(authid, None))
    def updateKey(self, id, newKey):
        # Derive and store a fresh key for the given account id.
        self._creds[id] = auth.derive_key(newKey, self.AUTHEXTRA['salt'])
# =============================================================================
class PendingAuth:
    """
    Used for tracking pending authentications.
    """
    def __init__(self, key, session, authid, authrole, authmethod, authprovider):
        # Remember who/what is being authenticated.
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider
        self.session = session
        # Freshness markers folded into the challenge.
        self.timestamp = util.utcnow()
        self.nonce = util.newid()
        # The challenge is the JSON serialization of the pending-auth state;
        # the expected signature is derived from it with the user's key.
        self.challenge = json.dumps({
            'authid': self.authid,
            'authrole': self.authrole,
            'authmethod': self.authmethod,
            'authprovider': self.authprovider,
            'session': self.session,
            'nonce': self.nonce,
            'timestamp': self.timestamp
        })
        self.signature = auth.compute_wcs(key, self.challenge)
# =============================================================================
class CustomWampCraRouterSession(RouterSession):
    """
    A custom router session that authenticates via WAMP-CRA.
    """
    def __init__(self, routerFactory):
        """
        Constructor.
        """
        RouterSession.__init__(self, routerFactory)
    @defer.inlineCallbacks
    def onHello(self, realm, details):
        """
        Callback fired when client wants to attach session.
        Looks the client up in the factory's auth database and, when found,
        issues a WAMP-CRA challenge; otherwise (or when no supported
        authmethod is offered) denies the attach.
        """
        self._pending_auth = None
        if details.authmethods:
            for authmethod in details.authmethods:
                if authmethod == u"wampcra":
                    authdb = self.factory.authdb
                    ## lookup user in user DB
                    key = yield authdb.get(details.authid)
                    ## if user found ..
                    if key:
                        ## setup pending auth
                        self._pending_auth = PendingAuth(key, details.pending_session,
                            details.authid, "user", authmethod, "authdb")
                        ## send challenge to client
                        extra = { 'challenge': self._pending_auth.challenge }
                        ## when using salted passwords, provide the client with
                        ## the salt and the PBKDF2 parameters used. Take them
                        ## from AUTHEXTRA so they always match the parameters
                        ## the server used to derive the stored key (these
                        ## values were previously duplicated here as literals).
                        extra['salt'] = authdb.AUTHEXTRA['salt']
                        extra['iterations'] = authdb.AUTHEXTRA['iterations']
                        extra['keylen'] = authdb.AUTHEXTRA['keylen']
                        defer.returnValue(types.Challenge('wampcra', extra))
        ## deny client
        defer.returnValue(types.Deny())
    def onAuthenticate(self, signature, extra):
        """
        Callback fired when a client responds to an authentication challenge.
        """
        ## if there is a pending auth, and the signature provided by client matches ..
        if self._pending_auth and signature == self._pending_auth.signature:
            ## accept the client
            return types.Accept(authid = self._pending_auth.authid,
                                authrole = self._pending_auth.authrole,
                                authmethod = self._pending_auth.authmethod,
                                authprovider = self._pending_auth.authprovider)
        ## deny client
        return types.Deny()
# =============================================================================
# Simple web server endpoint handling POST requests to execute rpc methods
# =============================================================================
class HttpRpcResource(resource.Resource, object):
    """
    Simple web server endpoint handling POST requests to execute the rpc
    methods registered (via '_wampuris' decorations) on the server protocol
    and its vtkweb sub-protocols.
    """
    def __init__(self, serverProtocol, endpointRootPath):
        """
        :param serverProtocol: vtkweb server protocol whose protocols supply
            the callable endpoints.
        :param endpointRootPath: URL prefix; the path segment following it
            names the rpc method to invoke.
        """
        super(HttpRpcResource, self).__init__()
        self.functionMap = {}
        self.urlMatcher = re.compile(endpointRootPath.strip('/') + '/([^/]+)')
        # Build the rpc method dictionary. The predicate is loop-invariant,
        # so define it once instead of per protocol object.
        isCallable = lambda x: inspect.ismethod(x) or inspect.isfunction(x)
        protocolList = serverProtocol.getVtkWebProtocols()
        protocolList.append(serverProtocol) # so the exit methods get "registered"
        for protocolObject in protocolList:
            for name, proc in inspect.getmembers(protocolObject.__class__, isCallable):
                # Only methods decorated with a wamp uri are endpoints.
                if "_wampuris" in proc.__dict__:
                    pat = proc.__dict__["_wampuris"][0]
                    if pat.is_endpoint():
                        self.functionMap[pat.uri()] = (protocolObject, proc)
    def extractRpcMethod(self, path):
        # Return the method name embedded in the URL, or None when the path
        # does not match the endpoint pattern.
        m = self.urlMatcher.search(path)
        return m.group(1) if m else None
    def getChild(self, path, request):
        # Every sub-path is handled by this same resource.
        return self
    def render_POST(self, request):
        payload = json.loads(request.content.getvalue())
        args = payload['args']
        methodName = self.extractRpcMethod(request.path)
        # Previously an unknown or unparsable method name raised KeyError
        # (surfacing as an HTTP 500); report it explicitly as 404 instead.
        if methodName not in self.functionMap:
            request.setResponseCode(404)
            return json.dumps({'error': 'No rpc method matching %s' % request.path})
        obj, func = self.functionMap[methodName]
        results = func(obj, *args)
        return json.dumps(results)
# =============================================================================
# Binary WebSocket image push protocol
# =============================================================================
class ImagePushBinaryWebSocketServerProtocol(WebSocketServerProtocol):
    """
    WebSocket protocol that pushes rendered view images to the client: each
    frame is a small JSON metadata message followed by the raw image bytes
    as a binary message.
    """
    def onOpen(self):
        # 'imageCapture' is a module-level helper -- assumed to be
        # initialized elsewhere before any client connects; TODO confirm.
        global imageCapture
        self.helper = imageCapture
        self.app = imageCapture.getApplication()
        # Maps view_id -> capture settings dict
        # ({quality, enabled, view, view_id, mtime}).
        self.viewToCapture = {}
        self.lastStaleTime = 0
        self.staleHandlerCount = 0
        self.deltaStaleTimeBeforeRender = 0.5 # 0.5s
        # Schedule a render on the reactor loop whenever the application
        # fires a 'PushRender' event.
        self.subscription = self.app.AddObserver('PushRender', lambda obj, event: reactor.callLater(0.0, lambda: self.render()))
    def onMessage(self, msg, isBinary):
        # Client messages are JSON capture-control requests keyed by view_id.
        request = json.loads(msg)
        if 'view_id' in request:
            viewId = str(request['view_id'])
            if viewId not in self.viewToCapture:
                # First sight of this view: register default capture settings.
                self.viewToCapture[viewId] = { 'quality': 100, 'enabled': True, 'view': self.helper.getView(viewId), 'view_id': viewId, 'mtime': 0 }
            if 'invalidate_cache' in request:
                if self.viewToCapture[viewId]['view']:
                    self.app.InvalidateCache(self.viewToCapture[viewId]['view'].SMProxy)
                self.render()
            else:
                # Update fields
                objToUpdate = self.viewToCapture[viewId]
                for key in request:
                    objToUpdate[key] = request[key]
    def onClose(self, wasClean, code, reason):
        # Stop capturing and detach the 'PushRender' observer.
        self.viewToCapture = {}
        self.app.RemoveObserver(self.subscription)
    def connectionLost(self, reason):
        # Same teardown as onClose; the transport may drop without a clean
        # WebSocket close handshake.
        self.viewToCapture = {}
        self.app.RemoveObserver(self.subscription)
    def renderStaleImage(self):
        # Deferred re-render: only render once the image has been stale for
        # at least deltaStaleTimeBeforeRender seconds; otherwise reschedule
        # for the remaining interval.
        self.staleHandlerCount -= 1
        if self.lastStaleTime != 0:
            delta = (time.time() - self.lastStaleTime)
            if delta >= self.deltaStaleTimeBeforeRender:
                self.render()
            else:
                self.staleHandlerCount += 1
                reactor.callLater(self.deltaStaleTimeBeforeRender - delta + 0.001, lambda: self.renderStaleImage())
    def render(self):
        # Push one frame for every enabled view; returns True when at least
        # one view is still enabled (i.e., there is more work to do).
        keepGoing = False
        for k, v in self.viewToCapture.iteritems():
            if v['enabled']:
                keepGoing = True
                view = v['view']
                if hasattr(view,'SMProxy'):
                    view = view.SMProxy
                quality = v['quality']
                mtime = v['mtime']
                base64Image = self.app.StillRenderToString(view, mtime, quality)
                stale = self.app.GetHasImagesBeingProcessed(view)
                if base64Image:
                    v['mtime'] = self.app.GetLastStillRenderToStringMTime()
                    meta = {
                        'size': self.app.GetLastStillRenderImageSize(),
                        'id': k
                    }
                    # Metadata first (text frame), then the decoded image
                    # bytes (binary frame).
                    self.sendMessage(json.dumps(meta), False)
                    self.sendMessage(base64.standard_b64decode(base64Image), True)
                if stale:
                    # Image still being processed: arrange a follow-up render
                    # unless one is already pending.
                    self.lastStaleTime = time.time()
                    if self.staleHandlerCount == 0:
                        self.staleHandlerCount += 1
                        reactor.callLater(self.deltaStaleTimeBeforeRender, lambda: self.renderStaleImage())
                else:
                    self.lastStaleTime = 0
        return keepGoing
|
keithroe/vtkoptix
|
Web/Python/vtk/web/wamp.py
|
Python
|
bsd-3-clause
| 15,475
|
[
"VTK"
] |
c5cfbc1c73d4ac60a2b3bf5df92700a263da2cbc1a7f74c50d8a158d20bbb85d
|
"""
Boring json which is just a basic
dump of the resource into json format.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from ripozo.adapters import AdapterBase
import json
_CONTENT_TYPE = 'application/json'
class BasicJSONAdapter(AdapterBase):
    """
    Just a plain old JSON dump of the properties.
    Nothing exciting.
    Format:
    .. code-block:: javascript
        <resource_name>: {
            field1: "value"
            field2: "value"
            relationship: {
                relationship_field: "value"
            }
            list_relationship: [
                {
                    relationship_field: "value"
                }
                {
                    relationship_field: "value"
                }
            ]
        }
    """
    formats = ['json', _CONTENT_TYPE]
    extra_headers = {'Content-Type': _CONTENT_TYPE}
    @property
    def formatted_body(self):
        """
        :return: The formatted body that should be returned.
            It's just a ``json.dumps`` of the properties and
            relationships
        :rtype: unicode
        """
        response = {}
        parent_properties = self.resource.properties.copy()
        self._append_relationships_to_list(response, self.resource.related_resources)
        self._append_relationships_to_list(response, self.resource.linked_resources)
        # Applied last so the resource's own properties win over any
        # relationship entries with the same key.
        response.update(parent_properties)
        return json.dumps({self.resource.resource_name: response})
    @staticmethod
    def _append_relationships_to_list(rel_dict, relationships):
        """
        Append the properties of the given relationship resources to
        ``rel_dict``, keyed by relationship name.
        Mutates ``rel_dict`` in place and returns ``None`` (the previous
        docstring incorrectly claimed a list was returned).
        :param dict rel_dict: mapping updated in place; each relationship
            name maps to a list of property dicts.
        :param list relationships: iterable of
            ``(resource, name, embedded)`` tuples.
        """
        for resource, name, embedded in relationships:
            rel_list = rel_dict.setdefault(name, [])
            if isinstance(resource, (list, tuple)):
                for res in resource:
                    rel_list.append(res.properties)
                continue
            rel_list.append(resource.properties)
|
chenokay/ripozo
|
ripozo/adapters/basic_json.py
|
Python
|
gpl-2.0
| 2,295
|
[
"exciting"
] |
7f208346cd9932eecf27a66c9d32f2e66e67943dbabd19bd65d7a09cee2c8915
|
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
# Request object; contains Apache request object and other handy stuff.
try:
from mod_python import apache
import mod_python.util
except ImportError:
pass # script is being run locally
import hashlib
from lxml import etree
import re
import time
import traceback
import conf
import g
from grax.item_manager import Item_Manager
from gwis import cmd_factory
from gwis.query_branch import Query_Branch
from gwis.query_client import Query_Client
from gwis.query_filters import Query_Filters
from gwis.query_overlord import Query_Overlord
from gwis.query_revision import Query_Revision
from gwis.query_viewport import Query_Viewport
from gwis.exception.gwis_error import GWIS_Error
from gwis.exception.gwis_warning import GWIS_Warning
from item.util.item_query_builder import Item_Query_Builder
from util_ import db_glue
from util_ import mem_usage
from util_ import misc
log = g.log.getLogger('gwis.request')
class Request_Parts(object):
   """Bundles the per-request GWIS helper objects (branch/client/filters/
   revision/viewport), each wrapping one aspect of the incoming request."""
   __slots__ = (
      'branch', # query_branch object
      'client', # query_client object
      'filters', # query_filters object
      'revision', # query_revision object
      'viewport', # query_viewport object
      )
   def __init__(self, req):
      # Each helper is constructed against the owning Request object.
      self.branch = Query_Branch(req)
      self.client = Query_Client(req)
      self.filters = Query_Filters(req)
      self.revision = Query_Revision(req)
      self.viewport = Query_Viewport(req)
   def decode_gwis(self):
      # Decode the GWIS packet. Do this in a particular order, since some
      # routines expect previous routines to have populated some things.
      self.client.decode_gwis()
      self.filters.decode_gwis()
      self.revision.decode_gwis()
      self.viewport.decode_gwis()
      # Branch decoding goes last: it depends on state populated above.
      self.branch.decode_gwis()
# Request wraps an Apache request object from ModPython. For more info:
# http://www.modpython.org/live/current/doc-html/pyapi-mprequest-mem.html
class Request(object):
__slots__ = (
'start_time_str', # time of request initialization (string)
'start_time_msec', # time of request initialization (float)
'areq', # Apache request object from mod_python
'cmd', # command object
'db', # db_glue object
'doc_in', # incoming request body (XML tree)
'raw_content', # incoming request body (raw)
'content_in', # incoming request body (text)
'content_out', # outgoing response body (text)
'file_data_in', #
'htmlfile_out', # override content_out and send html back instead
'sendfile_out', # override content_out and send file back instead
'gwis_kvp_in', # GWIS key-value parameters
'parts', # Request_Parts object
#
'branch', # query_branch object
'client', # query_client object
'filters', # query_filters object
'revision', # query_revision object
'viewport', # query_viewport object
)
   def __init__(self, areq, start_time_msec=None):
      """Initialize request state and decode the Apache request, if any.

      :param areq: mod_python Apache request object, or None (when built
         outside of an Apache handler context).
      :param start_time_msec: optional start time (float, seconds since
         epoch); defaults to now.
      """
      log.verbose('Creating new request')
      # Remember when we started processing this request so we can compute
      # how long it takes
      self.start_time_str = misc.nowstr()
      self.start_time_msec = start_time_msec or time.time()
      self.areq = areq
      self.cmd = None
      self.db = None
      self.doc_in = None
      self.raw_content = ''
      self.content_in = ''
      self.content_out = None
      self.file_data_in = None
      self.htmlfile_out = None
      self.sendfile_out = None
      self.gwis_kvp_in = dict()
      if self.areq is not None:
         self.setup_areq()
      # Do this last, since these guys do a lot more processing of things we
      # just configured.
      self.parts = Request_Parts(self)
      # Convenience ptrs.
      self.branch = self.parts.branch
      self.client = self.parts.client
      self.filters = self.parts.filters
      self.revision = self.parts.revision
      self.viewport = self.parts.viewport
# ***
#
def __str__(self):
selfie = (
#'request: raw: %s / rln: %s / req: %s / hdr: %s'
'request: rln: %s / req: %s'
% (#self.raw_content,
self.areq.read_length if self.areq is not None else '',
self.areq.the_request if self.areq is not None else '',
#self.areq.headers_in if self.areq is not None else '',
))
return selfie
#
   def setup_areq(self):
      """Read and pre-process the incoming request body.

      XML-ish payloads are decoded into self.content_in (non-ASCII dropped);
      multipart uploads are routed to setup_areq_incoming(). Raises
      GWIS_Warning/GWIS_Error to short-circuit monitoring pings, malformed
      requests, and client read timeouts.
      """
      # Tell mod_python to populate the subprocess_env member
      self.areq.add_common_vars()
      #log.verbose('Apache request: %s' % (dir(areq),))
      # If you send a direct request from your browser or wget, e.g.,
      #  http://ccpv2/gwis?rqst=item_names_get&ityp=branch&gwv=3
      # you'll get a KeyError on Content-Type.
      try:
         time_0 = time.time()
         log.verbose('self.areq.headers_in[\'Content-Type\']: %s'
                     % (self.areq.headers_in['Content-Type'],))
         if self.areq.headers_in['Content-Type'] == 'text/xml':
            # FIXME: This voodoo strips out any non-ASCII characters and
            # replaces them with question marks. This is a nasty hack
            # to address SQL injection (see Bug 1599), because I [reid]
            # don't have time to figure out character encoding problems
            # right now (see Bug 1224). I believe psycopg may also be a
            # problem (Bug 1532).
            do_decode = True
         elif (self.areq.headers_in['Content-Type']
               == 'application/x-www-form-urlencoded'):
            # This is a download request from the client. For whatever reason,
            # when you use Flex's FileReference, Flash sends this header. The
            # content is still our GML, though (can we start calling it CCPML
            # or something? It's XML, I guess, but it's our XML, and not
            # Geometric XML, per se).
            do_decode = True
         else:
            # This is upload data. Or unexpected unsomethings.
            do_decode = False
         if do_decode:
            # 2014.02.04: Hrmpf. I copied and pasted from a Web page and
            # there was a hidden unicode control character (the left-to-right
            # mark). It gets decoded from three characters to one (the "?").
            # http://bugs.cyclopath.org/show_bug.cgi?id=2826
            self.raw_content = self.areq.read()
            cnt_decoded = self.raw_content.decode('utf-8')
            # BUG nnnn: i18n support. Here we dump anything that's not ASCII!
            # 2014.02.04: If we 'replace', then unicode characters become
            # question marks. But sometimes the unicode character
            # is a hidden control character, so why not just ignore
            # it.
            # self.content_in = cnt_decoded.encode('ascii', 'replace')
            self.content_in = cnt_decoded.encode('ascii', 'ignore')
         else:
            self.setup_areq_incoming()
      except KeyError, e:
         # setup_areq: KeyError: e: 'Content-Type'
         # setup_areq: rln: 0 / req: GET /gwis?request=Null&ping=mon.itor.us
         # HTTP/1.0 / hdr: {'Accept': '*/*', 'Host': 'cycloplan.cyclopath.org',
         # 'Connection': 'close', 'User-Agent': 'Mozilla/5.0 (compatible;
         # mon.itor.us - free monitoring service; http://mon.itor.us)'}
         mon_itor_us_req_g = 'GET /gwis?request=Null&ping=mon.itor.us HTTP'
         # 2014.02.18: Wait, what? When did they start POSTing?
         mon_itor_us_req_p = 'POST /gwis?request=Null&ping=mon.itor.us HTTP'
         if (self.areq.the_request.startswith(mon_itor_us_req_g)
             or self.areq.the_request.startswith(mon_itor_us_req_p)):
            # It doesn't really matter what we return, since the answer is not
            # parsed. So just short-circuit the byte outta here!
            raise GWIS_Warning('Hello, mon.itor.us.',
                               tag=None, logger=log.info)
         # This is a bit of a hack, but it's fine unless we add a bunch
         # more HTML-responding commands. For now, the user_unsubscribe
         # command is a rare command that returns HTML.
         unsubscribe_req = 'GET /gwis?request=user_unsubscribe&'
         nowatchers_req = 'GET /gwis?request=user_nowatchers&'
         if ( (not self.areq.the_request.startswith(unsubscribe_req))
             and (not self.areq.the_request.startswith(nowatchers_req))):
            # We should make exceptions for known non-conformities, like
            # mon.itor.us. But the rest are suspect and we should investigate.
            # E.g.,
            # Jan-12 05:17:31 WARN gwis.request # 140702243264344-67:
            # setup_areq: KeyError: e: 'Content-Type'
            # Jan-12 05:17:31 WARN gwis.request # 140702243264344-67:
            # setup_areq: rln: 0 / req: GET /gwis?rqst=item_draw_class_get&
            # ...&android=true HTTP/1.1 / hdr: {'Accept-Encoding': 'gzip',
            # 'Connection': 'Keep-Alive', 'Host': 'cycloplan.cyclopath.org',
            # 'User-Agent': 'Dalvik/1.6.0 (Linux; U; Android 4.1.1;
            # sdk Build/JRO03R)'}
            # So... this is a rogue Android app, or is someone testing?
            log.warning('setup_areq: KeyError: e: %s' % (str(e),))
            log.warning('setup_areq: rln: %s / req: %s / hdr: %s'
                        % (self.areq.read_length,
                           self.areq.the_request,
                           self.areq.headers_in,))
            # Content-Type is missing. Or this is a GET not POST.
            # And we want logcheck complaints, so log to warning or error.
            raise GWIS_Error('Malformed request. Sorry!',
                             tag=None, logger=log.error)
         # To continue processing instead, just comment the last line, then:
         # else, it's a special command, user_unsubscribe or user_nowatchers.
         self.content_in = ''
      except IOError, e:
         # E.g., "IOError: Client read error (Timeout?)"
         self.content_in = ''
         elapsed = time.time() - time_0
         log.debug('setup_areq: %s / err: %s'
                   % (misc.time_format_scaled(elapsed)[0], str(e),))
         # Raising GWIS_Error is the easiest way to stop processing the
         # request, but then we'll try to send a response... which should fail.
         raise GWIS_Error('IOError on Timeout. Are you there?',
                          tag=None, logger=log.info)
#
   def setup_areq_incoming(self):
      """Consume a multipart/form-data upload via mod_python's FieldStorage.

      Stores the uploaded file bytes in self.file_data_in and recovers the
      GML/XML portion of the multipart body into self.content_in.
      """
      #import rpdb2
      #rpdb2.start_embedded_debugger('password', fAllowRemote=True)
      g.assurt(self.areq.headers_in['Content-Type'].startswith(
         'multipart/form-data; boundary='))
      # BUG nnnn: This is nuts. I [lb] can't figure out how to process a
      # download in parts. So, for now, everything gets loaded in memory.
      # Also, I can't figure out how to get req.data in flashclient to
      # set its Content-Disposition: form-data; name= to something other
      # than the first few chars of the GML.
      # FieldStorage calls read(), which exhausts the request of data.
      # It also converts the URL components as well as the multipart
      # components into dictionary key,value pairs.
      #
      # I looked and I looked and I looked and this seems like the best
      # way to get megabytes of data via a download (i.e., an upload from
      # the client). There is, like, no forum chatter about gettings
      # downloads via mod_python. Pyhton's library.pdf says to use
      # FieldStoarge "if you are expecting megabytes to be uploaded -- in
      # that case, use the FieldStorage class instead which is much more
      # flexible." I'm not sure that flexible means faster. Not that that
      # matters -- networks packets can come out of order. And it's not
      # our job to worry about how a download is stored while being
      # received. That is, it could be in memory or on disk, and we don't
      # care. But it seems to me -- and I could be wrong, but I've not
      # read otherwise -- that FieldStorage probably calls the mod_python
      # request's read(), which probably probably means the file we just
      # downloaded, which apache could have chucked on disk, might now be
      # loaded into memory? Or maybe not. Anyway, I think we're left with
      # FieldStorage and nothing else. (I'm not sure how to authenticate
      # the user before making this FieldStorage object: when the client
      # connection is first made, what data can we access to verify the
      # user?)
      fs = mod_python.util.FieldStorage(self.areq)
      # These are all the things from the URL.
      g.assurt(fs['body'].value == 'yes')
      g.assurt(fs['rqst'].value == 'commit')
      # E.g., g.assurt(fs['gwv'].value == '3')
      g.assurt(fs['gwv'].value == str(conf.gwis_version))
      # Skipping: brid, rev, browid, sessid
      # I have no idea what this means. Flex must set it. (The 'Submit
      # Query' sounds vaguely Web 1.0 to me... a blast from the 1997 past
      g.assurt(fs['Upload'].value == 'Submit Query')
      #import rpdb2
      #rpdb2.start_embedded_debugger('password', fAllowRemote=True)
      # FIXME: Use a filter handler?
      # http://www.modpython.org/live/current/doc-html/pyapi-filter.html
      # So you can periodically track and save bits of the upload
      # (download?) and not have to process it all in memory all at once
      # at the end of the HTTP. Anyway, your httpd.conf will probably
      # time out...............
      log.debug('request.py: Filename: %s' % (fs['Filename'].value,))
      # FIXME: BIG BIG BUG BUG: The way we currently handles files sets
      # us up for huge DDOS attack vector boo-yeah. Should check creds
      # before consuming the whole file, eh buddy?
      # MAGIC_NUMBER: 'Filedata' is Flex's name. Or maybe a Web
      # standard....
      self.file_data_in = fs['Filedata'].value
      # I don't know how to name the the GML content, so we look for it.
      # startswith.(
      # '<data>\n <metadata>\n <changenote/>\n <user name')
      # '<data>\n <metadata>\n <changenote/>\n <user name':
      # [Field('<data>\n <metadata>\n <changenote/>\n
      # <user name', '"username" token="sdfdsfdsfdsfsd"/>\n
      # </metadata>\n <items>\n
      # <merge_job stack_id="-10" version="0" name="null" deleted="0"
      # job_act="upload" job_priority="0" for_group_id="0"
      # for_revision="0"/>\n </items>\n</data>')]
      for key in fs:
         if key.startswith('<data>'):
            # This is so strange. The first number of chars are made into
            # its name, since we didn't specify a name. So reassemble.
            # Oh, baloney. it split on the =. what am i suppose to do??!!
            # This is a superhack. Add the equals back into the XML....
            # I figure figure how else to split a multipart (in python),
            # and I can't figure out how to tell Flash to name to XML
            # data portion of the URLRequest... jeepers! At this this
            # works, super wonder golden trick.
            # FIXME: There's still a problem decoding the xml..........
            self.content_in = key + '=' + fs[key]
            self.content_in = \
               self.content_in.decode('utf-8').encode('ascii', 'replace')
            #
            #self.content_in = key + fs[key]
            log.debug('found data/content_in: %s' % (self.content_in,))
            break # Don't allow us to be fooled twice, fool you
         else:
            log.verbose('request.py: key: %s' % (key,))
            g.assurt(key in ('body', 'rqst', 'gwv',
                             'brid', 'rev', 'browid', 'sessid',
                             'Upload', 'Filedata', 'Filename',))
# *** Public interface
#
   def as_iqb(self, addons=True, username=None, user_group_id=None):
      """Build and finalize an Item_Query_Builder from this request.

      :param addons: when True, wire the request's viewport and filters
         into the query builder; otherwise both are None.
      :param username: optional user override; must be supplied together
         with user_group_id (both or neither).
      :param user_group_id: group ID paired with username.
      :return: an Item_Query_Builder on which Query_Overlord.finalize_query
         has already been called.
      """
      # See if the caller wants the user's viewport or filters or not.
      if addons:
         viewport = self.viewport
         filters = self.filters
      else:
         viewport = None
         filters = None
      # Respect anonymous cowards.
      if (not username) and (not user_group_id):
         username = self.client.username
         user_group_id = self.client.user_group_id
      else:
         g.assurt(username and user_group_id)
      # Check the rev.
      # Make the query builder based on the request parts.
      qb = Item_Query_Builder(self.db,
                              username,
                              self.branch.branch_hier,
                              self.revision.rev,
                              viewport, filters)
      # This is a lotta hacky. Setup the dev. switches.
      # Skipping qb.request_is_mobile = self.client.request_is_mobile
      qb.request_is_local = self.client.request_is_local
      qb.request_is_script = self.client.request_is_script
      qb.request_is_secret = self.client.request_is_secret
      # This is a littler more hacky.
      # NOTE: self.doc_in is None during route analysis.
      if self.doc_in is not None:
         riat = self.doc_in.find('./metadata').get('request_is_a_test', False)
         qb.request_is_a_test = bool(int(riat))
      # This one's not too hacky.
      qb.user_group_id = user_group_id
      # Propagate client/session identity for auditing downstream.
      qb.session_id = self.client.session_id
      qb.remote_ip = self.client.remote_ip
      qb.remote_host = self.client.remote_host
      # Make sure the Item_Manager is available, whether or not we use it
      # (and do it before we call finalize_query, which may need it).
      qb.item_mgr = Item_Manager()
      # Call finalize_query now so we calculate, e.g., only_in_multi_geometry.
      Query_Overlord.finalize_query(qb)
      # Return the completed query builder.
      return qb
#
   def process_req(self):
      '''
      Processes a client request.
      This request always succeeds, even if the request cannot really be
      processed (that is, if the request cannot be completed for the user,
      we return error text instead).

      Control flow: the inner try runs the command and converts expected
      failures (GWIS_*) and programming errors into error responses; the
      outer try catches anything the response-sending step itself raises,
      dumps diagnostics, and re-raises for Apache to handle.
      '''
      log.verbose('Processing request')
      # This server isn't RESTful; we only support GET and POST, not UPDATE or
      # DELETE.
      g.assurt(self.areq.method in ('GET', 'POST',))
      # Developers can enable dump_requests to get local copies of client
      # request (but Landon recommends you use a network sniffer instead,
      # namely, WireShark).
      if conf.dump_requests:
         self.dump_request(conf.dump_dir)
      # Track memory usage, if requested.
      usage_0 = None
      if conf.debug_mem_usage:
         usage_0 = mem_usage.get_usage_mb()
      # Wrap our control logic with an outer try block so we can catch
      # exceptions raised by the inner try block exception handler
      try:
         # Use one try block to try to process the user's request; if anything
         # fails, we'll catch it and try to send the user an error message.
         try:
            # Developers can break into the debugger here if they wish,
            # after the client request is parsed but before it's processed
            if conf.break_on_gwis_request:
               log.debug('Waiting for remote debug client...')
               import rpdb2
               rpdb2.start_embedded_debugger('password', fAllowRemote=True)
            # Open the database connection
            log.verbose('Opening database connection')
            g.assurt(self.db is None)
            self.db = db_glue.new()
            # Process the request
            self.command_process_req()
         # GWIS throws an error if Cyclopath cannot (or will not) complete the
         # request (i.e., GML syntax error, or user doesn't have access, etc.)
         # This catches both GWIS_Warning and GWIS_Error.
         except GWIS_Warning, e:
            self.error_handler_gwis(e)
         # Python throws an error if we made a programming error; these
         # should always be bugs that we must fix (otherwise use GWIS_Error).
         except Exception, e:
            self.error_handler_exception(e)
         # Fall-through from try-block; the exception handlers for the
         # previous try-block never re-raise, so execution always makes
         # it here.
         # Let go of the database lock once outside of the try block
         if self.db is not None:
            # NOTE: Under normal conditions -- that is, the request succeeded
            # -- there's nothing to rollback. However, if processing the
            # request threw an exception, here's where we roll it back.
            self.db.transaction_rollback()
            self.db.close()
            self.db = None
         # Send the response to the client. At this point, we know that the
         # response is a GWIS XML response of some kind, even if it's an
         # error, so 200 OK is always the right HTTP response code.
         # NOTE This raises if there's an unexpected error (in which case
         # we won't be returning 200 OK)
         self.command_process_resp()
      # Handle unhandled exceptions
      except Exception, e:
         log.debug('Caught exception "%s" / %s'
                   % (str(e), traceback.format_exc(),))
         # EXPLAIN: Why don't we call apache.log_error like in
         # error_handler_exception()?
         # Catastrophic failure or IOError; unknown exception;
         # dump and re-raise
         self.dump_exception(conf.dump_dir)
         # If the developer hasn't already asked for the dump, dump it.
         # NOTE This means we always dump exceptions on the production
         # server, since we always want to investigate these failures.
         if not conf.dump_requests:
            self.dump_request(conf.dump_dir)
         # NOTE Re-raise the failure, which Apache catches, not us!
         raise
      conf.debug_log_mem_usage(log, usage_0, 'request.process_req')
      return
# *** Private interface
# Process the request
def command_process_req(self):
# Prepare the request object
# NOTE The request object called this earlier
#self.areq.add_common_vars()
self.decode_gwis()
# Tell the class object to process the request
self.cmd.doit()
# Prepare the response.
self.content_out = self.cmd.response_xml()
# FIXME: reset self.cmd to None?
self.cmd = None
#
   def command_process_resp(self):
      """Send the prepared response (XML, or an html/file override) back to
      the client via the static helper."""
      Request.areq_send_response(self.areq,
                                 self.content_out,
                                 self.htmlfile_out,
                                 self.sendfile_out,
                                 self.start_time_msec,
                                 self.dump_response)
#
   @staticmethod
   def areq_send_response(areq,
                          content_out,
                          htmlfile_out=None,
                          sendfile_out=None,
                          start_time_msec=None,
                          dump_response_fcn=None):
      """Write the response for areq and record the processing time.

      Exactly one payload is sent: htmlfile_out (text/html) if set, else
      sendfile_out (a path streamed via areq.sendfile), else content_out
      (text/xml; must be truthy). IOErrors caused by the remote client
      going away are swallowed; others re-raise (see
      raise_on_non_remote_problem).
      """
      # Mark the processing time, or how long we took to handle the request.
      # FIXME: Are we sending ptime to the client? (It's in the Apache req.)
      elapsed = time.time() - start_time_msec
      areq.subprocess_env['ptime'] = ('%.3g' % (elapsed,))
      # Setup the response.
      if htmlfile_out is not None:
         areq.content_type = 'text/html'
         areq.set_content_length(len(htmlfile_out))
         try:
            log.verbose('areq_send_response: sending html...')
            areq.write(htmlfile_out)
            time_elapsed = misc.time_format_elapsed(start_time_msec)
            log.info('areq_send_response: sent %d html bytes in %s'
                     % (len(htmlfile_out), time_elapsed,))
         except IOError, e:
            Request.raise_on_non_remote_problem(e, callee='html')
      elif sendfile_out is not None:
         # See modpython.org/live/current/doc-html/pyapi-mprequest-meth.html
         #
         # This fcn. obviously blocks.
         try:
            # If we run into problem, we could trying implementing a download
            # resume feature: sendfile(path, offset, len)
            bytes_sent = areq.sendfile(sendfile_out)
            log.debug('bytes_sent: %d' % (bytes_sent,))
         except IOError, e:
            Request.raise_on_non_remote_problem(e, callee='file')
      else:
         g.assurt(content_out)
         areq.content_type = 'text/xml'
         # Set the length of the response.
         areq.set_content_length(len(content_out))
         # We use a try/except block to send the request. It's okay if this
         # fails because the client disconnected, but if it fails for any other
         # reason, re-raise.
         try:
            # Send the response to the client.
            log.verbose('areq_send_response: sending...')
            areq.write(content_out)
            time_elapsed = misc.time_format_elapsed(start_time_msec)
            log.info('areq_send_response: sent %d gwis bytes in %s'
                     % (len(content_out), time_elapsed,))
            # See if the developer wants a local copy of the response.
            # (Again, Landon suggests using WireShark to sniff the network,
            # which is quicker and more powerful.)
            if conf.dump_responses and (dump_response_fcn is not None):
               dump_response_fcn(conf.dump_dir)
         except IOError, e:
            Request.raise_on_non_remote_problem(e, callee='xml')
#
@staticmethod
def raise_on_non_remote_problem(io_err, callee):
# If the error is the client simply going away, we want to ignore
# that. But re-raise other IOError's. See bug 1479.
# DeprecationWarning: BaseException.message deprecated as of Python 2.6
# Not needed: io_err.message
if ((str(io_err).find('Write failed, client closed connection.') == 0)
or (str(io_err).find('Client read error (Timeout?)') == 0)):
log.info('Ignoring IOError [%s]: %s' % (callee, io_err,))
else:
log.error('Unexpected IOError: %s' % (io_err,))
raise
#
   def decode_gwis(self):
      '''
      Decode the GWIS parameters of the request.

      * Decode the Request-URI query string (the part of the URL after '?').
        We place keyword-value pairs (KVPs) in self.gwis_kvp_in as a
        dictionary with lowercase keys (similar to WFS spec sec. 13). GWIS
        does not support multiple occurences of the same keyword.
      * Parse any XML input and place the etree object at req.doc_in.
      * Verify the GWIS 'version' of the request.
      * Create a command object based on the type of request.
      * Set up the request helpers based on the incoming request.

      Raises GWIS_Error on malformed content or an unknown request type.
      '''
      # FIXME: Check the mobile version.
      # Decode KVPs (from URL query string).
      if self.areq.args is not None:
         kvp = mod_python.util.parse_qs(self.areq.args)
         # Python 2 iteritems; only the first value of a repeated key wins.
         for (k, v) in kvp.iteritems():
            self.gwis_kvp_in[k.lower()] = v[0]
      # Parse incoming GML (from HTTP POST content part).
      # BUG 2725: Mobile sometimes sends a WFS_Log (GWIS_Log) with nonzero
      # Content-Length but no content.
      # FIXME: This doesn't fix the mobile problem, it just catches it here;
      # we still need to fix the problem in the android code.
      try:
         # Sanity-check the announced vs. actual content lengths; collect
         # every discrepancy so a single log burst shows them all.
         found_errs = []
         raise_error = ''
         content_len_hdr = int(self.areq.headers_in['Content-Length'])
         content_len_req = int(self.areq.read_length)
         if content_len_hdr != content_len_req:
            found_errs.append('content_len_hdr != content_len_req')
         if (content_len_hdr == 0) or (content_len_req == 0):
            found_errs.append('content_len_* is zero')
         # FIXME: Should we raise?? Or are there XML-less commands?
         if len(self.content_in) == 0:
            found_errs.append('len(self.content_in) is zero')
         # FIXME: Should we raise?? Or are there XML-less commands?
         # Because we drop unicode control characters, the length of the
         # decoded string might be less than the input string's length.
         # See: http://bugs.cyclopath.org/show_bug.cgi?id=2826
         if len(self.content_in) > content_len_hdr:
            found_errs.append('content_len_hdr > len(self.content_in)')
            raise_error = 'Unexpected XML content length: so large'
         if found_errs:
            # 2014.02.04: Investigate this now that the length code above is
            # different.
            log_f = log.error
            if False:
               # 2014.09.23: Happened twice at 18:03:05, but says android=true.
               # Do we need to cast the_request to str?
               # Misses some: if '&android=true' in self.areq.the_request:
               if '&android=true' in str(self.areq.the_request):
                  log_f = log.info
               else:
                  log_f = log.error
            # SYNC_ME: Search: logcheck.
            log_f('decode_gwis: found errors: %s' % (' / '.join(found_errs),))
            log_f('decode_gwis: raw req: %s' % (self.raw_content,))
            log_f('decode_gwis: decoded: %s' % (self.content_in,))
            log_f('decode_gwis: req_ln: %s / hdr_ln: %s / req: %s / hdr: %s%s'
                  % (content_len_req,
                     content_len_hdr,
                     self.areq.the_request,
                     self.areq.headers_in,
                     '' if not raise_error else ' / raising',))
            # FIXME: Does Android display an error when this happens to
            # WFS_Log, or does it silently fail?
            if raise_error:
               raise GWIS_Error(raise_error)
      except KeyError:
         # Requests from Flashclient usually specify Content-Length. But other
         # commands, like a ping from mon.it.or.us, don't specify it.
         g.assurt(not self.content_in)
         pass
      if self.content_in:
         try:
            # BUG 2825: Mobile doesn't encode ampersands. E.g.:
            # b'<data><metadata><device is_mobile="True" /></metadata><addrs>
            # <addr addr_line="Gateway Fountain" />
            # <addr addr_line="W 50th St & S Dupont Ave" /></addrs></data>'
            # XMLSyntaxError(u'xmlParseEntityRef: no name, line 1, column 89',)
            # BUG 2825 (part 2): Mobile Geocoding fails, presumably because the
            # addr they send us does not match the one we return (we'd have to
            # unencode the & in our response to match the original query).
            # Bug 2825 - Mobile does not encode to/from addresses in GWIS
            # http://bugs.cyclopath.org/show_bug.cgi?id=2825
            # The XML is not URL encoded, so, e.g.,
            # >>> from lxml import etree
            # >>> test='<addr addr_line="Central & university"/>'
            # >>> etree.fromstring(test)
            # Traceback (most recent call last):
            # ...
            # lxml.etree.XMLSyntaxError: xmlParseEntityRef: ...
            try:
               self.doc_in = etree.fromstring(self.content_in)
            except etree.XMLSyntaxError, e:
               log.error('mobile bug not encoding: "%s"' % (self.content_in,))
               # MAYBE: [lb] tried a work-around but this is only half of it:
               # In the response, we want to use the original content_in
               # but I didn't add the second part of the work-around: at
               # this point it seems wiser to just fix the android app.
               # Nonetheless, here's the work-around code, which we use
               # to not work-around the bug but to tailor our error msg.
               # NOTE(review): the substitution below looks like a no-op --
               # it replaces '&...' with the same '&...'; presumably the
               # intent was to escape to an XML entity. Confirm before
               # relying on it.
               encode_re = re.compile(r'&([^a-zA-Z]+[^;])')
               try_again = re.sub(encode_re, r'&\1', self.content_in)
               self.doc_in = etree.fromstring(try_again)
               if try_again != self.content_in:
                  # BUG nnnn: This is lame; fix android.
                  raise GWIS_Error('%s%s'
                     % ('There was a problem -- which is our fault. You might',
                        ' have better luck trying "and" instead of "&".',))
               else:
                  log.error(
                     'decode_gwis: %s / cin: %s / req: %s / hdr: %s'
                     % ('not the mobile bug after all?',
                        self.content_in,
                        self.areq.the_request,
                        self.areq.headers_in,))
                  # MAYBE: Really send the Exception text to the client?
                  # [lb]'s two concerns: 1.) revealing sensitive data
                  # 2.) confusing users with code
                  raise GWIS_Error('Error parsing XML: %s' % (e,))
         except Exception, e:
            # MAYBE/BUG nnnn: Android Bug nnnn on user_hello?
            # Sep-19 13:44:16 ERRR gwis.request # Error parsing XML:
            # "EntityRef: expecting ';', line 1, column 45" / Traceback:
            # File ".../pyserver/gwis/request.py", line 571, in decode_gwis
            # self.doc_in = etree.fromstring(self.content_in)
            # [lxml parser internals elided]
            # XMLSyntaxError: EntityRef: expecting ';', line 1, column 45
            #
            # 70.197.196.92 - - [19/Sep/2013:13:44:16 -0500]
            # "POST /gwis?rqst=user_hello&browid=...&sessid=...&android=true
            # HTTP/1.1" 200 103 "-" "Dalvik/1.6.0 (Linux; U; Android 4.2.2;
            # Galaxy Nexus Build/JDQ39)" 0.0113
            # NOTE: logcheck filters all log statements from this file because
            # some of them -- like this one -- might reveal sensitive
            # user information. But by raising GWIS_Error, logcheck will
            # see that message, we'll get an email, and then it's up to
            # a DEV to poke around the log files.
            log.error(
               'decode_gwis: cin: %s / rln: %s / cln: %s / req: %s / hdr: %s'
               % (self.content_in,
                  self.areq.read_length,
                  content_len_hdr, # self.areq.headers_in['Content-Length'],
                  self.areq.the_request,
                  self.areq.headers_in,))
            log.error('Error parsing XML: "%s" / %s'
                      % (e, traceback.format_exc(),))
            # MAYBE: Really send the Exception text to the client?
            # [lb]'s two concerns: 1.) revealing sensitive data
            # 2.) confusing users with code
            raise GWIS_Error('Error parsing XML: %s' % (e,))
      # end: if self.content_in
      # Verify GWIS version of request.
      self.verify_request_version()
      # Instantiate a class from gwis.req.* to handle this request.
      try:
         # This throws a GWIS_Error if the request type is unknown.
         try:
            req_class = self.decode_key('rqst')
         except GWIS_Error, e:
            # 'request' is an alias for 'rqst'.
            req_class = self.decode_key('request')
         self.cmd = cmd_factory.get_command(req_class, self)
         #log.debug('Got cmd: %s' % self.cmd)
      except KeyError:
         raise GWIS_Error('GWIS request type not specified.')
      # Configure the request helpers.
      self.parts.decode_gwis()
#
   def decode_key(self, key, *args, **kwargs):
      '''Return the value of the request parameter `key`.

      An optional second positional argument is the default returned when
      the key is absent; without it the parameter is required and a missing
      key raises GWIS_Error.
      '''
      # NOTE: Ignoring kwargs (included for completeness).
      try:
         val = self.gwis_kvp_in[key]
      except KeyError:
         # If the callee specifies a second positional argument, it's the
         # default value to use. If the callee does not specify the second
         # argument, this is a required parameter.
         if len(args) == 1:
            val = args[0]
         else:
            g.assurt(len(args) == 0)
            # From /ccp/var/log/apache2/access.log:
            # 54207 123.456.789.10 - - [23/Dec/2013:17:27:50 -0600]
            # "GET /gwis HTTP/1.1" 200 96 "-" "Dalvik/1.6.0 (Linux; U;
            # Android 4.0.3; HTC PH39100 Build/IML74K)" 0.00485
            # 118375 123.456.789.10 - - [02/Feb/2014:15:27:51 -0600]
            # "GET /gwis HTTP/1.1" 200 81 "-" "Dalvik/1.6.0 (Linux; U;
            # Android 4.1.1; HTC One X Build/JRO03C)" 0.00537
            #
            # So... a GET request from a phone?
            # See also setup_areq, which sees a similar problem, but the GET is
            # at least filled in...
            log_msg = ('Missing param key: %s / req: %s'
                       % (key, self.areq.the_request,))
            if key in ('rqst', 'request',):
               # This happens when no params are specified, so don't complain.
               logger_f = log.info
            else:
               logger_f = log.warning
            logger_f(log_msg)
            # From logcheck, you'll see
            # ... GWIS_Exception caught:
            # Request param "rqst" required but not specified..
            # ... GWIS_Exception caught:
            # Request param "request" required but not specified..
            if self.areq.method == 'POST':
               raise GWIS_Error('Request param "%s" required but not specified'
                                % (key,), logger=logger_f)
            else:
               # 2014.09.07: This is that mobile BUG nnnn:
               # E.g., from access.log:
               # 119406 NN.NN.NN.NN - - [07/Sep/2014:10:26:25 -0500]
               # "GET /gwis HTTP/1.1" 200 81 "-" "Dalvik/1.6.0 (Linux; U;
               # Android 4.0.4; SAMSUNG-SGH-I777 Build/IMM76D)" 0.00534
               # except when [lb] tries to reproduce it, like,
               # wget http://cycloplan.cyclopath.org/gwis
               # I just get <gwis_error msg="Malformed request. Sorry!"/>.
               # 132292 lb.lb.lb.lb - - [07/Sep/2014:12:25:12 -0500]
               # "GET /gwis HTTP/1.1" 200 45 "-" "Wget/1.14 (linux-gnu)" -
               # So, yeah, hrmm...
               g.assurt_soft(self.areq.method == 'GET')
               raise GWIS_Error('Please try a POST request, not "%s"'
                                % (self.areq.method,), logger=logger_f)
      return val
#
   def decode_key_bool(self, key_name):
      '''Decode the request parameter `key_name` as a boolean.

      Accepts int-ish strings ('1'/'0'), or 'true'/'t' (case-insensitive);
      anything else -- including a missing key -- decodes as False.
      '''
      attr_value = self.decode_key(key_name, False)
      try:
         attr_value = bool(int(attr_value))
      except ValueError, e:
         # E.g., ValueError: invalid literal for int() with base 10: 'false'.
         try:
            attr_value = attr_value.lower()
            attr_value = ((attr_value == 'true') or (attr_value == 't'))
         except AttributeError:
            # Not an integer or string.
            attr_value = False
      return attr_value
#
   def error_handler_exception(self, ex):
      '''Last-resort handler for unexpected exceptions: log the details,
      dump the exception (and request) to the dump dir, and set a friendly
      GWIS error payload on self.content_out. Does NOT re-raise.
      '''
      # We wrap exceptions here because the flashclient uses a single
      # io/error event for any status code that's not 200 and we lose the
      # error information to present to the user. We do _not_ re-raise.
      apache.log_error(
         'Unhandled exception; see $dump_dir/dump.EXCEPT for details: '
         + misc.exception_format(ex), apache.APLOG_ERR)
      self.dump_exception(conf.dump_dir)
      # NOTE(review): dumping the request only when conf.dump_requests is
      # False looks inverted -- presumably it means "unless it was already
      # dumped elsewhere"; confirm against the dump_requests plumbing.
      if (not conf.dump_requests):
         self.dump_request(conf.dump_dir)
      # 2012.05.10: For whatever reason, I wasn't seeing errors in the log
      # until I added this log trace. So weird....
      log.error('Unhandled exception: %s / %s'
                % (misc.exception_format(ex),
                   traceback.format_exc(),))
      # This message is shown to the user, so make it moderately friendly.
      # NOTE: We use an Exception object, but we don't raise it.
      self.content_out = GWIS_Error(
         'We apologize for the inconvenience, but Cyclopath had a problem. '
         + 'This is not your fault. '
         + 'Our developers have been notified of the problem. '
         + ('If you have any questions, please email %s.'
            % (conf.mail_from_addr,))
         ).as_xml()
#
   def error_handler_gwis(self, gwis_err):
      '''Serialize an expected GWIS error object as the response payload.'''
      self.content_out = gwis_err.as_xml()
   # Check request version (only on non-GetCapabilities)
   def verify_request_version(self):
      '''Check the client's GWIS protocol version (kvp 'v', 'gwv', or
      'gwis_version') against the versions this server supports.

      A missing version is tolerated (assumed current); a present-but-
      unsupported version raises GWIS_Warning.
      '''
      versions_supported = [conf.gwis_version,]
      # Accept any of the three synonymous keyword spellings.
      for syn in ('v', 'gwv', 'gwis_version'):
         gwis_version = self.decode_key(syn, '')
         if gwis_version:
            break
      if not gwis_version:
         #log.warning('Client did not specify gwis_version.')
         # 2014.09.09: This happens too often... and it's low priority.
         # EXPLAIN/BUG nnnn: What client is not sending gwis version?
         #log.info('Client did not specify gwis_version.')
         log.debug('Client did not specify gwis_version.')
         gwis_version = conf.gwis_version
      elif (not gwis_version in versions_supported):
         raise GWIS_Warning(
            'Invalid GWIS version %s; this server only knows %s.'
            % (gwis_version, str(versions_supported), ))
# *** Developer interface
# Developer string dump helpers
#
   def dump_names(self, dump_dir, kind):
      '''Return a pair of dump-file paths for this request: a unique,
      timestamped name and a fixed "latest" name (dump.<kind>).

      kind -- e.g., 'EXCEPT', 'REQUEST', or 'RESPONSE'.
      '''
      # obscure IP address in filename
      h = hashlib.md5(self.areq.get_remote_host()).hexdigest()[:5]
      # WARNING: The details of this format are used by cron jobs to detect
      # and report crashes. Do not change unless you know what you are doing!
      return (('%s/dump.%s_%s_%s' % (dump_dir, self.start_time_str, h, kind)),
              ('%s/dump.%s' % (dump_dir, kind)))
#
def dump_exception(self, dump_dir):
'Dumps the pending exception.'
for filename in self.dump_names(dump_dir, 'EXCEPT'):
traceback.print_exc(file=open(filename, 'w'))
#
def dump_headers(self, fp, headers):
hs = headers.items()
hs.sort()
for h in hs:
fp.write('%s: %s\n' % (h[0], h[1]))
#
def dump_request(self, dump_dir):
for filename in self.dump_names(dump_dir, 'REQUEST'):
fp = open(filename, 'w')
fp.write('*** Request from %s\n' % (self.areq.get_remote_host()))
fp.write('*** %s\n' % (self.areq.the_request))
fp.write('\n')
self.dump_headers(fp, self.areq.headers_in)
fp.write('\n')
fp.write(self.content_in)
#
def dump_response(self, dump_dir):
for filename in self.dump_names(dump_dir, 'RESPONSE'):
fp = open(filename, 'w')
fp.write('*** Response to %s\n' % (self.areq.get_remote_host()))
fp.write('*** %s\n' % (self.areq.status_line))
fp.write('\n')
self.dump_headers(fp, self.areq.headers_out)
fp.write('\n')
fp.write(self.content_out)
#
   # Logging shortcuts -- this stuff goes to Apache error log.
   #
   def p_notice(self, message):
      '''Log message to the Apache error log at NOTICE level.'''
      self.areq.log_error(message, apache.APLOG_NOTICE)
   def p_warning(self, message):
      '''Log message to the Apache error log at WARNING level.'''
      self.areq.log_error(message, apache.APLOG_WARNING)
   def p_error(self, message):
      '''Log message to the Apache error log at ERR level.'''
      self.areq.log_error(message, apache.APLOG_ERR)
|
lbouma/Cyclopath
|
pyserver/gwis/request.py
|
Python
|
apache-2.0
| 44,702
|
[
"BLAST",
"Galaxy"
] |
782b12439ad2da29dde19a252dfc7e9569953a1bb583c44cfe2804fa65cb8818
|
# -*- coding: utf-8 -*-
#
# one-neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import nest.voltage_trace

# Start from a clean simulation kernel so repeated runs are reproducible.
nest.ResetKernel()

# Create a single integrate-and-fire neuron and drive it with a constant
# input current I_e (presumably in pA -- confirm units in the model docs).
neuron = nest.Create("iaf_neuron")
nest.SetStatus(neuron, "I_e", 376.0)

# Record the membrane potential: a voltmeter that also stores the sender
# gid and the sample times, connected to the neuron.
voltmeter = nest.Create("voltmeter")
nest.SetStatus(voltmeter, {"withgid": True, "withtime": True})
nest.Connect(voltmeter, neuron)

# Run for 1000 ms of biological time, then plot the recorded trace.
nest.Simulate(1000.0)
nest.voltage_trace.from_device(voltmeter)
|
gewaltig/cython-neuron
|
pynest/examples/one-neuron.py
|
Python
|
gpl-2.0
| 1,060
|
[
"NEURON"
] |
491a6006998a1ef5909bddf8d647a6d36793205298932301a16859ff0c0242a8
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from PyQt5.QtWidgets import QWidget, QTextBrowser
from peacock.utils import WidgetUtils
from peacock.utils import ExeLauncher, TerminalUtils
from PyQt5.QtCore import pyqtSignal
from peacock.base.MooseWidget import MooseWidget
import os
class CheckInputWidget(QWidget, MooseWidget):
    """
    Runs the executable with "--check-input" on the input file and stores the results.

    Signals:
        needInputFile: Emitted when we need the input file. Argument is the path where the input file will be written.
    """
    needInputFile = pyqtSignal(str)

    def __init__(self, **kwds):
        """Build the terminal-style output browser plus Hide/Check buttons."""
        super(CheckInputWidget, self).__init__(**kwds)
        # Temporary input file; written by whoever answers needInputFile.
        self.input_file = "peacock_check_input.i"
        self.top_layout = WidgetUtils.addLayout(vertical=True)
        self.setLayout(self.top_layout)
        self.output = QTextBrowser(self)
        self.output.setStyleSheet("QTextBrowser { background: black; color: white; }")
        self.output.setReadOnly(True)
        self.top_layout.addWidget(self.output)
        self.button_layout = WidgetUtils.addLayout()
        self.top_layout.addLayout(self.button_layout)
        self.hide_button = WidgetUtils.addButton(self.button_layout, self, "Hide", lambda: self.hide())
        self.check_button = WidgetUtils.addButton(self.button_layout, self, "Check", self._check)
        self.resize(800, 500)
        self.setup()
        # Path to the executable; set by check() before _check() runs.
        self.path = None

    def cleanup(self):
        """Remove the temporary input file; it is fine if it was never written."""
        try:
            os.remove(self.input_file)
        except OSError:
            # Was a bare `except:` -- only "missing / not removable file"
            # is expected here, so catch just OSError and stay best-effort.
            pass

    def check(self, path):
        """
        Runs the executable with "--check-input" and adds the output to the window
        Input:
            path[str]: Path to the executable
        """
        self.path = path
        self._check()

    def _check(self):
        """
        Runs the executable with "--check-input" and adds the output to the window

        Asks for a fresh input file via needInputFile, runs
        `<exe> -i <file> --check-input`, and renders the output as HTML.
        """
        input_file = os.path.abspath(self.input_file)
        self.needInputFile.emit(input_file)
        self.output.clear()
        try:
            args = ["-i", input_file, "--check-input"]
            output = ExeLauncher.runExe(self.path, args, print_errors=False)
            output_html = TerminalUtils.terminalOutputToHtml(output)
            self.output.setHtml("<pre>%s</pre>" % output_html)
        except Exception as e:
            # Launch failures are shown to the user instead of raised.
            output_html = TerminalUtils.terminalOutputToHtml(str(e))
            self.output.setHtml("<pre>%s</pre>" % output_html)
        self.cleanup()
|
harterj/moose
|
python/peacock/Input/CheckInputWidget.py
|
Python
|
lgpl-2.1
| 2,774
|
[
"MOOSE"
] |
774c2092cd6960190dee626813e51b75ba0fb4d69f9bd4ccf2d4b559742cb50e
|
''' Create rst documentaiton of the examples directory.
This uses screenshots in the screenshots_dir
(currently doc/sources/images/examples) along with source code and files
in the examples/ directory to create rst files in the generation_dir
(doc/sources/examples) gallery.rst, index.rst, and gen__*.rst
'''
import os
import re
from os.path import join as slash # just like that name better
from kivy.logger import Logger
import textwrap
base_dir = '..' # from here to the kivy top
examples_dir = slash(base_dir, 'examples')
screenshots_dir = slash(base_dir, 'doc/sources/images/examples')
generation_dir = slash(base_dir, 'doc/sources/examples')
image_dir = "../images/examples/" # relative to generation_dir
gallery_filename = slash(generation_dir, 'gallery.rst')
# Info is a dict built up from
# straight filename information, more from reading the docstring,
# and more from parsing the description text. Errors are often
# shown by setting the key 'error' with the value being the error message.
#
# It doesn't quite meet the requirements for a class, but is a vocabulary
# word in this module.
def iter_filename_info(dir_name):
    """
    Yield info (dict) of each matching screenshot found walking the
    directory dir_name. A matching screenshot uses double underscores to
    separate fields, i.e., path__to__filename__py.png as the screenshot for
    examples/path/to/filename.py.

    Files not ending with .png are ignored, others are either parsed or
    yield an error.

    Info fields 'dunder', 'dir', 'file', 'ext', 'source' if not 'error'
    """
    screenshot_re = re.compile(r'^((.+)__(.+)__([^-]+))\.png')
    for _root, _dirs, filenames in os.walk(dir_name):
        for png_name in filenames:
            if not png_name.endswith('.png'):
                continue
            match = screenshot_re.match(png_name)
            if match is None:
                yield {'error': 'png filename not following screenshot'
                       ' pattern: {}'.format(png_name)}
                continue
            rel_dir = match.group(2).replace('__', os.path.sep)
            yield {
                'dunder': match.group(1),
                'dir': rel_dir,
                'file': match.group(3),
                'ext': match.group(4),
                'source': slash(rel_dir, match.group(3) + '.' + match.group(4)),
            }
def parse_docstring_info(text):
    ''' parse docstring from text (normal string with '\n's) and return an info
    dict. A docstring should the first triple quoted string, have a title
    followed by a line of equal signs, and then a description at
    least one sentence long.

    fields are 'docstring', 'title', 'description', and 'first_sentence'
    if not 'error'. 'first_sentence' is a single line without newlines.
    '''
    quotes = '\"\"\"|\'\'\''
    pattern = r'({})\s+([^\n]+)\s+\=+\s+(.*?)(\1)'.format(quotes)
    match = re.search(pattern, text, re.S)
    if match is None:
        return {'error': 'Did not find docstring with title at top of file.'}
    description = match.group(3)
    # Flatten the description so the first sentence can end on any line.
    flattened = description.replace('\n', ' ')
    return {
        'docstring': match.group(0),
        'title': match.group(2),
        'description': description,
        'first_sentence': flattened[:flattened.find('.') + 1],
    }
def iter_docstring_info(dir_name):
    ''' Iterate over screenshots in directory, yield info from the file
    name and initial parse of the docstring. Errors are logged, but
    files with errors are skipped.
    '''
    for shot_info in iter_filename_info(dir_name):
        if 'error' in shot_info:
            Logger.error(shot_info['error'])
            continue
        source = slash(examples_dir, shot_info['dir'],
                       shot_info['file'] + '.' + shot_info['ext'])
        if not os.path.exists(source):
            Logger.error('Screen shot references source code that does '
                         'not exist: %s', source)
            continue
        with open(source) as source_file:
            docstring_info = parse_docstring_info(source_file.read())
        if 'error' in docstring_info:
            # don't want to show ugly entries
            Logger.error(docstring_info['error'] + ' File: ' + source)
            continue
        shot_info.update(docstring_info)
        yield shot_info
def enhance_info_description(info, line_length=50):
    ''' Using the info['description'], add fields to info.

    info['files'] is the source filename and any filenames referenced by the
    magic words in the description, e.g., 'the file xxx.py' or
    'The image this.png'. These are as written in the description, do
    not allow ../dir notation, and are relative to the source directory.

    info['enhanced_description'] is the description, as an array of
    paragraphs where each paragraph is an array of lines wrapped to width
    line_length. This enchanced description include the rst links to
    the files of info['files'].
    '''
    # Collapse each blank-line-separated paragraph onto one long line.
    flat_paragraphs = [paragraph.replace('\n', ' ')
                       for paragraph in info['description'].split('\n\n')]
    text = '\n'.join(flat_paragraphs)
    info['files'] = [info['file'] + '.' + info['ext']]
    for name in re.findall(r'[tT]he (?:file|image) ([\w\/]+\.\w+)', text):
        if name not in info['files']:
            info['files'].append(name)
    # Wrap each referenced filename in rst link markup where mentioned.
    text = re.sub(r'([tT]he (?:file|image) )([\w\/]+\.\w+)', r'\1`\2`_', text)
    info['enhanced_description'] = [textwrap.wrap(line, line_length)
                                    for line in text.split('\n')]
def get_infos(dir_name):
    ''' return infos, an array info dicts for each matching screenshot in the
    dir, sorted by source file name, and adding the field 'num' as he unique
    order in this array of dicts'.
    '''
    infos = sorted(iter_docstring_info(dir_name),
                   key=lambda info: info['source'])
    for num, info in enumerate(infos):
        info['num'] = num
        enhance_info_description(info)
    return infos
def make_gallery_page(infos):
    ''' return string of the rst (Restructured Text) of the gallery page,
    showing information on all screenshots found.
    '''
    # NOTE(review): a() and t() close over whichever `info` is current in
    # the loops below; with an empty `infos` the bare a()/a(head) calls
    # would hit an unbound `info` -- confirm infos is never empty here.
    def a(s=''):
        ''' append formatted s to output, which will be joined into lines '''
        output.append(s.format(**info))
    def t(left='', right=''):
        ''' append left and right format strings into a table line. '''
        l = left.format(**info)
        r = right.format(**info)
        if len(l) > width1 or len(r) > width2:
            Logger.error('items to wide for generated table: "%s" and "%s"',
                         l,r)
            return
        output.append('| {0:{w1}} | {1:{w2}} |'
                      .format(l, r, w1=width1, w2=width2))
    gallery_top = '''
Gallery
-------
.. _Tutorials: ../tutorials-index.html
.. container:: title
    This gallery lets you explore the many examples included with Kivy.
    Click on any screenshot to see the code.
This gallery contains:
    * Examples from the examples/ directory that show specific capabilities of
      different libraries and features of Kivy.
    * Demonstrations from the examples/demos/ directory that explore many of
      Kivy's abilities.
There are more Kivy programs elsewhere:
    * Tutorials_ walks through the development of complete Kivy applications.
    * Unit tests found in the source code under the subdirectory kivy/tests/
      can also be useful.
We hope your journey into learning Kivy is exciting and fun!
'''
    output = [gallery_top]
    # First, emit the rst substitution definitions for each screenshot.
    for info in infos:
        a("\n.. |link{num}| replace:: :doc:`{source}<gen__{dunder}>`")
        a("\n.. |pic{num}| image:: ../images/examples/{dunder}.png"
          "\n    :width: 216pt"
          "\n    :align: middle"
          "\n    :target: gen__{dunder}.html")
        a("\n.. |title{num}| replace:: **{title}**")
    # write the table
    width1, width2 = 20, 50  # not including two end spaces
    head = '+-' + '-' * width1 + '-+-' + '-' * width2 + '-+'
    a()
    a(head)
    for info in infos:
        t('| |pic{num}|', '| |title{num}|')
        t('| |link{num}|', '')
        paragraphs = info['description'].split("\n\n")
        for p in paragraphs:
            for line in textwrap.wrap(p, width2):
                t('', line)
            t()  # line between paragraphs
        t()
        a(head)
    return "\n".join(output) + "\n"
def make_detail_page(info):
    ''' return str of the rst text for the detail page of the file in info. '''
    def a(s=''):
        ''' append formatted s to output, which will be joined into lines '''
        output.append(s.format(**info))
    output = []
    # Page title, underlined per rst convention.
    a('{title}')
    a('=' * len(info['title']))
    a('\n.. |pic{num}| image:: /images/examples/{dunder}.png'
      '\n    :width: 50%'
      '\n    :align: middle')
    a('\n|pic{num}|')
    a()
    # The pre-wrapped paragraphs from enhance_info_description.
    for paragraph in info['enhanced_description']:
        for line in paragraph:
            a(line)
        a()
    # include images
    last_lang = '.py'
    for fname in info['files']:
        full_name = slash(info['dir'], fname)
        ext = re.search(r'\.\w+$', fname).group(0)
        # rst link target so in-text `fname`_ references resolve here.
        a('\n.. _`' + fname + '`:')
        if ext in ['.png', '.jpg', '.jpeg']:
            title = 'Image **' + full_name + '**'
            a('\n' + title)
            a('~' * len(title))
            a('\n.. image:: ../../../examples/' + full_name)
            a('    :align: center')
        else:  # code
            title = 'File **' + full_name + '**'
            a('\n' + title)
            a('~' * len(title))
            # Switch the highlighter only when the language changes
            # (and never for .txt files).
            if ext != last_lang and ext != '.txt':
                a('\n.. highlight:: ' + ext[1:])
                a('    :linenothreshold: 3')
                last_lang = ext
            a('\n.. include:: ../../../examples/' + full_name)
            a('    :code:')
    return '\n'.join(output) + '\n'
def write_file(name, s):
    ''' write the string to the filename '''
    with open(name, 'w') as out:
        out.write(s)
def make_index(infos):
    ''' return string of the rst for the gallary's index.rst file. '''
    header = '''
Gallery of Examples
===================
.. toctree::
    :maxdepth: 1
    gallery'''
    entries = ['    gen__{}'.format(info['dunder']) for info in infos]
    return '\n'.join([header] + entries) + '\n'
def write_all_rst_pages():
    ''' Do the main task of writing the gallery, detail, and index rst pages '''
    infos = get_infos(screenshots_dir)
    write_file(gallery_filename, make_gallery_page(infos))
    for info in infos:
        detail_name = slash(generation_dir,
                            'gen__{}.rst'.format(info['dunder']))
        write_file(detail_name, make_detail_page(info))
    write_file(slash(generation_dir, 'index.rst'), make_index(infos))
    Logger.info('gallery.py: Created gallery rst documentation pages.')


if __name__ == '__main__':
    write_all_rst_pages()
|
arlowhite/kivy
|
doc/gallery.py
|
Python
|
mit
| 11,144
|
[
"exciting"
] |
969b0f25590ed44e86bb20f4e2084501ab286bd24692fb6f59e20c588d685db7
|
# Copyright (C) 2020 Atsushi Togo
# All rights reserved.
#
# This file is part of phono3py.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import numpy as np
import h5py
from phonopy.file_IO import (write_force_constants_to_hdf5,
check_force_constants_indices,
get_cell_from_disp_yaml)
from phonopy.cui.load_helper import read_force_constants_from_hdf5
def write_cell_yaml(w, supercell):
    """Write the supercell lattice vectors and scaled atomic positions to
    w in the yaml layout shared by the disp_fc* files."""
    w.write("lattice:\n")
    for basis_vector in supercell.get_cell():
        w.write("- [ %20.15f,%20.15f,%20.15f ]\n" % tuple(basis_vector))
    symbols = supercell.get_chemical_symbols()
    positions = supercell.get_scaled_positions()
    w.write("atoms:\n")
    for index, (symbol, position) in enumerate(zip(symbols, positions)):
        w.write("- symbol: %-2s # %d\n" % (symbol, index + 1))
        w.write(" position: [ %18.14f,%18.14f,%18.14f ]\n" % tuple(position))
def write_disp_fc3_yaml(dataset, supercell, filename='disp_fc3.yaml'):
    """Write the fc3 displacement dataset plus the supercell as yaml.

    Returns (num_first + num_second displacements, number of displacement
    files actually created -- pairs excluded by the cutoff write no file).
    """
    w = open(filename, 'w')
    w.write("natom: %d\n" % dataset['natom'])
    num_first = len(dataset['first_atoms'])
    w.write("num_first_displacements: %d\n" % num_first)
    if 'cutoff_distance' in dataset:
        w.write("cutoff_distance: %f\n" % dataset['cutoff_distance'])
    # Count second displacements and how many supercell files they imply;
    # pairs marked included=False contribute no file.
    num_second = 0
    num_disp_files = 0
    for d1 in dataset['first_atoms']:
        num_disp_files += 1
        num_second += len(d1['second_atoms'])
        for d2 in d1['second_atoms']:
            if 'included' in d2:
                if d2['included']:
                    num_disp_files += 1
            else:
                num_disp_files += 1
    w.write("num_second_displacements: %d\n" % num_second)
    w.write("num_displacements_created: %d\n" % num_disp_files)
    if 'duplicates' in dataset:
        w.write("duplicates:\n")
        for (i, j) in dataset['duplicates']:
            w.write("- [ %d, %d ]\n" % (i, j))
    w.write("first_atoms:\n")
    count1 = 0
    # Second-displacement ids continue after the first-displacement ids.
    count2 = len(dataset['first_atoms'])
    for disp1 in dataset['first_atoms']:
        count1 += 1
        disp_cart1 = disp1['displacement']
        w.write("- number: %5d\n" % (disp1['number'] + 1))
        w.write(" displacement:\n")
        w.write(" [%20.16f,%20.16f,%20.16f ] # %05d\n" %
                (disp_cart1[0], disp_cart1[1], disp_cart1[2], count1))
        w.write(" displacement_id: %d\n" % count1)
        w.write(" second_atoms:\n")
        included = None
        # Group the second displacements by their atom index, preserving
        # first-appearance order via np.unique's sorted indices.
        atom2_list = np.array([disp2['number']
                               for disp2 in disp1['second_atoms']], dtype=int)
        _, indices = np.unique(atom2_list, return_index=True)
        for atom2 in atom2_list[indices]:
            disp2_list = []
            for disp2 in disp1['second_atoms']:
                if disp2['number'] == atom2:
                    disp2_list.append(disp2)
            disp2 = disp2_list[0]
            atom2 = disp2['number']
            if 'included' in disp2:
                included = disp2['included']
            pair_distance = disp2['pair_distance']
            w.write(" - number: %5d\n" % (atom2 + 1))
            w.write(" distance: %f\n" % pair_distance)
            if included is not None:
                if included:
                    w.write(" included: %s\n" % "true")
                else:
                    w.write(" included: %s\n" % "false")
            w.write(" displacements:\n")
            for disp2 in disp2_list:
                count2 += 1
                # Assert all disp2s belonging to same atom2 appear straight.
                assert disp2['id'] == count2
                disp_cart2 = disp2['displacement']
                w.write(" - [%20.16f,%20.16f,%20.16f ] # %05d\n" %
                        (disp_cart2[0], disp_cart2[1], disp_cart2[2],
                         count2))
            ids = ["%d" % disp2['id'] for disp2 in disp2_list]
            w.write(" displacement_ids: [ %s ]\n" % ', '.join(ids))
    write_cell_yaml(w, supercell)
    w.close()
    return num_first + num_second, num_disp_files
def write_disp_fc2_yaml(dataset, supercell, filename='disp_fc2.yaml'):
    """Write an fc2 displacement dataset to a yaml file.

    Parameters
    ----------
    dataset : dict
        Displacement dataset with 'natom' and 'first_atoms' entries.
        Each first-atom entry carries 0-based 'number' and a Cartesian
        'displacement'; atom numbers are written 1-based.
    supercell : object or None
        When not None, the supercell is appended via write_cell_yaml.
    filename : str
        Output filename.

    Returns
    -------
    int
        Number of first-atom displacements written.

    """
    num_first = len(dataset['first_atoms'])
    # Context manager guarantees the handle is closed even on error
    # (the original relied on an explicit close()).
    with open(filename, 'w') as w:
        w.write("natom: %d\n" % dataset['natom'])
        w.write("num_first_displacements: %d\n" % num_first)
        w.write("first_atoms:\n")
        for i, disp1 in enumerate(dataset['first_atoms']):
            disp_cart1 = disp1['displacement']
            w.write("- number: %5d\n" % (disp1['number'] + 1))
            w.write("  displacement:\n")
            w.write("  [%20.16f,%20.16f,%20.16f ] # %05d\n" %
                    (disp_cart1[0], disp_cart1[1], disp_cart1[2], i + 1))
        if supercell is not None:
            write_cell_yaml(w, supercell)
    return num_first
def write_FORCES_FC2(disp_dataset,
                     forces_fc2=None,
                     fp=None,
                     filename="FORCES_FC2"):
    """Write the FORCES_FC2 text file from a displacement dataset.

    Parameters
    ----------
    disp_dataset : dict
        fc2 displacement dataset with 'first_atoms' entries.
    forces_fc2 : sequence or None
        Force sets indexed by displacement.  When None, 'forces' stored
        in each first-atom entry are used instead.
    fp : file-like or None
        When given, output goes to this stream and the stream is left
        open for the caller.  Otherwise `filename` is created here.
    filename : str
        Output filename used when `fp` is None.

    """
    if fp is None:
        # We own the handle in this branch, so it must be closed on
        # exit (the original implementation leaked it).
        w = open(filename, 'w')
        close_w = True
    else:
        w = fp
        close_w = False
    try:
        for i, disp1 in enumerate(disp_dataset['first_atoms']):
            w.write("# File: %-5d\n" % (i + 1))
            w.write("# %-5d " % (disp1['number'] + 1))
            w.write("%20.16f %20.16f %20.16f\n" % tuple(disp1['displacement']))
            if 'forces' in disp1 and forces_fc2 is None:
                force_set = disp1['forces']
            else:
                force_set = forces_fc2[i]
            for forces in force_set:
                w.write("%15.10f %15.10f %15.10f\n" % tuple(forces))
    finally:
        if close_w:
            w.close()
def write_FORCES_FC3(disp_dataset,
                     forces_fc3=None,
                     fp=None,
                     filename="FORCES_FC3"):
    """Write the FORCES_FC3 text file from an fc3 displacement dataset.

    The single-displacement (fc2-like) part is written first via
    write_FORCES_FC2, followed by one section per pair displacement.
    Pairs marked 'included': False get zero-force placeholder rows.

    Parameters
    ----------
    disp_dataset : dict
        fc3 displacement dataset ('natom', 'first_atoms' with nested
        'second_atoms').
    forces_fc3 : sequence or None
        Force sets indexed by displacement file number.  When None,
        'forces' stored in the dataset entries are used.
    fp : file-like or None
        When given, output goes to this stream and the stream is left
        open for the caller.  Otherwise `filename` is created here.
    filename : str
        Output filename used when `fp` is None.

    """
    if fp is None:
        # We own the handle in this branch, so it must be closed on
        # exit (the original implementation leaked it).
        w = open(filename, 'w')
        close_w = True
    else:
        w = fp
        close_w = False
    try:
        natom = disp_dataset['natom']
        num_disp1 = len(disp_dataset['first_atoms'])
        count = num_disp1
        file_count = num_disp1
        write_FORCES_FC2(disp_dataset, forces_fc2=forces_fc3, fp=w)
        for i, disp1 in enumerate(disp_dataset['first_atoms']):
            atom1 = disp1['number']
            for disp2 in disp1['second_atoms']:
                atom2 = disp2['number']
                w.write("# File: %-5d\n" % (count + 1))
                w.write("# %-5d " % (atom1 + 1))
                w.write("%20.16f %20.16f %20.16f\n" %
                        tuple(disp1['displacement']))
                w.write("# %-5d " % (atom2 + 1))
                w.write("%20.16f %20.16f %20.16f\n" %
                        tuple(disp2['displacement']))
                # For supercell calculation reduction
                included = True
                if 'included' in disp2:
                    included = disp2['included']
                if included:
                    if 'forces' in disp2 and forces_fc3 is None:
                        force_set = disp2['forces']
                    else:
                        force_set = forces_fc3[file_count]
                    for force in force_set:
                        w.write("%15.10f %15.10f %15.10f\n" % tuple(force))
                    file_count += 1
                else:
                    # Excluded pairs still occupy a section: one zero
                    # row per atom keeps file numbering consistent.
                    for j in range(natom):
                        w.write("%15.10f %15.10f %15.10f\n" % (0, 0, 0))
                count += 1
    finally:
        if close_w:
            w.close()
def write_fc3_dat(force_constants_third, filename='fc3.dat'):
    """Write third-order force constants to a plain-text file.

    One section per (i, j, k) atom triplet: a header line with 1-based
    indices and the absolute sum of the 3x3x3 tensor, then nine rows of
    three values, then a blank line.

    Parameters
    ----------
    force_constants_third : ndarray
        Array indexed as [i, j, k] yielding 3x3x3 tensors.
    filename : str
        Output filename.

    """
    # 'with' closes the file; the original implementation leaked the
    # handle.
    with open(filename, 'w') as w:
        for i in range(force_constants_third.shape[0]):
            for j in range(force_constants_third.shape[1]):
                for k in range(force_constants_third.shape[2]):
                    tensor3 = force_constants_third[i, j, k]
                    w.write(" %d - %d - %d (%f)\n" % (i + 1, j + 1, k + 1,
                                                      np.abs(tensor3).sum()))
                    for tensor2 in tensor3:
                        for vec in tensor2:
                            w.write("%20.14f %20.14f %20.14f\n" % tuple(vec))
                    w.write("\n")
def write_fc3_to_hdf5(fc3,
                      filename='fc3.hdf5',
                      p2s_map=None,
                      compression="gzip"):
    """Write third-order force constants in hdf5 format.

    Parameters
    ----------
    fc3 : ndarray
        Third-order force constants.
        shape=(n_satom, n_satom, n_satom, 3, 3, 3) or
        (n_patom, n_satom, n_satom, 3, 3, 3), dtype=double
    filename : str
        Filename to be used.
    p2s_map : ndarray, optional
        Primitive atom indices in supercell index system.
        shape=(n_patom,), dtype=intc
    compression : str or int, optional
        h5py's lossless compression filters (e.g., "gzip", "lzf").
        None gives no compression.  See the docstring of
        h5py.Group.create_dataset.  Default is "gzip".

    """
    with h5py.File(filename, 'w') as hdf5_file:
        hdf5_file.create_dataset('fc3', data=fc3, compression=compression)
        if p2s_map is None:
            return
        hdf5_file.create_dataset('p2s_map', data=p2s_map)
def read_fc3_from_hdf5(filename='fc3.hdf5', p2s_map=None):
    """Read third-order force constants from an hdf5 file.

    Parameters
    ----------
    filename : str
        File to read the 'fc3' dataset from.
    p2s_map : ndarray or None
        Expected primitive-to-supercell map; when the file stores a
        'p2s_map', consistency is checked via
        check_force_constants_indices.

    Returns
    -------
    ndarray
        The fc3 array as read by h5py.

    Raises
    ------
    TypeError
        If the stored array is not read as a C-contiguous double
        ndarray.

    """
    # The original ended with an unreachable `return None` after this
    # with-block (every path inside returns or raises); it was removed.
    with h5py.File(filename, 'r') as f:
        fc3 = f['fc3'][:]
        if 'p2s_map' in f:
            p2s_map_in_file = f['p2s_map'][:]
            check_force_constants_indices(fc3.shape[:2],
                                          p2s_map_in_file,
                                          p2s_map,
                                          filename)
        if fc3.dtype == np.double and fc3.flags.c_contiguous:
            return fc3
        else:
            msg = ("%s has to be read by h5py as numpy ndarray of "
                   "dtype='double' and c_contiguous." % filename)
            raise TypeError(msg)
def write_fc2_dat(force_constants, filename='fc2.dat'):
    """Write second-order force constants to a plain-text file.

    One section per (i, j) atom pair: a header line with 1-based
    indices, three rows of three values, then a blank line.

    Parameters
    ----------
    force_constants : ndarray
        Array indexed as [i, j] yielding 3x3 matrices.
    filename : str
        Output filename.

    """
    # 'with' closes the file; the original implementation leaked the
    # handle.
    with open(filename, 'w') as w:
        for i, fcs in enumerate(force_constants):
            for j, fcb in enumerate(fcs):
                w.write(" %d - %d\n" % (i+1, j+1))
                for vec in fcb:
                    w.write("%20.14f %20.14f %20.14f\n" % tuple(vec))
                w.write("\n")
def write_fc2_to_hdf5(force_constants,
                      filename='fc2.hdf5',
                      p2s_map=None,
                      physical_unit=None,
                      compression="gzip"):
    """Write second-order force constants to hdf5.

    Thin wrapper that forwards all options to
    write_force_constants_to_hdf5 unchanged.
    """
    options = {'filename': filename,
               'p2s_map': p2s_map,
               'physical_unit': physical_unit,
               'compression': compression}
    write_force_constants_to_hdf5(force_constants, **options)
def read_fc2_from_hdf5(filename='fc2.hdf5',
                       p2s_map=None):
    """Read second-order force constants via the generic hdf5 reader.

    The calculator is fixed to 'vasp', matching the original behavior.
    """
    fc2 = read_force_constants_from_hdf5(filename=filename,
                                         p2s_map=p2s_map,
                                         calculator='vasp')
    return fc2
def write_triplets(triplets,
                   weights,
                   mesh,
                   grid_address,
                   grid_point=None,
                   filename=None):
    """Write q-point triplets and their weights to a text file.

    The output name is assembled as
    "triplets-m<mesh>[-g<grid_point>][.<filename>].dat"; each line
    carries a weight followed by the three grid addresses of a triplet.
    """
    name_parts = ["triplets", "-m%d%d%d" % tuple(mesh)]
    if grid_point is not None:
        name_parts.append("-g%d" % grid_point)
    if filename is not None:
        name_parts.append("." + filename)
    name_parts.append(".dat")
    out_name = "".join(name_parts)
    with open(out_name, 'w') as out:
        for weight, g3 in zip(weights, triplets):
            out.write("%4d " % weight)
            for q3 in grid_address[g3]:
                out.write("%4d %4d %4d " % tuple(q3))
            out.write("\n")
def write_grid_address(grid_address, mesh, filename=None):
    """Write grid addresses to a text file and return its filename.

    Each row shows the raw (possibly BZ-extended) address and the
    address folded into the mesh (modulo mesh).

    Parameters
    ----------
    grid_address : ndarray
        Integer grid addresses, one row per grid point.
    mesh : sequence of 3 ints
        Sampling mesh numbers.
    filename : str or None
        Optional extra tag inserted into the output filename.

    Returns
    -------
    str
        The created filename.

    """
    grid_address_filename = "grid_address"
    suffix = "-m%d%d%d" % tuple(mesh)
    if filename is not None:
        suffix += "." + filename
    suffix += ".dat"
    grid_address_filename += suffix
    # 'with' closes the file; the original implementation leaked the
    # handle.
    with open(grid_address_filename, 'w') as w:
        w.write("# Grid addresses for %dx%dx%d mesh\n" % tuple(mesh))
        w.write("#%9s %8s %8s %8s %8s %8s %8s\n" %
                ("index", "a", "b", "c",
                 ("a%%%d" % mesh[0]), ("b%%%d" % mesh[1]),
                 ("c%%%d" % mesh[2])))
        for i, bz_q in enumerate(grid_address):
            if i == np.prod(mesh):
                # Separator after the first prod(mesh) regular points.
                w.write("#" + "-" * 78 + "\n")
            q = bz_q % mesh
            w.write("%10d %8d %8d %8d " % (i, bz_q[0], bz_q[1], bz_q[2]))
            w.write("%8d %8d %8d\n" % tuple(q))
    return grid_address_filename
def write_grid_address_to_hdf5(grid_address,
                               mesh,
                               grid_mapping_table,
                               compression="gzip",
                               filename=None):
    """Write grid addresses and the grid mapping table to hdf5.

    Parameters
    ----------
    grid_address : array_like
        Integer grid addresses.
    mesh : sequence of 3 ints
        Sampling mesh numbers (also used for the filename suffix).
    grid_mapping_table : array_like
        Mapping table stored alongside the addresses.
    compression : str or int, optional
        h5py compression filter.  Default is "gzip".
    filename : str or None
        Optional extra tag for the filename suffix.

    Returns
    -------
    str
        The created filename.

    """
    suffix = _get_filename_suffix(mesh, filename=filename)
    full_filename = "grid_address" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('mesh', data=mesh)
        w.create_dataset('grid_address', data=grid_address,
                         compression=compression)
        w.create_dataset('grid_mapping_table', data=grid_mapping_table,
                         compression=compression)
    # The original had a trailing unreachable `return None`; removed.
    return full_filename
def write_imag_self_energy_at_grid_point(gp,
                                         band_indices,
                                         mesh,
                                         frequencies,
                                         gammas,
                                         sigma=None,
                                         temperature=None,
                                         scattering_event_class=None,
                                         filename=None,
                                         is_mesh_symmetry=True):
    """Write gammas (imaginary self energy) vs frequency to a text file.

    The filename encodes mesh, grid point, optional sigma/temperature
    tags, band indices, and optional class/filename/nosym tags.

    Returns
    -------
    str
        The created filename.

    """
    gammas_filename = "gammas"
    gammas_filename += "-m%d%d%d-g%d-" % (mesh[0], mesh[1], mesh[2], gp)
    if sigma is not None:
        # rstrip('0') drops trailing zeros, rstrip('.') a bare trailing
        # dot.  The original passed r'\.', which also stripped
        # backslashes (never present, but the intent is a plain dot).
        gammas_filename += ("s%f" % sigma).rstrip('0').rstrip('.') + "-"
    if temperature is not None:
        gammas_filename += ("t%f" % temperature).rstrip('0').rstrip('.') + "-"
    for i in band_indices:
        gammas_filename += "b%d" % (i + 1)
    if scattering_event_class is not None:
        gammas_filename += "-c%d" % scattering_event_class
    if filename is not None:
        gammas_filename += ".%s" % filename
    elif not is_mesh_symmetry:
        gammas_filename += ".nosym"
    gammas_filename += ".dat"
    with open(gammas_filename, 'w') as w:
        for freq, g in zip(frequencies, gammas):
            w.write("%15.7f %20.15e\n" % (freq, g))
    return gammas_filename
def write_joint_dos(gp,
                    mesh,
                    frequencies,
                    jdos,
                    sigma=None,
                    temperatures=None,
                    filename=None,
                    is_mesh_symmetry=True):
    """Write joint DOS, one file per temperature.

    Bug fix: the original returned from inside the temperature loop,
    so only the FIRST temperature was ever written.  Now all
    temperatures are written; the filename of the first one is still
    returned to preserve the original return value.

    Returns
    -------
    str or None
        Filename written for temperatures=None, or the first
        per-temperature filename.

    """
    if temperatures is None:
        return _write_joint_dos_at_t(gp,
                                     mesh,
                                     frequencies,
                                     jdos,
                                     sigma=sigma,
                                     temperature=None,
                                     filename=filename,
                                     is_mesh_symmetry=is_mesh_symmetry)
    else:
        first_filename = None
        for jdos_at_t, t in zip(jdos, temperatures):
            written = _write_joint_dos_at_t(
                gp,
                mesh,
                frequencies,
                jdos_at_t,
                sigma=sigma,
                temperature=t,
                filename=filename,
                is_mesh_symmetry=is_mesh_symmetry)
            if first_filename is None:
                first_filename = written
        return first_filename
def _write_joint_dos_at_t(grid_point,
                          mesh,
                          frequencies,
                          jdos,
                          sigma=None,
                          temperature=None,
                          filename=None,
                          is_mesh_symmetry=True):
    """Write joint DOS at one temperature to a "jdos-*.dat" file.

    Each line holds a frequency followed by the jdos values at that
    frequency.  Returns the created filename.
    """
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  sigma=sigma,
                                  filename=filename)
    jdos_filename = "jdos%s" % suffix
    if temperature is not None:
        # Trailing zeros and a bare trailing dot are stripped.  The
        # original passed r'\.', which also stripped backslashes
        # (harmless but unintended).
        jdos_filename += ("-t%f" % temperature).rstrip('0').rstrip('.')
    if not is_mesh_symmetry:
        jdos_filename += ".nosym"
    if filename is not None:
        jdos_filename += ".%s" % filename
    jdos_filename += ".dat"
    with open(jdos_filename, 'w') as w:
        for omega, vals in zip(frequencies, jdos):
            w.write("%15.7f" % omega)
            w.write((" %20.15e" * len(vals)) % tuple(vals))
            w.write("\n")
    return jdos_filename
def write_real_self_energy_at_grid_point(gp,
                                         band_indices,
                                         frequency_points,
                                         deltas,
                                         mesh,
                                         epsilon,
                                         temperature,
                                         filename=None,
                                         is_mesh_symmetry=True):
    """Write the real part of the self energy vs frequency points.

    The filename encodes mesh, grid point, epsilon, optional
    temperature, band indices, and optional filename/nosym tags.
    Returns the created filename.
    """
    name_parts = ["deltas", _get_filename_suffix(mesh, grid_point=gp)]
    if epsilon > 1e-5:
        name_parts.append("-e" + _del_zeros(epsilon))
    else:
        name_parts.append("-e%.3e" % epsilon)
    if temperature is not None:
        name_parts.append("-t" + _del_zeros(temperature) + "-")
    for band in band_indices:
        name_parts.append("b%d" % (band + 1))
    if filename is not None:
        name_parts.append(".%s" % filename)
    elif not is_mesh_symmetry:
        name_parts.append(".nosym")
    name_parts.append(".dat")
    deltas_filename = "".join(name_parts)
    with open(deltas_filename, 'w') as w:
        for freq, value in zip(frequency_points, deltas):
            w.write("%15.7f %20.15e\n" % (freq, value))
    return deltas_filename
def write_real_self_energy_to_hdf5(grid_point,
                                   band_indices,
                                   temperatures,
                                   deltas,
                                   mesh,
                                   epsilon,
                                   frequency_points=None,
                                   frequencies=None,
                                   filename=None):
    """Write real part of self energy (currently only bubble) in hdf5.

    deltas : ndarray
        Real part of self energy.
        With frequency_points:
            shape=(temperatures, band_indices, frequency_points),
            dtype='double', order='C'
        otherwise:
            shape=(temperatures, band_indices), dtype='double', order='C'

    Returns the created filename.
    """
    if epsilon > 1e-5:
        eps_tag = "-e" + _del_zeros(epsilon)
    else:
        eps_tag = "-e%.3e" % epsilon
    full_filename = ("deltas"
                     + _get_filename_suffix(mesh, grid_point=grid_point)
                     + eps_tag
                     + ".hdf5")
    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('grid_point', data=grid_point)
        w.create_dataset('mesh', data=mesh)
        w.create_dataset('band_index',
                         data=np.array(band_indices, dtype='intc'))
        w.create_dataset('delta', data=deltas)
        w.create_dataset('temperature', data=temperatures)
        w.create_dataset('epsilon', data=epsilon)
        if frequency_points is not None:
            w.create_dataset('frequency_points', data=frequency_points)
        if frequencies is not None:
            w.create_dataset('frequency', data=frequencies)
    return full_filename
def write_spectral_function_at_grid_point(gp,
                                          band_indices,
                                          frequency_points,
                                          spectral_functions,
                                          mesh,
                                          temperature,
                                          sigma=None,
                                          filename=None,
                                          is_mesh_symmetry=True):
    """Write the spectral function vs frequency points to a text file.

    The filename encodes mesh, grid point, optional sigma/temperature
    tags, band indices, and optional filename/nosym tags.  Returns the
    created filename.
    """
    name_parts = ["spectral",
                  _get_filename_suffix(mesh, grid_point=gp, sigma=sigma)]
    if temperature is not None:
        name_parts.append("-t" + _del_zeros(temperature) + "-")
    for band in band_indices:
        name_parts.append("b%d" % (band + 1))
    if filename is not None:
        name_parts.append(".%s" % filename)
    elif not is_mesh_symmetry:
        name_parts.append(".nosym")
    name_parts.append(".dat")
    spectral_filename = "".join(name_parts)
    with open(spectral_filename, 'w') as w:
        for freq, value in zip(frequency_points, spectral_functions):
            w.write("%15.7f %20.15e\n" % (freq, value))
    return spectral_filename
def write_spectral_function_to_hdf5(grid_point,
                                    band_indices,
                                    temperatures,
                                    spectral_functions,
                                    shifts,
                                    half_linewidths,
                                    mesh,
                                    sigma=None,
                                    frequency_points=None,
                                    frequencies=None,
                                    filename=None):
    """Write spectral functions (currently only bubble) in hdf5.

    spectral_functions : ndarray
        Spectral functions.
        shape=(temperature, band_index, frequency_points),
        dtype='double', order='C'

    Returns the created filename.
    """
    suffix = _get_filename_suffix(mesh, grid_point=grid_point, sigma=sigma)
    full_filename = "spectral" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('grid_point', data=grid_point)
        w.create_dataset('mesh', data=mesh)
        w.create_dataset('band_index',
                         data=np.array(band_indices, dtype='intc'))
        w.create_dataset('spectral_function', data=spectral_functions)
        w.create_dataset('shift', data=shifts)
        w.create_dataset('half_linewidth', data=half_linewidths)
        w.create_dataset('temperature', data=temperatures)
        if frequency_points is not None:
            w.create_dataset('frequency_point', data=frequency_points)
        if frequencies is not None:
            w.create_dataset('frequency', data=frequencies)
    return full_filename
def write_collision_to_hdf5(temperature,
                            mesh,
                            gamma=None,
                            gamma_isotope=None,
                            collision_matrix=None,
                            grid_point=None,
                            band_index=None,
                            sigma=None,
                            sigma_cutoff=None,
                            filename=None):
    """Write collision matrix and related quantities to an hdf5 file.

    Only non-None quantities are stored.  A summary message is printed.
    Returns the created filename.

    Fix: the printed message said "grid adress"; corrected to
    "grid address".
    """
    if band_index is None:
        band_indices = None
    else:
        band_indices = [band_index]
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  band_indices=band_indices,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "collision" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('temperature', data=temperature)
        if gamma is not None:
            w.create_dataset('gamma', data=gamma)
        if gamma_isotope is not None:
            w.create_dataset('gamma_isotope', data=gamma_isotope)
        if collision_matrix is not None:
            w.create_dataset('collision_matrix', data=collision_matrix)
        if grid_point is not None:
            w.create_dataset('grid_point', data=grid_point)
        if band_index is not None:
            # Stored 1-based (band_index + 1), matching the text files.
            w.create_dataset('band_index', data=(band_index + 1))
        if sigma is not None:
            w.create_dataset('sigma', data=sigma)
        if sigma_cutoff is not None:
            w.create_dataset('sigma_cutoff_width', data=sigma_cutoff)
        text = "Collisions "
        if grid_point is not None:
            text += "at grid address %d " % grid_point
        if sigma is not None:
            if grid_point is not None:
                text += "and "
            else:
                text += "at "
            text += "sigma %s " % _del_zeros(sigma)
        text += "were written into "
        if sigma is not None:
            text += "\n"
        text += "\"%s\"." % ("collision" + suffix + ".hdf5")
        print(text)
    return full_filename
def write_full_collision_matrix(collision_matrix, filename='fcm.hdf5'):
    """Write the full collision matrix to an hdf5 file."""
    hdf5_file = h5py.File(filename, 'w')
    try:
        hdf5_file.create_dataset('collision_matrix', data=collision_matrix)
    finally:
        hdf5_file.close()
def write_unitary_matrix_to_hdf5(temperature,
                                 mesh,
                                 unitary_matrix=None,
                                 sigma=None,
                                 sigma_cutoff=None,
                                 solver=None,
                                 filename=None,
                                 verbose=False):
    """Write eigenvectors of collision matrices at temperatures.

    Depending on the choice of the solver, eigenvectors are stored in
    either column-wise or row-wise.
    """
    suffix = _get_filename_suffix(mesh,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    hdf5_filename = "unitary" + suffix + ".hdf5"
    with h5py.File(hdf5_filename, 'w') as w:
        w.create_dataset('temperature', data=temperature)
        if unitary_matrix is not None:
            w.create_dataset('unitary_matrix', data=unitary_matrix)
        if solver is not None:
            w.create_dataset('solver', data=solver)
    if not verbose:
        return
    plural = len(temperature) > 1
    text = "Unitary matrices " if plural else "Unitary matrix "
    if sigma is not None:
        text += "at sigma %s " % _del_zeros(sigma)
    if sigma_cutoff is not None:
        text += "(%4.2f SD) " % sigma_cutoff
    text += "were written into " if plural else "was written into "
    if sigma is not None:
        text += "\n"
    text += "\"%s\"." % hdf5_filename
    print(text)
def write_collision_eigenvalues_to_hdf5(temperatures,
                                        mesh,
                                        collision_eigenvalues,
                                        sigma=None,
                                        sigma_cutoff=None,
                                        filename=None,
                                        verbose=True):
    """Write eigenvalues of the collision matrix to "coleigs-*.hdf5".

    Parameters
    ----------
    temperatures : array_like
        Temperatures stored alongside the eigenvalues.
    mesh : sequence of 3 ints
        Sampling mesh (filename suffix).
    collision_eigenvalues : array_like
        Eigenvalues to store.
    sigma, sigma_cutoff, filename :
        Optional tags forwarded to the filename suffix.
    verbose : bool
        Print a summary message when True.

    """
    suffix = _get_filename_suffix(mesh,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    # The context manager closes the file; the explicit w.close() the
    # original issued inside the with-block was a redundant double
    # close and has been removed.
    with h5py.File("coleigs" + suffix + ".hdf5", 'w') as w:
        w.create_dataset('temperature', data=temperatures)
        w.create_dataset('collision_eigenvalues', data=collision_eigenvalues)
    if verbose:
        text = "Eigenvalues of collision matrix "
        if sigma is not None:
            text += "with sigma %s\n" % sigma
        text += "were written into "
        text += "\"%s\"" % ("coleigs" + suffix + ".hdf5")
        print(text)
def write_kappa_to_hdf5(temperature,
                        mesh,
                        frequency=None,
                        group_velocity=None,
                        gv_by_gv=None,
                        mean_free_path=None,
                        heat_capacity=None,
                        kappa=None,
                        mode_kappa=None,
                        kappa_RTA=None,  # RTA calculated in LBTE
                        mode_kappa_RTA=None,  # RTA calculated in LBTE
                        f_vector=None,
                        gamma=None,
                        gamma_isotope=None,
                        gamma_N=None,
                        gamma_U=None,
                        averaged_pp_interaction=None,
                        qpoint=None,
                        weight=None,
                        mesh_divisors=None,
                        grid_point=None,
                        band_index=None,
                        sigma=None,
                        sigma_cutoff=None,
                        kappa_unit_conversion=None,
                        compression="gzip",
                        filename=None,
                        verbose=True):
    """Write thermal-conductivity results to a "kappa-*.hdf5" file.

    Only quantities passed as non-None are stored as hdf5 datasets.
    The filename suffix encodes mesh, mesh divisors, grid point, band
    index, sigma and sigma_cutoff via _get_filename_suffix.  When
    `verbose`, a summary of what was written is printed.

    Returns
    -------
    str
        The created filename ("kappa" + suffix + ".hdf5").

    """
    # The suffix helper takes band indices as a list; wrap the single
    # optional band index.
    if band_index is None:
        band_indices = None
    else:
        band_indices = [band_index]
    suffix = _get_filename_suffix(mesh,
                                  mesh_divisors=mesh_divisors,
                                  grid_point=grid_point,
                                  band_indices=band_indices,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "kappa" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('temperature', data=temperature)
        w.create_dataset('mesh', data=mesh)
        if frequency is not None:
            w.create_dataset('frequency', data=frequency,
                             compression=compression)
        if group_velocity is not None:
            w.create_dataset('group_velocity', data=group_velocity,
                             compression=compression)
        if gv_by_gv is not None:
            w.create_dataset('gv_by_gv', data=gv_by_gv)
        if mean_free_path is not None:
            w.create_dataset('mean_free_path', data=mean_free_path,
                             compression=compression)
        if heat_capacity is not None:
            w.create_dataset('heat_capacity', data=heat_capacity,
                             compression=compression)
        if kappa is not None:
            w.create_dataset('kappa', data=kappa)
        if mode_kappa is not None:
            w.create_dataset('mode_kappa', data=mode_kappa,
                             compression=compression)
        if kappa_RTA is not None:
            w.create_dataset('kappa_RTA', data=kappa_RTA)
        if mode_kappa_RTA is not None:
            w.create_dataset('mode_kappa_RTA', data=mode_kappa_RTA,
                             compression=compression)
        if f_vector is not None:
            w.create_dataset('f_vector', data=f_vector,
                             compression=compression)
        if gamma is not None:
            w.create_dataset('gamma', data=gamma,
                             compression=compression)
        if gamma_isotope is not None:
            w.create_dataset('gamma_isotope', data=gamma_isotope,
                             compression=compression)
        if gamma_N is not None:
            w.create_dataset('gamma_N', data=gamma_N,
                             compression=compression)
        if gamma_U is not None:
            w.create_dataset('gamma_U', data=gamma_U,
                             compression=compression)
        if averaged_pp_interaction is not None:
            w.create_dataset('ave_pp', data=averaged_pp_interaction,
                             compression=compression)
        if qpoint is not None:
            w.create_dataset('qpoint', data=qpoint,
                             compression=compression)
        if weight is not None:
            w.create_dataset('weight', data=weight,
                             compression=compression)
        if grid_point is not None:
            w.create_dataset('grid_point', data=grid_point)
        if band_index is not None:
            # Stored 1-based (band_index + 1).
            w.create_dataset('band_index', data=(band_index + 1))
        if sigma is not None:
            w.create_dataset('sigma', data=sigma)
        if sigma_cutoff is not None:
            w.create_dataset('sigma_cutoff_width', data=sigma_cutoff)
        if kappa_unit_conversion is not None:
            w.create_dataset('kappa_unit_conversion',
                             data=kappa_unit_conversion)
        # Assemble the human-readable summary; branch structure controls
        # exactly where line breaks appear in the printed message.
        if verbose:
            text = ""
            if kappa is not None:
                text += "Thermal conductivity and related properties "
            else:
                text += "Thermal conductivity related properties "
            if grid_point is not None:
                text += "at gp-%d " % grid_point
                if band_index is not None:
                    text += "and band_index-%d\n" % (band_index + 1)
            if sigma is not None:
                if grid_point is not None:
                    text += "and "
                else:
                    text += "at "
                text += "sigma %s" % sigma
                if sigma_cutoff is None:
                    text += "\n"
                else:
                    text += "(%4.2f SD)\n" % sigma_cutoff
                text += "were written into "
            else:
                text += "were written into "
                if band_index is None:
                    text += "\n"
            text += "\"%s\"." % full_filename
            print(text)
        return full_filename
def read_gamma_from_hdf5(mesh,
                         mesh_divisors=None,
                         grid_point=None,
                         band_index=None,
                         sigma=None,
                         sigma_cutoff=None,
                         filename=None,
                         verbose=True):
    """Read gamma and related datasets from a "kappa-*.hdf5" file.

    The filename is reconstructed from the same suffix arguments used
    by write_kappa_to_hdf5.  'gamma' is always read; 'gamma_isotope',
    'ave_pp', 'gamma_N' and 'gamma_U' are read when present.

    Returns
    -------
    dict or None
        Mapping of dataset name to array (or scalar for 0-d datasets),
        or None when the file does not exist.

    """
    if band_index is None:
        band_indices = None
    else:
        band_indices = [band_index]
    suffix = _get_filename_suffix(mesh,
                                  mesh_divisors=mesh_divisors,
                                  grid_point=grid_point,
                                  band_indices=band_indices,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "kappa" + suffix + ".hdf5"
    if not os.path.exists(full_filename):
        if verbose:
            print("%s not found." % full_filename)
        return None
    read_data = {}
    with h5py.File(full_filename, 'r') as f:
        read_data['gamma'] = f['gamma'][:]
        for key in ('gamma_isotope',
                    'ave_pp',
                    'gamma_N',
                    'gamma_U'):
            if key in f.keys():
                # 0-d datasets need scalar indexing ([()]); arrays use
                # the slice form.
                if len(f[key].shape) > 0:
                    read_data[key] = f[key][:]
                else:
                    read_data[key] = f[key][()]
        if verbose:
            print("Read data from %s." % full_filename)
    return read_data
def read_collision_from_hdf5(mesh,
                             indices=None,
                             grid_point=None,
                             band_index=None,
                             sigma=None,
                             sigma_cutoff=None,
                             filename=None,
                             verbose=True):
    """Read collision matrix, gamma and temperatures from hdf5.

    The filename is reconstructed from the same suffix arguments used
    by write_collision_to_hdf5.  `indices` selects temperature indices,
    or the string 'all' to read everything.

    Returns
    -------
    tuple or None
        (collision_matrix, gamma, temperatures) with an extra leading
        axis of length 1 on collision_matrix, or None when the file
        does not exist.

    """
    if band_index is None:
        band_indices = None
    else:
        band_indices = [band_index]
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  band_indices=band_indices,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "collision" + suffix + ".hdf5"
    if not os.path.exists(full_filename):
        if verbose:
            print("%s not found." % full_filename)
        return None
    with h5py.File(full_filename, 'r') as f:
        if indices == 'all':
            # Copy the whole stored matrix under a new leading axis.
            colmat_shape = (1,) + f['collision_matrix'].shape
            collision_matrix = np.zeros(colmat_shape,
                                        dtype='double', order='C')
            gamma = np.array(f['gamma'][:], dtype='double', order='C')
            collision_matrix[0] = f['collision_matrix'][:]
            temperatures = np.array(f['temperature'][:], dtype='double')
        else:
            # Select only the requested temperature indices.
            colmat_shape = (1, len(indices)) + f['collision_matrix'].shape[1:]
            collision_matrix = np.zeros(colmat_shape, dtype='double')
            gamma = np.array(f['gamma'][indices], dtype='double', order='C')
            collision_matrix[0] = f['collision_matrix'][indices]
            temperatures = np.array(f['temperature'][indices], dtype='double')
        if verbose:
            text = "Collisions "
            if band_index is None:
                if grid_point is not None:
                    text += "at grid point %d " % grid_point
            else:
                if grid_point is not None:
                    text += ("at (grid point %d, band index %d) " %
                             (grid_point, band_index))
            if sigma is not None:
                if grid_point is not None:
                    text += "and "
                else:
                    text += "at "
                text += "sigma %s" % _del_zeros(sigma)
                if sigma_cutoff is not None:
                    text += "(%4.2f SD)" % sigma_cutoff
                if band_index is None and grid_point is not None:
                    text += " were read from "
                    text += "\n"
                else:
                    text += "\n"
                    text += "were read from "
            text += "\"%s\"." % full_filename
            print(text)
        return collision_matrix, gamma, temperatures
    # NOTE(review): unreachable -- the with-block above always returns.
    return None
def write_pp_to_hdf5(mesh,
                     pp=None,
                     g_zero=None,
                     grid_point=None,
                     triplet=None,
                     weight=None,
                     triplet_map=None,
                     triplet_all=None,
                     sigma=None,
                     sigma_cutoff=None,
                     filename=None,
                     verbose=True,
                     check_consistency=False,
                     compression="gzip"):
    """Write ph-ph interaction strength to a "pp-*.hdf5" file.

    Two storage layouts are used:

    * Without `g_zero`: `pp` and the optional triplet arrays are stored
      directly.
    * With `g_zero`: only the pp elements where g_zero == 0 are stored
      ('nonzero_pp'), together with the original shape and g_zero
      packed 8 booleans per byte via np.packbits (plus a remainder
      dataset when the total size is not a multiple of 8).

    When `check_consistency`, the unpacked `pp` and `g_zero` are also
    stored verbatim so read_pp_from_hdf5 can verify the round trip.
    Returns the created filename.
    """
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "pp" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        if pp is not None:
            if g_zero is None:
                w.create_dataset('pp', data=pp,
                                 compression=compression)
                if triplet is not None:
                    w.create_dataset('triplet', data=triplet,
                                     compression=compression)
                if weight is not None:
                    w.create_dataset('weight', data=weight,
                                     compression=compression)
                if triplet_map is not None:
                    w.create_dataset('triplet_map', data=triplet_map,
                                     compression=compression)
                if triplet_all is not None:
                    w.create_dataset('triplet_all', data=triplet_all,
                                     compression=compression)
            else:
                # Keep only elements not masked out by g_zero, and pack
                # the mask itself into bits (8 flags per byte).
                x = g_zero.ravel()
                nonzero_pp = np.array(pp.ravel()[x == 0], dtype='double')
                bytelen = len(x) // 8
                remlen = len(x) % 8
                y = x[:bytelen * 8].reshape(-1, 8)
                z = np.packbits(y)
                if remlen != 0:
                    z_rem = np.packbits(x[bytelen * 8:])
                # No compression for pp because already almost random.
                w.create_dataset('nonzero_pp', data=nonzero_pp,
                                 compression=None)
                w.create_dataset('pp_shape', data=pp.shape,
                                 compression=compression)
                w.create_dataset('g_zero_bits', data=z,
                                 compression=compression)
                if remlen != 0:
                    w.create_dataset('g_zero_bits_reminder', data=z_rem)
                # This is only for the test and coupled with read_pp_from_hdf5.
                if check_consistency:
                    w.create_dataset('pp', data=pp,
                                     compression=compression)
                    w.create_dataset('g_zero', data=g_zero,
                                     compression=compression)
    if verbose:
        text = ""
        text += "Ph-ph interaction strength "
        if grid_point is not None:
            text += "at gp-%d " % grid_point
        if sigma is not None:
            if grid_point is not None:
                text += "and "
            else:
                text += "at "
            text += "sigma %s" % sigma
            if sigma_cutoff is None:
                text += "\n"
            else:
                text += "(%4.2f SD)\n" % sigma_cutoff
            text += "were written into "
        else:
            text += "were written into "
            text += "\n"
        text += "\"%s\"." % full_filename
        print(text)
    return full_filename
def read_pp_from_hdf5(mesh,
                      grid_point=None,
                      sigma=None,
                      sigma_cutoff=None,
                      filename=None,
                      verbose=True,
                      check_consistency=False):
    """Read ph-ph interaction strength from a "pp-*.hdf5" file.

    Understands both layouts produced by write_pp_to_hdf5: the packed
    form ('nonzero_pp' + bit-packed 'g_zero_bits') is expanded back to
    the full array; otherwise the plain 'pp' dataset is read and
    g_zero is None.

    Returns
    -------
    tuple or None
        (pp, g_zero) on success, or None when the file does not exist.

    """
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "pp" + suffix + ".hdf5"
    if not os.path.exists(full_filename):
        if verbose:
            print("%s not found." % full_filename)
        return None
    with h5py.File(full_filename, 'r') as f:
        if 'nonzero_pp' in f:
            nonzero_pp = f['nonzero_pp'][:]
            pp_shape = f['pp_shape'][:]
            z = f['g_zero_bits'][:]
            bytelen = np.prod(pp_shape) // 8
            remlen = 0
            if 'g_zero_bits_reminder' in f:
                z_rem = f['g_zero_bits_reminder'][:]
                remlen = np.prod(pp_shape) - bytelen * 8
            # Unpack the bit-packed mask back into one flag per element.
            bits = np.unpackbits(z)
            if not bits.flags['C_CONTIGUOUS']:
                bits = np.array(bits, dtype='uint8')
            g_zero = np.zeros(pp_shape, dtype='byte', order='C')
            b = g_zero.ravel()
            b[:(bytelen * 8)] = bits
            if remlen != 0:
                b[-remlen:] = np.unpackbits(z_rem)[:remlen]
            # Scatter the stored nonzero values back into the full array.
            pp = np.zeros(pp_shape, dtype='double', order='C')
            pp_ravel = pp.ravel()
            pp_ravel[g_zero.ravel() == 0] = nonzero_pp
            # check_consistency==True in write_pp_to_hdf5 required.
            if check_consistency and g_zero is not None:
                if verbose:
                    print("Checking consistency of ph-ph interanction "
                          "strength.")
                assert (g_zero == f['g_zero'][:]).all()
                assert np.allclose(pp, f['pp'][:])
        else:
            pp = np.zeros(f['pp'].shape, dtype='double', order='C')
            pp[:] = f['pp'][:]
            g_zero = None
        if verbose:
            print("Ph-ph interaction strength was read from \"%s\"." %
                  full_filename)
        return pp, g_zero
    # NOTE(review): unreachable -- the with-block above always returns.
    return None
def write_gamma_detail_to_hdf5(temperature,
                               mesh,
                               gamma_detail=None,
                               grid_point=None,
                               triplet=None,
                               weight=None,
                               triplet_map=None,
                               triplet_all=None,
                               frequency_points=None,
                               band_index=None,
                               sigma=None,
                               sigma_cutoff=None,
                               compression="gzip",
                               filename=None,
                               verbose=True):
    """Write per-triplet contributions to gamma to "gamma_detail-*.hdf5".

    Only quantities passed as non-None are stored.  The filename
    suffix encodes mesh, grid point, band index, sigma and
    sigma_cutoff.  Returns the created filename.
    """
    if band_index is None:
        band_indices = None
    else:
        band_indices = [band_index]
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  band_indices=band_indices,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "gamma_detail" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('temperature', data=temperature)
        w.create_dataset('mesh', data=mesh)
        if gamma_detail is not None:
            w.create_dataset('gamma_detail', data=gamma_detail,
                             compression=compression)
        if triplet is not None:
            w.create_dataset('triplet', data=triplet,
                             compression=compression)
        if weight is not None:
            w.create_dataset('weight', data=weight,
                             compression=compression)
        if triplet_map is not None:
            w.create_dataset('triplet_map', data=triplet_map,
                             compression=compression)
        if triplet_all is not None:
            w.create_dataset('triplet_all', data=triplet_all,
                             compression=compression)
        if grid_point is not None:
            w.create_dataset('grid_point', data=grid_point)
        if band_index is not None:
            # Stored 1-based (band_index + 1).
            w.create_dataset('band_index', data=(band_index + 1))
        if sigma is not None:
            w.create_dataset('sigma', data=sigma)
        if sigma_cutoff is not None:
            w.create_dataset('sigma_cutoff_width', data=sigma_cutoff)
        if frequency_points is not None:
            w.create_dataset('frequency_point', data=frequency_points)
        if verbose:
            text = ""
            text += "Phonon triplets contributions to Gamma "
            if grid_point is not None:
                text += "at gp-%d " % grid_point
                if band_index is not None:
                    text += "and band_index-%d\n" % (band_index + 1)
            if sigma is not None:
                if grid_point is not None:
                    text += "and "
                else:
                    text += "at "
                text += "sigma %s" % sigma
                if sigma_cutoff is None:
                    text += "\n"
                else:
                    text += "(%4.2f SD)\n" % sigma_cutoff
                text += "were written into "
            else:
                text += "were written into "
                if band_index is None:
                    text += "\n"
            text += "\"%s\"." % full_filename
            print(text)
        return full_filename
    # NOTE(review): unreachable -- the with-block above always returns.
    return None
def write_phonon_to_hdf5(frequency,
                         eigenvector,
                         grid_address,
                         mesh,
                         compression="gzip",
                         filename=None):
    """Write phonon frequencies, eigenvectors and grid addresses to hdf5.

    Parameters
    ----------
    frequency, eigenvector, grid_address : array_like
        Datasets stored under 'frequency', 'eigenvector' and
        'grid_address'.
    mesh : sequence of 3 ints
        Sampling mesh, stored and used for the filename suffix.
    compression : str or int, optional
        h5py compression filter.  Default is "gzip".
    filename : str or None
        Optional extra tag for the filename suffix.

    Returns
    -------
    str
        The created filename.

    """
    suffix = _get_filename_suffix(mesh, filename=filename)
    full_filename = "phonon" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('mesh', data=mesh)
        w.create_dataset('grid_address', data=grid_address,
                         compression=compression)
        w.create_dataset('frequency', data=frequency,
                         compression=compression)
        w.create_dataset('eigenvector', data=eigenvector,
                         compression=compression)
    # The original had a trailing unreachable `return None`; removed.
    return full_filename
def read_phonon_from_hdf5(mesh,
                          filename=None,
                          verbose=True):
    """Read phonons from a "phonon-*.hdf5" file.

    Returns
    -------
    tuple or None
        (frequencies, eigenvectors, grid_address) on success, or None
        when the file does not exist.  The stored mesh must match the
        requested one (asserted).

    """
    suffix = _get_filename_suffix(mesh, filename=filename)
    full_filename = "phonon" + suffix + ".hdf5"
    if not os.path.exists(full_filename):
        if verbose:
            print("%s not found." % full_filename)
        return None
    with h5py.File(full_filename, 'r') as f:
        frequencies = np.array(f['frequency'][:], dtype='double', order='C')
        itemsize = frequencies.itemsize
        # Complex item width is twice the float width (e.g. float64 -> c16).
        eigenvectors = np.array(f['eigenvector'][:],
                                dtype=("c%d" % (itemsize * 2)), order='C')
        mesh_in_file = np.array(f['mesh'][:], dtype='intc')
        grid_address = np.array(f['grid_address'][:], dtype='intc', order='C')
        assert (mesh_in_file == mesh).all(), "Mesh numbers are inconsistent."
        if verbose:
            print("Phonons are read from \"%s\"." % full_filename)
        return frequencies, eigenvectors, grid_address
    # The original had a trailing unreachable `return None`; removed.
def write_ir_grid_points(mesh,
                         mesh_divs,
                         grid_points,
                         coarse_grid_weights,
                         grid_address,
                         primitive_lattice):
    """Write irreducible grid points to "ir_grid_points.yaml".

    Parameters
    ----------
    mesh : sequence of 3 ints
        Sampling mesh numbers.
    mesh_divs : sequence of 3 ints or None
        Mesh divisors; omitted from the file when None.
    grid_points : sequence of int
        Irreducible grid point indices.
    coarse_grid_weights : sequence of int
        Weight for each grid point.
    grid_address : ndarray
        Integer grid addresses indexed by grid point.
    primitive_lattice : ndarray
        3x3 matrix whose columns are written as reciprocal basis
        vectors.

    """
    # 'with' closes the file; the original implementation leaked the
    # handle.
    with open("ir_grid_points.yaml", 'w') as w:
        w.write("mesh: [ %d, %d, %d ]\n" % tuple(mesh))
        if mesh_divs is not None:
            w.write("mesh_divisors: [ %d, %d, %d ]\n" % tuple(mesh_divs))
        w.write("reciprocal_lattice:\n")
        for vec, axis in zip(primitive_lattice.T, ('a*', 'b*', 'c*')):
            w.write("- [ %12.8f, %12.8f, %12.8f ] # %2s\n"
                    % (tuple(vec) + (axis,)))
        w.write("num_reduced_ir_grid_points: %d\n" % len(grid_points))
        w.write("ir_grid_points:  # [address, weight]\n")
        for g, weight in zip(grid_points, coarse_grid_weights):
            w.write("- grid_point: %d\n" % g)
            w.write("  weight: %d\n" % weight)
            w.write("  grid_address: [ %12d, %12d, %12d ]\n" %
                    tuple(grid_address[g]))
            w.write("  q-point: [ %12.7f, %12.7f, %12.7f ]\n" %
                    tuple(grid_address[g].astype('double') / mesh))
def parse_disp_fc2_yaml(filename="disp_fc2.yaml", return_cell=False):
    """Parse a disp_fc2.yaml file into a displacement dataset.

    Atom numbers are converted from 1-based (file) to 0-based.  When
    `return_cell`, the cell parsed from the same yaml is also returned.
    """
    dataset = _parse_yaml(filename)
    first_atoms = []
    for entry in dataset['first_atoms']:
        entry['number'] -= 1
        first_atoms.append({'number': entry['number'],
                            'displacement': entry['displacement']})
    new_dataset = {'natom': dataset['natom'],
                   'first_atoms': first_atoms}
    if not return_cell:
        return new_dataset
    cell = get_cell_from_disp_yaml(dataset)
    return new_dataset, cell
def parse_disp_fc3_yaml(filename="disp_fc3.yaml", return_cell=False):
    """Parse a disp_fc3.yaml file into an fc3 displacement dataset.

    Atom numbers are converted from 1-based (file) to 0-based.  Each
    second-atom entry is expanded into one record per displacement,
    carrying the optional 'included' flag and 'pair_distance'.  When
    `return_cell`, the cell parsed from the same yaml is also returned.
    """
    dataset = _parse_yaml(filename)
    new_dataset = {'natom': dataset['natom']}
    if 'cutoff_distance' in dataset:
        new_dataset['cutoff_distance'] = dataset['cutoff_distance']
    first_atoms = []
    for entry1 in dataset['first_atoms']:
        second_atoms = []
        for entry2 in entry1['second_atoms']:
            base = {'number': entry2['number'] - 1}
            if 'included' in entry2:
                base['included'] = entry2['included']
            if 'distance' in entry2:
                base['pair_distance'] = entry2['distance']
            # One record per displacement of the second atom.
            for disp in entry2['displacements']:
                record = base.copy()
                record['displacement'] = disp
                second_atoms.append(record)
        first_atoms.append({'number': entry1['number'] - 1,
                            'displacement': entry1['displacement'],
                            'second_atoms': second_atoms})
    new_dataset['first_atoms'] = first_atoms
    if not return_cell:
        return new_dataset
    cell = get_cell_from_disp_yaml(dataset)
    return new_dataset, cell
def parse_FORCES_FC2(disp_dataset,
                     filename="FORCES_FC2",
                     unit_conversion_factor=None):
    """Read FORCES_FC2 and attach force sets to disp_dataset in place.

    Returns [] when a force block fails to parse (mirroring the
    original behavior); otherwise returns None after storing 'forces'
    on each first-atom entry.
    """
    num_atom = disp_dataset['natom']
    first_atoms = disp_dataset['first_atoms']
    parsed_sets = []
    with open(filename, 'r') as f2:
        for _ in range(len(first_atoms)):
            forces = _parse_force_lines(f2, num_atom)
            if forces is None:
                return []
            parsed_sets.append(forces)
    for disp1, forces in zip(first_atoms, parsed_sets):
        if unit_conversion_factor is None:
            disp1['forces'] = forces
        else:
            disp1['forces'] = forces * unit_conversion_factor
def parse_FORCES_FC3(disp_dataset,
                     filename="FORCES_FC3",
                     use_loadtxt=False,
                     unit_conversion_factor=None):
    """Parse type1 FORCES_FC3 and store forces in disp_dataset.

    The file holds one force block per displacement: first-atom blocks
    first, then all second-atom blocks in order.  Raises RuntimeError when
    a block cannot be parsed (non-loadtxt path).
    """
    num_atom = disp_dataset['natom']
    first_atoms = disp_dataset['first_atoms']
    num_disp = len(first_atoms) + sum(len(d1['second_atoms'])
                                      for d1 in first_atoms)

    if use_loadtxt:
        forces_fc3 = np.loadtxt(filename).reshape((num_disp, -1, 3))
    else:
        forces_fc3 = np.zeros((num_disp, num_atom, 3),
                              dtype='double', order='C')
        with open(filename, 'r') as fp:
            for i in range(num_disp):
                forces = _parse_force_lines(fp, num_atom)
                if forces is None:
                    raise RuntimeError("Failed to parse %s." % filename)
                forces_fc3[i] = forces

    if unit_conversion_factor is not None:
        forces_fc3 *= unit_conversion_factor

    # Distribute the parsed blocks back onto the dataset entries in the
    # same order they appear in the file.
    count = 0
    for disp1 in first_atoms:
        disp1['forces'] = forces_fc3[count]
        count += 1
    for disp1 in first_atoms:
        for disp2 in disp1['second_atoms']:
            disp2['forces'] = forces_fc3[count]
            count += 1
def parse_QPOINTS3(filename='QPOINTS3'):
    """Read a QPOINTS3 file and return the q-point triplets as an array.

    The first line holds the number of entries; each subsequent data line
    must contain at least nine floats (three q-points, three components
    each); only the first nine are kept.

    Returns:
        (num, 9) numpy array of floats.

    Raises:
        RuntimeError: when a data line has fewer than nine columns.
    """
    qpoints3 = []
    # Context manager: the original implementation leaked the file handle.
    with open(filename) as f:
        num = int(f.readline().strip())
        for line in f:
            vals = [float(x) for x in line.strip().split()]
            if len(vals) < 9:
                raise RuntimeError("Failed to parse %s." % filename)
            qpoints3.append(vals[0:9])
            if len(qpoints3) == num:
                break
    return np.array(qpoints3)
def parse_fc3(num_atom, filename='fc3.dat'):
    """Read a plain-text fc3.dat into a (natom, natom, natom, 3, 3, 3) array.

    Expected layout per (i, j, k) triplet: one header line, then for each
    of the three Cartesian indices a 3x3 block of floats followed by one
    separator line.
    """
    fc3 = np.zeros((num_atom, num_atom, num_atom, 3, 3, 3), dtype=float)
    # Context manager: the original implementation never closed the file.
    with open(filename) as f:
        for i in range(num_atom):
            for j in range(num_atom):
                for k in range(num_atom):
                    f.readline()  # header line for this (i, j, k) block
                    for l in range(3):
                        fc3[i, j, k, l] = [
                            [float(x) for x in f.readline().split()],
                            [float(x) for x in f.readline().split()],
                            [float(x) for x in f.readline().split()]]
                        f.readline()  # separator after each 3x3 block
    return fc3
def parse_fc2(num_atom, filename='fc2.dat'):
    """Read a plain-text fc2.dat into a (natom, natom, 3, 3) array.

    Expected layout per (i, j) pair: one header line, a 3x3 block of
    floats, then one separator line.
    """
    fc2 = np.zeros((num_atom, num_atom, 3, 3), dtype=float)
    # Context manager: the original implementation never closed the file.
    with open(filename) as f:
        for i in range(num_atom):
            for j in range(num_atom):
                f.readline()  # header line for this (i, j) block
                fc2[i, j] = [[float(x) for x in f.readline().split()],
                             [float(x) for x in f.readline().split()],
                             [float(x) for x in f.readline().split()]]
                f.readline()  # separator after each 3x3 block
    return fc2
def parse_triplets(filename):
    """Read a triplets file: each data row is three grid indices + a weight.

    Lines that are blank or start with '#' are skipped.

    Returns:
        (triplets, weights): integer numpy arrays of shape (n, 3) and (n,).
    """
    triplets = []
    weights = []
    # Context manager: the original implementation leaked the file handle.
    with open(filename) as f:
        for line in f:
            stripped = line.strip()
            # Also skip blank lines: the original indexed stripped[0] and
            # raised IndexError on an empty line.
            if not stripped or stripped[0] == "#":
                continue
            vals = [int(x) for x in stripped.split()]
            triplets.append(vals[:3])
            weights.append(vals[3])
    return np.array(triplets), np.array(weights)
def parse_grid_address(filename):
    """Read a grid-address file; columns 2-4 of each data row are kept.

    Lines that are blank or start with '#' are skipped.

    Returns:
        (n, 3) integer numpy array of grid addresses.
    """
    grid_address = []
    # Context manager: the original implementation leaked the file handle.
    with open(filename, 'r') as f:
        for line in f:
            stripped = line.strip()
            # Also skip blank lines: the original indexed stripped[0] and
            # raised IndexError on an empty line.
            if not stripped or stripped[0] == "#":
                continue
            vals = [int(x) for x in stripped.split()]
            grid_address.append(vals[1:4])
    return np.array(grid_address)
def get_filename_suffix(mesh,
                        mesh_divisors=None,
                        grid_point=None,
                        band_indices=None,
                        sigma=None,
                        sigma_cutoff=None,
                        temperature=None,
                        filename=None):
    """Public entry point; delegates to the module-private implementation."""
    options = dict(mesh_divisors=mesh_divisors,
                   grid_point=grid_point,
                   band_indices=band_indices,
                   sigma=sigma,
                   sigma_cutoff=sigma_cutoff,
                   temperature=temperature,
                   filename=filename)
    return _get_filename_suffix(mesh, **options)
def _get_filename_suffix(mesh,
                         mesh_divisors=None,
                         grid_point=None,
                         band_indices=None,
                         sigma=None,
                         sigma_cutoff=None,
                         temperature=None,
                         filename=None):
    """Assemble the output-file suffix encoding mesh and run parameters.

    Segments are appended only for parameters that are provided, e.g.
    "-m888-g5-b1b2-s0.1.tag".  Mesh divisors of all ones are omitted.
    """
    parts = ["-m%d%d%d" % tuple(mesh)]
    if mesh_divisors is not None and \
       (np.array(mesh_divisors, dtype=int) != 1).any():
        parts.append("-d%d%d%d" % tuple(mesh_divisors))
    if grid_point is not None:
        parts.append("-g%d" % grid_point)
    if band_indices is not None:
        # Band indices are reported 1-based in filenames.
        parts.append("-" + "".join("b%d" % (bi + 1) for bi in band_indices))
    if sigma is not None:
        parts.append("-s" + _del_zeros(sigma))
    if sigma_cutoff is not None:
        parts.append("-sd" + _del_zeros(sigma_cutoff))
    if temperature is not None:
        parts.append("-t" + _del_zeros(temperature))
    if filename is not None:
        parts.append("." + filename)
    return "".join(parts)
def _del_zeros(val):
    """Format val with '%f' and strip trailing zeros and a trailing dot.

    e.g. 0.5 -> "0.5", 2.0 -> "2", 100.0 -> "100".
    """
    # Fix: the original used rstrip(r'\.') which strips the character set
    # {'\\', '.'}; only the dot is intended (backslash never occurs in
    # '%f' output, so behavior is unchanged, but the intent is now clear).
    return ("%f" % val).rstrip('0').rstrip('.')
def _parse_yaml(file_yaml):
    """Load a YAML file, preferring the C-accelerated loader when available.

    Returns the parsed document (typically a dict).
    """
    import yaml
    try:
        # CLoader is much faster for the large disp/force yaml files.
        from yaml import CLoader as Loader
    except ImportError:
        from yaml import Loader
    # (The unused Dumper imports from the original were removed.)
    with open(file_yaml) as f:
        data = yaml.load(f.read(), Loader=Loader)
    return data
def _parse_force_lines(forcefile, num_atom):
    """Read the next num_atom force rows (3 floats each) from forcefile.

    Blank lines and '#' comment lines are skipped.  Returns a
    (num_atom, 3) numpy array, or None when the file ends early.
    """
    rows = []
    for raw in forcefile:
        text = raw.strip()
        if not text or text.startswith('#'):
            continue
        rows.append([float(v) for v in text.split()])
        if len(rows) == num_atom:
            break
    if len(rows) == num_atom:
        return np.array(rows)
    return None
def _parse_force_constants_lines(fcthird_file, num_atom):
    """Read natom^2 3x3 force-constant blocks as rows of 3 floats.

    Blank lines and '#' comment lines are skipped.  Returns an array of
    shape (num_atom, num_atom, 3, 3), or None when the file ends early.
    """
    needed = num_atom ** 2 * 3
    rows = []
    for raw in fcthird_file:
        text = raw.strip()
        if not text or text.startswith('#'):
            continue
        rows.append([float(v) for v in text.split()])
        if len(rows) == needed:
            break
    if len(rows) == needed:
        return np.array(rows).reshape(num_atom, num_atom, 3, 3)
    return None
def get_length_of_first_line(f):
    """Return the number of whitespace-separated fields on the first
    non-blank, non-comment line of f, rewinding f to its start.

    Raises:
        RuntimeError: when the file holds no data line.
    """
    for line in f:
        stripped = line.strip()
        if not stripped or stripped[0] == '#':
            continue
        f.seek(0)  # rewind so callers can re-read from the beginning
        return len(line.split())
    # Typo fixed: "infomration" -> "information".
    raise RuntimeError("File doesn't contain relevant information.")
|
atztogo/phono3py
|
phono3py/file_IO.py
|
Python
|
bsd-3-clause
| 61,312
|
[
"VASP",
"phonopy"
] |
34d4b748307f31a578ccaac27480d99d24d5cd03fb1bc22113d5480e86b681ae
|
# -*- coding: utf-8 -*-
import os
import time
import shutil
from datetime import datetime
import bisect
import tempfile
import itertools
from math import sqrt
import numpy as np
import pandas as pd
import netCDF4
import pytz
from pyaxiom.netcdf import EnhancedDataset, EnhancedMFDataset
from rtree import index
from wms import mpl_handler
from wms import gfi_handler
from wms import data_handler
from wms import gmd_handler
from wms.models import Dataset, Layer, VirtualLayer, NetCDFDataset
from wms.utils import DotDict, calc_lon_lat_padding, calc_safety_factor, find_appropriate_time
from wms import logger
class RGrid(object):
    """Thin wrapper around an open netCDF4-like dataset holding a regular grid."""

    def __init__(self, nc):
        """Wrap an already-open netCDF Dataset/MFDataset object."""
        self.nc = nc
        # netCDF C library versions < 4.1.2 raise ValueError from filepath()
        try:
            self._filepath = nc.filepath()
        except ValueError:
            self._filepath = None

    def rgrid_compliant_file(self):
        """
        Determine whether a dataset is RGRID compliant.

        :return: True if the dataset exposes longitude and latitude
                 variables (by standard_name), False otherwise
        :rtype: bool
        """
        longitude = self.nc.get_variables_by_attributes(standard_name='longitude')
        latitude = self.nc.get_variables_by_attributes(standard_name='latitude')
        if not longitude or not latitude:
            return False
        return True

    def save_as_netcdf(self, filepath):
        """Write the grid's lon/lat coordinates (as 2D i,j variables) to filepath."""
        with netCDF4.Dataset(filepath, 'w') as nclocal:
            # copy all dimensions
            for d in self.nc.dimensions.values():
                nclocal.createDimension(d.name, d.size)
            # copy remote longitude/latitude (assume both present)
            _lon = self.nc.get_variables_by_attributes(standard_name='longitude')
            _lat = self.nc.get_variables_by_attributes(standard_name='latitude')
            # TODO: throw exception if more than one latitude/longitude
            _lon = _lon[0]
            _lat = _lat[0]
            # are longitude/latitude 1D or 2D?
            if _lat.ndim > 1:
                _i = _lat.shape[0]
                _j = _lat.shape[1]
            else:  # 1-D axes; a meshgrid is built below
                _i = _lat.shape[0]
                _j = _lon.shape[0]
            nclocal.createDimension('i', _i)
            nclocal.createDimension('j', _j)
            latitude = nclocal.createVariable('latitude', 'f', ('i', 'j',))
            latitude.setncattr('standard_name', 'latitude')
            longitude = nclocal.createVariable('longitude', 'f', ('i', 'j',))
            longitude.setncattr('standard_name', 'longitude')
            # Normalize longitudes to EPSG:4326 (-180..180): values on a
            # 0..360 grid that exceed 180 must wrap DOWN by 360 degrees.
            # BUGFIX: the original added 360, pushing them further out of
            # the valid range.
            _4326l = _lon[:]
            _4326l[_4326l > 180] = _4326l[_4326l > 180] - 360.0
            if _lat.ndim > 1:
                longitude[:] = _4326l[:]  # use EPSG:4326 longitude
                latitude[:] = _lat[:]
            else:
                longitude[:], latitude[:] = np.meshgrid(_4326l[:], _lat[:])
            nclocal.sync()
class RGridDataset(Dataset, NetCDFDataset):
    """sci-wms Dataset implementation for regular (rectilinear) grid NetCDF files.

    Longitude/latitude (and time) coordinates are cached to a local
    "topology" NetCDF file, and an rtree spatial index over the grid
    centers is built for fast nearest-cell lookups.
    """

    @staticmethod
    def is_valid(uri):
        # True when the file (or a time-aggregated MFDataset) at `uri`
        # exposes lon/lat variables and declares an 'rgrid' Convention.
        try:
            with EnhancedDataset(uri) as ds:
                nc_ds = RGrid(ds)
                return nc_ds.rgrid_compliant_file() and 'rgrid' in ds.Conventions.lower()
        except RuntimeError:
            try:
                with EnhancedMFDataset(uri, aggdim='time') as ds:
                    nc_ds = RGrid(ds)
                    # NOTE(review): this branch uses `or` while the
                    # single-file branch above uses `and` -- looks
                    # unintentional; confirm which is meant.
                    return nc_ds.rgrid_compliant_file() or 'rgrid' in ds.Conventions.lower()
            except (AttributeError, RuntimeError):
                return False
        except (AttributeError):
            return False

    def has_cache(self):
        # The cached topology file existing is the cache marker.
        return os.path.exists(self.topology_file)

    def make_rtree(self):
        """Build the on-disk rtree over every grid cell center."""
        with self.dataset() as nc:
            with self.topology() as topo:
                lon = topo.get_variables_by_attributes(standard_name='longitude')[0]
                lat = topo.get_variables_by_attributes(standard_name='latitude')[0]

                def rtree_generator_function():
                    # Degenerate (point) bounding boxes, one per cell
                    # center; the payload is the (col, row) grid index.
                    c = -1
                    for row in range(lon.shape[0]):
                        for col in range(lon.shape[1]):
                            coord = (lon[row, col], lat[row, col], lon[row, col], lat[row, col],)
                            c += 1
                            yield(c, coord, (col, row))

                logger.info("Building Faces (centers) Rtree Topology Cache for {0}".format(self.name))
                _, temp_file = tempfile.mkstemp(suffix='.face')
                start = time.time()
                p = index.Property()
                p.filename = str(temp_file)
                p.overwrite = True
                p.storage = index.RT_Disk
                p.dimension = 2
                # NOTE(review): p.filename.decode('utf-8') assumes a bytes
                # filename (Python 2); under Python 3 `str` has no .decode
                # and this line would raise -- verify the target runtime.
                index.Index(p.filename.decode('utf-8'),
                            rtree_generator_function(),
                            properties=p,
                            overwrite=True,
                            interleaved=True)
                logger.info("Built Faces Rtree Topology Cache in {0} seconds.".format(time.time() - start))
                # index.Index writes <temp_file>.dat/.idx; move them into place.
                shutil.move('{}.dat'.format(temp_file), self.face_tree_data_file)
                shutil.move('{}.idx'.format(temp_file), self.face_tree_index_file)

    def update_cache(self, force=False):
        """Rewrite the cached topology (coords + time) and rebuild the rtree."""
        with self.dataset() as nc:
            rg = RGrid(nc)
            rg.save_as_netcdf(self.topology_file)  # saves topology coordinates (latitude, longitude, time)

            if not os.path.exists(self.topology_file):
                logger.error("Failed to create topology_file cache for Dataset '{}'".format(self.dataset))
                return

            # add time to the cached topology
            time_vars = nc.get_variables_by_attributes(standard_name='time')
            time_dims = list(itertools.chain.from_iterable([time_var.dimensions for time_var in time_vars]))
            unique_time_dims = list(set(time_dims))
            with EnhancedDataset(self.topology_file, mode='a') as cached_nc:
                # create pertinent time dimensions if they aren't already present
                for unique_time_dim in unique_time_dims:
                    dim_size = len(nc.dimensions[unique_time_dim])
                    try:
                        cached_nc.createDimension(unique_time_dim, size=dim_size)
                    except RuntimeError:
                        continue
                # support cases where there may be more than one variable with standard_name='time' in a dataset
                for time_var in time_vars:
                    try:
                        time_var_obj = cached_nc.createVariable(time_var.name,
                                                                time_var.dtype,
                                                                time_var.dimensions)
                    except RuntimeError:
                        # variable already exists (cache refresh) -- reuse it
                        time_var_obj = cached_nc.variables[time_var.name]
                    finally:
                        time_var_obj[:] = time_var[:]
                        time_var_obj.units = time_var.units
                        time_var_obj.standard_name = 'time'

        # Now do the RTree index
        self.make_rtree()

        self.cache_last_updated = datetime.utcnow().replace(tzinfo=pytz.utc)
        self.save()

    def getmap(self, layer, request):
        """WMS GetMap: render the requested layer/time as an image response."""
        time_index, time_value = self.nearest_time(layer, request.GET['time'])
        wgs84_bbox = request.GET['wgs84_bbox']

        with self.dataset() as nc:
            with self.topology() as topo:
                lon = topo.get_variables_by_attributes(standard_name='longitude')[0][:]
                lat = topo.get_variables_by_attributes(standard_name='latitude')[0][:]

                if isinstance(layer, Layer):
                    raw_var = nc.variables[layer.access_name]
                    # Trim by dimensionality: (t, z, y, x) / (t, y, x) / (y, x).
                    if len(raw_var.shape) == 4:
                        z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
                        raw_data = raw_var[time_index, z_index, :]
                    elif len(raw_var.shape) == 3:
                        raw_data = raw_var[time_index, :]
                    elif len(raw_var.shape) == 2:
                        raw_data = raw_var[:]
                    else:
                        raise BaseException('Unable to trim variable {0} data.'.format(layer.access_name))

                    if request.GET['image_type'] == 'pcolor':
                        return mpl_handler.pcolormesh_response(lon, lat, data=raw_data, request=request)
                    elif request.GET['image_type'] in ['filledhatches', 'hatches', 'filledcontours', 'contours']:
                        return mpl_handler.contouring_response(lon, lat, data=raw_data, request=request)
                    else:
                        raise NotImplementedError('Image type "{}" is not supported.'.format(request.GET['image_type']))

                elif isinstance(layer, VirtualLayer):
                    # Vector layer: combine member layers into x/y components.
                    x_var = None
                    y_var = None
                    raw_vars = []
                    for l in layer.layers:
                        # NOTE(review): `cached_sg` is not defined anywhere
                        # in this module (nor is `avg_to_cell_center` below)
                        # -- this branch appears copied from the sgrid
                        # dataset type and would raise NameError; confirm.
                        data_obj = getattr(cached_sg, l.access_name)
                        raw_var = nc.variables[l.access_name]
                        raw_vars.append(raw_var)
                        if len(raw_var.shape) == 4:
                            z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
                            raw_data = raw_var[time_index, z_index, data_obj.center_slicing[-2], data_obj.center_slicing[-1]]
                        elif len(raw_var.shape) == 3:
                            raw_data = raw_var[time_index, data_obj.center_slicing[-2], data_obj.center_slicing[-1]]
                        elif len(raw_var.shape) == 2:
                            raw_data = raw_var[data_obj.center_slicing]
                        else:
                            raise BaseException('Unable to trim variable {0} data.'.format(l.access_name))

                        raw_data = avg_to_cell_center(raw_data, data_obj.center_axis)
                        # First member matching the x axis becomes x_var,
                        # likewise for y; later members do not overwrite.
                        if x_var is None:
                            if data_obj.vector_axis and data_obj.vector_axis.lower() == 'x':
                                x_var = raw_data
                            elif data_obj.center_axis == 1:
                                x_var = raw_data
                        if y_var is None:
                            if data_obj.vector_axis and data_obj.vector_axis.lower() == 'y':
                                y_var = raw_data
                            elif data_obj.center_axis == 0:
                                y_var = raw_data

                    if x_var is None or y_var is None:
                        raise BaseException('Unable to determine x and y variables.')

                    dim_lengths = [ len(v.dimensions) for v in raw_vars ]
                    if len(list(set(dim_lengths))) != 1:
                        raise AttributeError('One or both of the specified variables has screwed up dimensions.')

                    if request.GET['image_type'] == 'vectors':
                        vectorstep = request.GET['vectorstep']
                        # don't do this if the vectorstep is 1; let's save a microsecond or two
                        # it's identical to getting all the data
                        if vectorstep > 1:
                            data_dim = len(lon.shape)
                            step_slice = (np.s_[::vectorstep],) * data_dim  # make sure the vector step is used for all applicable dimensions
                            lon = lon[step_slice]
                            lat = lat[step_slice]
                            x_var = x_var[step_slice]
                            y_var = y_var[step_slice]

                        vectorscale = request.GET['vectorscale']
                        padding_factor = calc_safety_factor(vectorscale)
                        # figure out the average distance between lat/lon points
                        # do the math after taking into the vectorstep if specified
                        spatial_idx_padding = calc_lon_lat_padding(lon, lat, padding_factor)
                        spatial_idx = data_handler.lat_lon_subset_idx(lon, lat,
                                                                      lonmin=wgs84_bbox.minx,
                                                                      latmin=wgs84_bbox.miny,
                                                                      lonmax=wgs84_bbox.maxx,
                                                                      latmax=wgs84_bbox.maxy,
                                                                      padding=spatial_idx_padding
                                                                      )
                        subset_lon = self._spatial_data_subset(lon, spatial_idx)
                        subset_lat = self._spatial_data_subset(lat, spatial_idx)
                        spatial_subset_x_var = self._spatial_data_subset(x_var, spatial_idx)
                        spatial_subset_y_var = self._spatial_data_subset(y_var, spatial_idx)
                        return mpl_handler.quiver_response(subset_lon,
                                                           subset_lat,
                                                           spatial_subset_x_var,
                                                           spatial_subset_y_var,
                                                           request,
                                                           vectorscale
                                                           )
                    else:
                        raise NotImplementedError('Image type "{}" is not supported.'.format(request.GET['image_type']))

    def getfeatureinfo(self, layer, request):
        """WMS GetFeatureInfo: values at the grid cell nearest the clicked point."""
        with self.dataset() as nc:
            with self.topology() as topo:
                data_obj = nc.variables[layer.access_name]
                geo_index, closest_x, closest_y, start_time_index, end_time_index, return_dates = self.setup_getfeatureinfo(topo, data_obj, request)

                return_arrays = []
                z_value = None
                if isinstance(layer, Layer):
                    if len(data_obj.shape) == 4:
                        z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
                        data = data_obj[start_time_index:end_time_index, z_index, geo_index[0], geo_index[1]]
                    elif len(data_obj.shape) == 3:
                        data = data_obj[start_time_index:end_time_index, geo_index[0], geo_index[1]]
                    elif len(data_obj.shape) == 2:
                        data = data_obj[geo_index[0], geo_index[1]]
                    else:
                        raise ValueError("Dimension Mismatch: data_obj.shape == {0} and time indexes = {1} to {2}".format(data_obj.shape, start_time_index, end_time_index))
                    return_arrays.append((layer.var_name, data))

                elif isinstance(layer, VirtualLayer):
                    # Data needs to be [var1,var2] where var are 1D (nodes only, elevation and time already handled)
                    for l in layer.layers:
                        # NOTE(review): this loop slices data_obj (the parent
                        # layer's variable) rather than each member `l`, so
                        # every member gets identical data -- confirm intent.
                        if len(data_obj.shape) == 4:
                            z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
                            data = data_obj[start_time_index:end_time_index, z_index, geo_index[0], geo_index[1]]
                        elif len(data_obj.shape) == 3:
                            data = data_obj[start_time_index:end_time_index, geo_index[0], geo_index[1]]
                        elif len(data_obj.shape) == 2:
                            data = data_obj[geo_index[0], geo_index[1]]
                        else:
                            raise ValueError("Dimension Mismatch: data_obj.shape == {0} and time indexes = {1} to {2}".format(data_obj.shape, start_time_index, end_time_index))
                        return_arrays.append((l.var_name, data))

                # Data is now in the return_arrays list, as a list of numpy arrays. We need
                # to add time and depth to them to create a single Pandas DataFrame
                if len(data_obj.shape) == 4:
                    df = pd.DataFrame({'time': return_dates,
                                       'x': closest_x,
                                       'y': closest_y,
                                       'z': z_value})
                elif len(data_obj.shape) == 3:
                    df = pd.DataFrame({'time': return_dates,
                                       'x': closest_x,
                                       'y': closest_y})
                elif len(data_obj.shape) == 2:
                    df = pd.DataFrame({'x': closest_x,
                                       'y': closest_y})
                else:
                    df = pd.DataFrame()

                # Now add a column for each member of the return_arrays list
                for (var_name, np_array) in return_arrays:
                    df.loc[:, var_name] = pd.Series(np_array, index=df.index)

                return gfi_handler.from_dataframe(request, df)

    def wgs84_bounds(self, layer):
        """Bounding box of the cached lon/lat coordinates (assumed EPSG:4326)."""
        with netCDF4.Dataset(self.topology_file) as nc:
            longitudes = nc.get_variables_by_attributes(standard_name='longitude')[0][:]
            latitudes = nc.get_variables_by_attributes(standard_name='latitude')[0][:]
            lon_max = longitudes.max()
            lon_min = longitudes.min()
            lat_max = latitudes.max()
            lat_min = latitudes.min()
            return DotDict(minx=lon_min,
                           miny=lat_min,
                           maxx=lon_max,
                           maxy=lat_max,
                           bbox=(lon_min, lat_min, lon_max, lat_max)
                           )

    def _spatial_data_subset(self, data, spatial_index):
        # spatial_index is a (2, n) array of row/column indices into `data`.
        rows = spatial_index[0, :]
        columns = spatial_index[1, :]
        data_subset = data[rows, columns]
        return data_subset

    def times(self, layer):
        """Datetimes available for `layer`, decoded from the cached time variable."""
        with self.topology() as nc:
            time_vars = nc.get_variables_by_attributes(standard_name='time')
            if len(time_vars) == 1:
                time_var = time_vars[0]
            else:
                # if there is more than one variable with standard_name = time,
                # find the appropriate one to use with the layer
                var_obj = nc.variables[layer.access_name]
                time_var_name = find_appropriate_time(var_obj, time_vars)
                time_var = nc.variables[time_var_name]
            return netCDF4.num2date(time_var[:], units=time_var.units)

    def depth_variable(self, layer):
        """Return the vertical coordinate variable for `layer`, or None."""
        with self.dataset() as nc:
            try:
                layer_var = nc.variables[layer.access_name]
                # Candidate coordinate names: every dimension name plus
                # anything listed in the variable's `coordinates` attribute.
                coordinates = [d for d in nc.dimensions]
                try:
                    coordinates = coordinates + layer_var.coordinates.strip().split()
                except:  # no `coordinates` attribute on the variable
                    pass
                for cv in coordinates:
                    try:
                        coord_var = nc.variables[cv]
                        # CF: a Z coordinate has axis='Z' or positive up/down.
                        if hasattr(coord_var, 'axis') and coord_var.axis.lower().strip() == 'z':
                            return coord_var
                        elif hasattr(coord_var, 'positive') and coord_var.positive.lower().strip() in ['up', 'down']:
                            return coord_var
                    except BaseException:
                        pass
            except AttributeError:
                pass

    def depth_direction(self, layer):
        """'up', 'down', or 'unknown' per the depth variable's `positive` attribute."""
        d = self.depth_variable(layer)
        if d is not None:
            if hasattr(d, 'positive'):
                return d.positive
        return 'unknown'

    def depths(self, layer):
        """ sci-wms only deals in depth indexes at this time (no sigma) """
        d = self.depth_variable(layer)
        if d is not None:
            return range(0, d.shape[0])
        return []

    def nearest_z(self, layer, z):
        """
        Return the z index and z value that is closest
        """
        depths = self.depths(layer)
        depth_idx = bisect.bisect_right(depths, z)
        try:
            depths[depth_idx]
        except IndexError:
            # past the deepest level -- clamp to the last index
            depth_idx -= 1
        return depth_idx, depths[depth_idx]

    def humanize(self):
        """Human-readable grid type name."""
        return "RGRID"
|
brianmckenna/sci-wms
|
wms/models/datasets/rgrid.py
|
Python
|
gpl-3.0
| 20,561
|
[
"NetCDF"
] |
a4ba2cfa30dc5ad2c5e3c3754b722b882f91babd2db65f2a4c4f68c5aa05b40e
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
####### Test for Cut Hexa ###############
# Exercises the HEXABLOCK edge-cutting operations: build a small Cartesian
# grid, cut one edge uniformly, then cut another edge at explicit positions,
# saving a VTK dump after each step for visual inspection.

import hexablock

doc = hexablock.addDocument("Cut Hexa Test")

# Grid size: number of hexahedra along x, y, z.
size_x = 2
size_y = 1
size_z = 1

grid = doc.makeCartesianTop(size_x, size_y, size_z)
# Edge along the K (z) direction at grid corner (0, 0, 0).
arete = grid.getEdgeK(0, 0, 0)
doc.saveVtk("decoupe1.vtk")

# Uniform cut: split the edge into nbCuts + 1 equal segments.
nbCuts = 2
grid2 = doc.cutUni(arete, nbCuts)
doc.saveVtk("decoupe2.vtk")

# Kept for reference: an alternative way to generate cut abscissae by
# repeated halving (produces values approaching 1).
#tablen = []
#reste = 1
#abscisse = 0
#for nl in range(5):
#    reste /= 2
#    abscisse += reste
#    tablen.append(abscisse)

# Explicit cut positions along the edge.
# NOTE(review): HEXABLOCK cut positions are usually fractions in [0, 1];
# these integer values look unusual -- confirm the expected units.
tablen = [2, 3, 8, 14, 18]
arete = grid2.getEdge(0)
grid3 = doc.cut(arete, tablen)
doc.saveVtk("decoupe3.vtk")
|
FedoraScientific/salome-hexablock
|
doc/test_doc/cutHexa/cut_hexa.py
|
Python
|
lgpl-2.1
| 1,457
|
[
"VTK"
] |
6cfadd3f82eaef23f339f710285f685ae825f63ac209d6313d6d286fae4b3350
|
##############################################################################
#
# Author: Alejandro Molina-Sanchez
# Run real-time simulations with yambo
#
# Warning: Real-time simulations requires several data folders for running
# properly. Before using this scripts compulsively is recommended
# to understand the different run levels.
#
# This script plots the KBE (delta-pulse) and BSE spectra
#
##############################################################################
# Plot the KBE (delta-pulse real-time) spectrum against the BSE spectrum.
from yambopy import *
from schedulerpy import *
import sys
import argparse
from numpy import loadtxt

# Quick usage reminder printed before argument parsing.
print('-f Folder containing the rt simulation')
print('-b Folder containing the bse simulation')
print('-j Jobname of the rt simulation')
print('-s Jobname of bse simulation')

parser = argparse.ArgumentParser(description='Map of a double-grid')
parser.add_argument('-f' ,'--folder' ,help='Folder containing the rt simulation')
parser.add_argument('-b' ,'--bsefolder',help='Folder containing the bse simulation')
parser.add_argument('-j' ,'--jobname' ,help='Jobname of the rt simulation')
parser.add_argument('-s' ,'--bsename' ,help='Jobname of bse simulation')
args = parser.parse_args()

folder = args.folder
jobname = args.jobname
bsefolder = args.bsefolder
bsename = args.bsename

# Build the ypp_rt input: dielectric function on a 0-10 eV window with
# 1000 energy steps, skipping current/polarization I/O.
run = YamboIn('ypp_rt -t X -V all', folder=folder, filename='ypp.in')
run['EnRngeRt'] = [[0, 10], 'eV']
run['ETStpsRt'] = 1000
run.arguments.append('SkipJP_IO')
run.write('%s/ypp.in' % folder)

# NOTE(review): `os` (and `plt` below) are presumably re-exported by
# `from yambopy import *` -- confirm, otherwise these lines raise NameError.
os.system('cd %s; ypp_rt -F ypp.in -J %s' % (folder, jobname))

# Column 0 is energy (eV); column 1 is Im(eps) for both outputs.
kbe = loadtxt('%s/o-%s.YPP-eps_along_E' % (folder, jobname))
bse = loadtxt('%s/o-%s.eps_q1_diago_bse' % (bsefolder, bsename))

plt.plot(kbe[:, 0], kbe[:, 1], label='KBE')
plt.plot(bse[:, 0], bse[:, 1], label='BSE')
plt.legend()
plt.show()
|
henriquemiranda/yambopy
|
tutorial/si/kbe-spectra.py
|
Python
|
bsd-3-clause
| 1,775
|
[
"Yambo"
] |
5b3130992bcb8d1348bad4e4e2be3537162206a7fb15591e09bf41fca0c707a6
|
#!/usr/bin/env python
"""
Ben Payne
ben.is.located@gmail.com
Yoga graph
This is a library of functions used by the main YogaGraph program
This file contains no graph data
This work is licensed under the Creative Commons Attribution-ShareAlike 4.0 International License.
To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/4.0/.
"""
import networkx as nx # graph format
import matplotlib.pyplot as plt # for plotting graph
import random # for selecting next pose
import subprocess # launch picture viewer
import time # for delaying next pose
import os
import signal # for keyboard input with timeout
import fnmatch # for matching file name of pictures
import yaml # used to read "config.input"
"""
def interrupted(signum, frame):
"called when read times out"
print 'interrupted!'
signal.signal(signal.SIGALRM, interrupted)
def input(delay,signal):
try:
print 'You have '+str(delay)+' seconds to type in your stuff...'
foo = raw_input()
return foo
except:
# timeout
print("timeout reached")
signal.alarm(0)
foo = "none"
return foo
"""
def get_inputs(config_filename):
    """Read the run-time configuration from a YAML file.

    See https://yaml-online-parser.appspot.com/ for the expected layout.

    Returns:
        Tuple of (viewer, use_viewer, directory_containing_pictures,
        entry_point_index, max_poses, delay, field_value).

    Raises:
        KeyError: when a required key is missing from the config file.
    """
    with open(config_filename, "r") as file_handle:
        # safe_load: yaml.load without an explicit Loader is unsafe and is
        # an error under PyYAML >= 6; the config contains only plain data.
        input_data = yaml.safe_load(file_handle)
    viewer = input_data["viewer"]
    use_viewer = input_data["use_viewer"]
    directory_containing_pictures = input_data["directory_containing_pictures"]
    entry_point_index = input_data["entry_point_index"]
    max_poses = input_data["max_poses"]
    delay = input_data["delay"]
    field_value = input_data["field_value"]
    return (
        viewer,
        use_viewer,
        directory_containing_pictures,
        entry_point_index,
        max_poses,
        delay,
        field_value,
    )
def plot_graph_nodes(DG):
    """Draw DG with a spring layout and show it in a matplotlib window.

    (nx.draw_random(DG) is an alternative layout, kept for reference.)
    """
    nx.draw_spring(DG)
    plt.show()
def plot_graph_with_labels(DG, label_str):
    """Draw DG with each node labelled by its `label_str` node attribute."""
    labels = {}
    for node_indx in DG.nodes():
        labels[node_indx] = DG.nodes[node_indx][label_str]
    pos = nx.spring_layout(DG)  # other choices are circular, random, shell, spectral
    nx.draw_networkx_labels(DG, pos, labels, font_size=12)
    nx.draw_networkx_nodes(DG, pos)
    nx.draw_networkx_edges(DG, pos, width=1)
    plt.show()  # matplotlib window blocks until closed
def list_cycles(DG):
    """Print the cycles of DG.

    nx.simple_cycles returns a generator, so it is materialized before
    printing (the original printed the generator's repr).  nx.cycle_basis
    is not implemented for directed graphs (the original called it
    unconditionally and crashed on a DiGraph), so it is only invoked for
    undirected graphs.
    """
    print("cycles")
    print(list(nx.simple_cycles(DG)))
    if not DG.is_directed():
        print(nx.cycle_basis(DG, 0))
def launch_picture(picturename, delay, viewer_name):
    """Open `picturename` with the external viewer, wait `delay` seconds,
    then close the viewer process.

    (Alternative for macOS Preview:
     osascript -e 'tell application "Preview" to quit')
    """
    print("picture = " + picturename)
    proc = subprocess.Popen([viewer_name, picturename])
    time.sleep(delay)
    # terminate() asks politely; kill() guarantees the process is gone.
    proc.terminate()
    proc.kill()
# https://docs.python.org/2/library/fnmatch.html
def find_picture(current_indx, delay, viewer, directory="pose_pictures"):
    """Pick a random picture file for pose index `current_indx`.

    Filenames are expected to look like '<index>__<anything>'.  The
    `directory` parameter generalizes the previously hard-coded
    "pose_pictures" folder (default keeps old behavior).  `delay` and
    `viewer` are accepted for call-site compatibility but unused.

    Returns:
        (found, picturename): found is True and picturename a matching
        filename when at least one match exists; otherwise (False, "").
    """
    pattern = str(current_indx) + "__*"
    matches = [fname for fname in os.listdir(directory)
               if fnmatch.fnmatch(fname, pattern)]
    if matches:
        return True, random.choice(matches)
    return False, ""
# http://stackoverflow.com/questions/3471461/raw-input-and-timeout
# http://stackoverflow.com/questions/1335507/keyboard-input-with-timeout-in-python
def get_user_feedback(delay, difficulty):
    """Interactively adjust the pose delay and maximum difficulty.

    Fix: the original used raw_input(), which only exists on Python 2;
    the rest of this module is Python 3 (print functions, nx 2.x API).

    Returns:
        (delay, difficulty) adjusted by +/-1 according to the answers.
    """
    user_feedback_speed = input(
        "Currently " + str(delay) + " seconds. faster/slower? [f/s]: "
    )
    if user_feedback_speed == "f":
        delay = delay - 1
    elif user_feedback_speed == "s":
        delay = delay + 1
    user_feedback_difficulty = input(
        "Current max difficulty " + str(difficulty) + ". harder/easier? [h/e]: "
    )
    if user_feedback_difficulty == "h":
        difficulty = difficulty + 1
    elif user_feedback_difficulty == "e":
        difficulty = difficulty - 1
    return delay, difficulty
def random_flow(DG, entry_point_indx, max_poses, field_val, delay, viewer, use_viewer):
    """Random walk over the pose graph, announcing each pose as it is visited.

    Starting from entry_point_indx, repeatedly pick a random successor,
    print its `field_val` attribute (plus hindi name / description when
    present), optionally display a matching picture, and sleep `delay`
    seconds between poses.  Returns the list of visited pose indices.
    """
    pose_history = []  # all the poses
    # NOTE(review): symmetry_history is filled identically to pose_history
    # and never read or returned -- looks vestigial; confirm.
    symmetry_history = []  # left-right cycle
    print("number of poses: " + str(max_poses))
    print("delay: " + str(delay) + " seconds")
    if use_viewer:
        print("launching pictures")
    else:
        print("not launching pictures")
    print("\nentry point: ")
    print(str(entry_point_indx) + " = " + DG.nodes[entry_point_indx][field_val])
    current_indx = entry_point_indx
    pose_count = 1
    while pose_count < max_poses:
        # print(DG.nodes[current_indx])
        pose_history.append(current_indx)
        symmetry_history.append(current_indx)
        # display current pose picture
        if use_viewer:
            print("finding pictures")
            [foundpicture, picturename] = find_picture(current_indx, delay, viewer)
            print("pic=" + picturename)
            if foundpicture:
                launch_picture(picturename, delay, viewer)
        print("\a")  # audible tone
        # difficulty=1
        # [delay,difficulty]=get_user_feedback(delay,difficulty)
        """
        # set alarm
        signal.alarm(delay)
        s = input(delay,signal)
        # disable the alarm after success
        signal.alarm(0)
        print 'You typed', s
        """
        time.sleep(delay)
        # list next pose choices
        # print("choices:")
        # NOTE(review): `choices` is computed but unused below (the random
        # pick re-queries successors) -- likely leftover from the commented
        # listing block that follows.
        choices = DG.successors(current_indx)
        # print(choices)
        """
        for pose_indx in choices:
            if (DG.nodes[pose_indx]["two_sided"]==False):
                print(" "+str(pose_indx)+" = "+DG.nodes[pose_indx][field_val])
            else:
                print(" "+str(pose_indx)+" = "+DG.nodes[pose_indx][field_val]+", left side")
        """
        new_indx = random.choice(list(DG.successors(current_indx)))
        print("\nnext move:")
        print(str(new_indx) + " = " + DG.nodes[new_indx][field_val])
        if DG.nodes[new_indx]["hindi_name"] != "":
            print(DG.nodes[new_indx]["hindi_name"])
        if DG.nodes[new_indx]["description"] != "":
            print(DG.nodes[new_indx]["description"])
        current_indx = new_indx
        pose_count = pose_count + 1
    return pose_history
def produce_graphml(DG, filename):
    """Serialize DG to GraphML at `filename`."""
    nx.write_graphml(DG, filename)
|
bhpayne/yoga_graph
|
src/yoga_lib.py
|
Python
|
gpl-2.0
| 6,541
|
[
"VisIt"
] |
f41374f26b11c3b564e86d9d267975ab68b8d9b5056eff79dc6d4dcb87ae6dc3
|
#!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --doctests optimize
"""
import copy
import doctest
import glob
import inspect
import io
import os
import re
import shutil
import sys
import tempfile
import warnings
from argparse import ArgumentParser
from contextlib import contextmanager, redirect_stderr
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
import docutils.core
import numpy as np
import sphinx
from docutils.parsers.rst import directives
from pkg_resources import parse_version
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Register (or neuter) the Sphinx-specific directives that appear in scipy
# docstrings so docutils can parse them outside of a full Sphinx build.
if parse_version(sphinx.__version__) >= parse_version('1.5'):
    # Enable specific Sphinx directives
    from sphinx.directives.other import SeeAlso, Only
    directives.register_directive('seealso', SeeAlso)
    directives.register_directive('only', Only)
else:
    # Remove sphinx directives that don't run without Sphinx environment.
    # Sphinx < 1.5 installs all directives on import...
    directives._directives.pop('versionadded', None)
    directives._directives.pop('versionchanged', None)
    directives._directives.pop('moduleauthor', None)
    directives._directives.pop('sectionauthor', None)
    directives._directives.pop('codeauthor', None)
    directives._directives.pop('toctree', None)


BASE_MODULE = "scipy"

# Submodules whose refguide coverage and docstring examples are checked.
PUBLIC_SUBMODULES = [
    'cluster',
    'cluster.hierarchy',
    'cluster.vq',
    'constants',
    'fft',
    'fftpack',
    'fftpack.convolve',
    'integrate',
    'interpolate',
    'io',
    'io.arff',
    'io.wavfile',
    'linalg',
    'linalg.blas',
    'linalg.lapack',
    'linalg.interpolative',
    'misc',
    'ndimage',
    'odr',
    'optimize',
    'signal',
    'signal.windows',
    'sparse',
    'sparse.csgraph',
    'sparse.linalg',
    'spatial',
    'spatial.distance',
    'spatial.transform',
    'special',
    'stats',
    'stats.mstats',
    'stats.contingency',
    'stats.qmc',
]

# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
    'fftpack.convolve': 'fftpack',
    'io.wavfile': 'io',
    'io.arff': 'io',
}

# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
    'scipy.stats.kstwobign',  # inaccurate cdf or ppf
    'scipy.stats.levy_stable',
    'scipy.special.sinc',  # comes from numpy
    'scipy.misc.who',  # comes from numpy
    'scipy.optimize.show_options',
    'scipy.integrate.quad_explain',
    'io.rst',  # XXX: need to figure out how to deal w/ mat files
])

# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
    r'scipy\.sparse\.csgraph',
    r'scipy\.sparse\.linalg',
    r'scipy\.spatial\.distance',
    r'scipy\.linalg\.blas\.[sdczi].*',
    r'scipy\.linalg\.lapack\.[sdczi].*',
]

# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
    r'scipy\.special\..*_roots',  # old aliases for scipy.special.*_roots
    r'scipy\.special\.jn',  # alias for jv
    r'scipy\.ndimage\.sum',  # alias for sum_labels
    r'scipy\.integrate\.simps',  # alias for simpson
    r'scipy\.integrate\.trapz',  # alias for trapezoid
    r'scipy\.integrate\.cumtrapz',  # alias for cumulative_trapezoid
    r'scipy\.linalg\.solve_lyapunov',  # deprecated name
    r'scipy\.stats\.contingency\.chi2_contingency',
    r'scipy\.stats\.contingency\.expected_freq',
    r'scipy\.stats\.contingency\.margins',
    r'scipy\.stats\.reciprocal',
    r'scipy\.stats\.trapz',  # alias for trapezoid
]
# deprecated windows in scipy.signal namespace
for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
             'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
             'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
             'kaiser', 'nuttall', 'parzen', 'triang', 'tukey'):
    REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)

# Presumably flipped to True elsewhere when matplotlib imports
# successfully (the toggling code is not in this chunk) -- confirm.
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
    """
    Return `path` as either a relative or an absolute path name,
    whichever spelling is shorter.

    Non-string inputs (e.g. None) are returned unchanged.
    """
    if not isinstance(path, str):
        return path
    base = os.getcwd() if cwd is None else cwd
    absolute = os.path.abspath(path)
    relative = os.path.relpath(path, base)
    return absolute if len(absolute) <= len(relative) else relative
def find_names(module, names_dict):
    """Collect refguide entry names from ``module.__doc__``.

    Recognized entry formats:

    - three spaces, a name, then optionally whitespace, dashes and an
      explanation (the autosummary-style listing in scipy __init__
      docstrings; mostly reliable, may yield some false positives);
    - ``.. data::`` / ``.. function::`` directives;
    - (scipy.constants only) names quoted in double backticks.

    ``.. currentmodule::`` / ``.. module::`` directives switch the module
    that subsequent names are attributed to.

    Results are accumulated in place into `names_dict`, a mapping
    ``{module name: set of entry names}``.
    """
    patterns = [
        r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
        r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
    ]

    if module.__name__ == 'scipy.constants':
        patterns += ["^``([a-z_0-9A-Z]+)``"]

    patterns = [re.compile(pattern) for pattern in patterns]
    module_name = module.__name__

    for line in module.__doc__.splitlines():
        res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
        if res:
            module_name = res.group(1)
            continue

        for pattern in patterns:
            res = re.match(pattern, line)
            if res is not None:
                name = res.group(1)
                # NOTE: the original also built an unused dotted
                # "module.name" string here; dropped as dead code.
                names_dict.setdefault(module_name, set()).add(name)
                break
def get_all_dict(module):
    """Return ``(not_deprecated, deprecated, others)`` names for `module`.

    The public names (``__all__`` when present, otherwise ``dir()``) are
    partitioned into callables that raise DeprecationWarning when called
    (`deprecated`) and everything else (`not_deprecated`); `others` is the
    set of remaining attributes (private names, submodules, ...).
    """
    # A shallow list copy suffices here; the original used copy.deepcopy
    # on a list of strings, which is pointless extra work.
    if hasattr(module, "__all__"):
        all_dict = list(module.__all__)
    else:
        all_dict = list(dir(module))
    all_dict = [name for name in all_dict
                if not name.startswith("_")]
    # Leftovers from "from __future__ import ..." in Python-2-era modules.
    for name in ['absolute_import', 'division', 'print_function']:
        try:
            all_dict.remove(name)
        except ValueError:
            pass

    # Modules are almost always private; real submodules need a separate
    # run of refguide_check.
    all_dict = [name for name in all_dict
                if not inspect.ismodule(getattr(module, name, None))]

    deprecated = []
    not_deprecated = []
    for name in all_dict:
        f = getattr(module, name, None)
        if callable(f) and is_deprecated(f):
            deprecated.append(name)
        else:
            not_deprecated.append(name)

    others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))

    return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
    """Return sets of objects only in __all__, refguide, or completely missing."""
    # Names exported by the module but absent from the refguide listing,
    # unless explicitly allowed by REFGUIDE_AUTOSUMMARY_SKIPLIST.
    only_all = set()
    for name in all_dict:
        if name in names:
            continue
        qualified = module_name + '.' + name
        if not any(re.match(pat, qualified)
                   for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST):
            only_all.add(name)

    # Names listed in the refguide but not exported; a skiplisted name that
    # is not even an attribute of the module counts as truly missing.
    only_ref = set()
    missing = set()
    for name in names:
        if name in all_dict:
            continue
        qualified = module_name + '.' + name
        skiplisted = False
        for pat in REFGUIDE_ALL_SKIPLIST:
            if re.match(pat, qualified):
                skiplisted = True
                if name not in others:
                    missing.add(name)
                break
        if not skiplisted:
            only_ref.add(name)

    return only_all, only_ref, missing
def is_deprecated(f):
    """Return True if calling `f` emits a DeprecationWarning.

    The call is made with a deliberately invalid keyword argument so that
    it fails immediately; with warnings escalated to errors, a deprecated
    callable raises DeprecationWarning before anything else happens.
    """
    deprecated = False
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("error")
        try:
            f(**{"not a kwarg": None})
        except DeprecationWarning:
            deprecated = True
        except Exception:
            # Any other failure (typically TypeError) just means "not
            # deprecated" -- the call was never expected to succeed.
            pass
    return deprecated
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
    """Cross-check a module's ``__all__`` against its refguide listing.

    Parameters mirror the outputs of get_all_dict()/find_names().
    Returns a one-element list ``[(None, success_flag, output)]`` in the
    same shape the other checkers produce.
    """
    num_all = len(all_dict)
    num_ref = len(names)

    output = ""

    output += "Non-deprecated objects in __all__: %i\n" % num_all
    output += "Objects in refguide: %i\n\n" % num_ref

    only_all, only_ref, missing = compare(all_dict, others, names, module_name)
    dep_in_ref = only_ref.intersection(deprecated)
    only_ref = only_ref.difference(deprecated)

    if len(dep_in_ref) > 0:
        output += "Deprecated objects in refguide::\n\n"
        # BUG FIX: this previously listed *all* deprecated objects
        # (sorted(deprecated)); the header promises only the deprecated
        # objects that actually appear in the refguide.
        for name in sorted(dep_in_ref):
            output += "    " + name + "\n"

    if len(only_all) == len(only_ref) == len(missing) == 0:
        if dots:
            output_dot('.')
        return [(None, True, output)]
    else:
        if len(only_all) > 0:
            output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
            for name in sorted(only_all):
                output += "    " + name + "\n"

            output += "\nThis issue can be fixed by adding these objects to\n"
            output += "the function listing in __init__.py for this module\n"

        if len(only_ref) > 0:
            output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
            for name in sorted(only_ref):
                output += "    " + name + "\n"

            output += "\nThis issue should likely be fixed by removing these objects\n"
            output += "from the function listing in __init__.py for this module\n"
            output += "or adding them to __all__.\n"

        if len(missing) > 0:
            output += "ERROR: missing objects::\n\n"
            for name in sorted(missing):
                output += "    " + name + "\n"

        if dots:
            output_dot('F')
        return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
    """Run `text` through docutils and report reStructuredText errors.

    Returns ``(success, output)`` where `output` describes the failures
    (including a line-numbered dump of `text` when anything went wrong).
    """
    if text is None:
        if dots:
            output_dot('E')
        return False, "ERROR: %s: no documentation" % (name,)

    # Roles/directives that plain docutils does not know but Sphinx does;
    # "unknown role/directive" errors about these are not real problems.
    ok_unknown_items = set([
        'mod', 'currentmodule', 'autosummary', 'data',
        'obj', 'versionadded', 'versionchanged', 'module', 'class', 'meth',
        'ref', 'func', 'toctree', 'moduleauthor', 'deprecated',
        'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
    ])

    # Run through docutils
    error_stream = io.StringIO()

    def resolve(name, is_label=False):
        # Dummy name resolver so unresolved references do not error out.
        return ("http://foo", name)

    # Unique marker written per error so the stream can be split back
    # into individual error reports below.
    token = '<RST-VALIDATE-SYNTAX-CHECK>'

    docutils.core.publish_doctree(
        text, token,
        settings_overrides = dict(halt_level=5,
                                  traceback=True,
                                  default_reference_context='title-reference',
                                  default_role='emphasis',
                                  link_base='',
                                  resolve_name=resolve,
                                  stylesheet_path='',
                                  raw_enabled=0,
                                  file_insertion_enabled=0,
                                  warning_stream=error_stream))

    # Print errors, disregarding unimportant ones
    error_msg = error_stream.getvalue()
    errors = error_msg.split(token)
    success = True
    output = ""

    for error in errors:
        lines = error.splitlines()
        if not lines:
            continue

        m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
        if m:
            if m.group(1) in ok_unknown_items:
                continue

        # The "math" directive's :label: option is Sphinx-only; ignore it.
        m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
        if m:
            continue

        output += name + lines[0] + "::\n    " + "\n    ".join(lines[1:]).rstrip() + "\n"
        success = False

    if not success:
        output += "    " + "-"*72 + "\n"
        for lineno, line in enumerate(text.splitlines()):
            output += "    %-4d    %s\n" % (lineno+1, line)
        output += "    " + "-"*72 + "\n\n"

    if dots:
        output_dot('.' if success else 'F')
    return success, output
def output_dot(msg='.', stream=sys.stderr):
    """Emit a single progress character to `stream` and flush immediately."""
    print(msg, end='', file=stream, flush=True)
def check_rest(module, names, dots=True):
    """
    Check reStructuredText formatting of docstrings

    Returns: [(name, success_flag, output), ...]
    """
    # Plain data objects carry no docstring of their own worth checking.
    # (The historical try/except fallback for the Python 2 `unicode`
    # builtin was dead code on Python 3 and has been removed.)
    skip_types = (dict, str, float, int)

    results = []

    # Skip the module-level docstring when it is documented in a parent
    # module (module.__name__[6:] strips the leading "scipy." prefix).
    if module.__name__[6:] not in OTHER_MODULE_DOCS:
        results += [(module.__name__,) +
                    validate_rst_syntax(inspect.getdoc(module),
                                        module.__name__, dots=dots)]

    for name in names:
        full_name = module.__name__ + '.' + name
        obj = getattr(module, name, None)

        if obj is None:
            results.append((full_name, False, "%s has no docstring" % (full_name,)))
            continue
        elif isinstance(obj, skip_types):
            continue

        if inspect.ismodule(obj):
            text = inspect.getdoc(obj)
        else:
            try:
                text = str(get_doc_object(obj))
            except Exception:
                import traceback
                results.append((full_name, False,
                                "Error in docstring format!\n" +
                                traceback.format_exc()))
                continue

        # Stray control characters usually mean a missing r""" on a
        # docstring containing backslash escapes.
        m = re.search("([\x00-\x09\x0b-\x1f])", text)
        if m:
            msg = ("Docstring contains a non-printable character %r! "
                   "Maybe forgot r\"\"\"?" % (m.group(1),))
            results.append((full_name, False, msg))
            continue

        try:
            src_file = short_path(inspect.getsourcefile(obj))
        except TypeError:
            src_file = None

        if src_file:
            file_full_name = src_file + ':' + full_name
        else:
            file_full_name = full_name

        results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))

    return results
### Doctest helpers ####

# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}

# the namespace to do checks in -- Checker.check_output eval()s both the
# expected and the actual output against these names, so common spellings
# found in numpy/scipy reprs must resolve here.
CHECK_NAMESPACE = {
      'np': np,
      'assert_allclose': np.testing.assert_allclose,
      'assert_equal': np.testing.assert_equal,
      # recognize numpy repr's
      'array': np.array,
      'matrix': np.matrix,
      'int64': np.int64,
      'uint64': np.uint64,
      'int8': np.int8,
      'int32': np.int32,
      'float32': np.float32,
      'float64': np.float64,
      'dtype': np.dtype,
      'nan': np.nan,
      'NaN': np.nan,
      'inf': np.inf,
      'Inf': np.inf,}
class DTRunner(doctest.DocTestRunner):
    """DocTestRunner that feeds each example's source to the checker and
    suppresses cascading NameErrors after an unexpected exception."""

    # Separator emitted between examples in report output.
    DIVIDER = "\n"

    def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
        # Name of the item under test; cleared after its first report.
        self._item_name = item_name
        # Set once an example raised unexpectedly; subsequent NameErrors
        # are treated as fallout from that failure.
        self._had_unexpected_error = False
        doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
                                       optionflags=optionflags)

    def _report_item_name(self, out, new_line=False):
        # Mark the item name as reported (optionally emitting a separating
        # newline first); only acts the first time it is called.
        if self._item_name is not None:
            if new_line:
                out("\n")
            self._item_name = None

    def report_start(self, out, test, example):
        # Stash the example source on the checker so Checker.check_output
        # can consult it (e.g. for its matplotlib stopword list).
        self._checker._source = example.source
        return doctest.DocTestRunner.report_start(self, out, test, example)

    def report_success(self, out, test, example, got):
        if self._verbose:
            self._report_item_name(out, new_line=True)
        return doctest.DocTestRunner.report_success(self, out, test, example, got)

    def report_unexpected_exception(self, out, test, example, exc_info):
        # Ignore name errors after failing due to an unexpected exception
        exception_type = exc_info[0]
        if self._had_unexpected_error and exception_type is NameError:
            return
        self._had_unexpected_error = True

        self._report_item_name(out)
        return super().report_unexpected_exception(
            out, test, example, exc_info)

    def report_failure(self, out, test, example, got):
        self._report_item_name(out)
        return doctest.DocTestRunner.report_failure(self, out, test,
                                                    example, got)
class Checker(doctest.OutputChecker):
    """doctest output checker tolerant of numerical noise and numpy reprs.

    Beyond the stock checker, it accepts output that differs textually when
    the values compare numerically equal (within `atol`/`rtol`), parses
    namedtuple-style reprs back into tuples, ignores object addresses, and
    skips examples whose source matches a matplotlib "stopword".
    """
    # Matches memory addresses in reprs, e.g. "<function f at 0x7f...>".
    obj_pattern = re.compile(r'at 0x[0-9a-fA-F]+>')
    # The stock checker; tried before any fuzzy strategies.
    vanilla = doctest.OutputChecker()
    # Markers in the *expected* output that exempt an example from checking.
    rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
    # Source fragments (mostly matplotlib calls) whose output is ignored.
    stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
                 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
                 '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
                 '# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
                 '.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}

    def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
        # Whether to try converting "SomeResult(a=..., b=...)" reprs into
        # plain tuples before comparing.
        self.parse_namedtuples = parse_namedtuples
        self.atol, self.rtol = atol, rtol
        if ns is None:
            self.ns = dict(CHECK_NAMESPACE)
        else:
            self.ns = ns

    def check_output(self, want, got, optionflags):
        """Return True if `got` acceptably matches `want`.

        ``self._source`` (set by DTRunner.report_start) holds the example's
        source and is consulted for the stopword check.
        """
        # cut it short if they are equal
        if want == got:
            return True

        # skip stopwords in source
        if any(word in self._source for word in self.stopwords):
            return True

        # skip random stuff
        if any(word in want for word in self.rndm_markers):
            return True

        # skip function/object addresses
        if self.obj_pattern.search(got):
            return True

        # ignore comments (e.g. signal.freqresp)
        if want.lstrip().startswith("#"):
            return True

        # try the standard doctest
        try:
            if self.vanilla.check_output(want, got, optionflags):
                return True
        except Exception:
            pass

        # OK then, convert strings to objects
        try:
            a_want = eval(want, dict(self.ns))
            a_got = eval(got, dict(self.ns))
        except Exception:
            # Maybe we're printing a numpy array? This produces invalid python
            # code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
            # values. So, reinsert commas and retry.
            # TODO: handle (1) abberivation (`print(np.arange(10000))`), and
            # (2) n-dim arrays with n > 1
            s_want = want.strip()
            s_got = got.strip()
            cond = (s_want.startswith("[") and s_want.endswith("]") and
                    s_got.startswith("[") and s_got.endswith("]"))
            if cond:
                s_want = ", ".join(s_want[1:-1].split())
                s_got = ", ".join(s_got[1:-1].split())
                return self.check_output(s_want, s_got, optionflags)

            if not self.parse_namedtuples:
                return False
            # suppose that "want" is a tuple, and "got" is smth like
            # MoodResult(statistic=10, pvalue=0.1).
            # Then convert the latter to the tuple (10, 0.1),
            # and then compare the tuples.
            # (Reaching here means eval(want) succeeded but eval(got)
            # failed -- a_want is defined, got is likely a namedtuple repr.)
            try:
                num = len(a_want)
                regex = (r'[\w\d_]+\(' +
                         ', '.join([r'[\w\d_]+=(.+)']*num) +
                         r'\)')
                grp = re.findall(regex, got.replace('\n', ' '))
                if len(grp) > 1:  # no more than one for now
                    return False
                # fold it back to a tuple
                got_again = '(' + ', '.join(grp[0]) + ')'
                return self.check_output(want, got_again, optionflags)
            except Exception:
                return False

        # ... and defer to numpy
        try:
            return self._do_check(a_want, a_got)
        except Exception:
            # heterog tuple, eg (1, np.array([1., 2.]))
            try:
                return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
            except (TypeError, ValueError):
                return False

    def _do_check(self, want, got):
        # This should be done exactly as written to correctly handle all of
        # numpy-comparable objects, strings, and heterogeneous tuples
        try:
            if want == got:
                return True
        except Exception:
            pass
        return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
    """Run modified doctests for the set of `tests`.

    Returns ``(success, output)``: overall success flag plus the captured
    report text. (The previous docstring claimed a list of tuples, which
    did not match the actual return value.)
    """
    flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
    runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
                      verbose=verbose)

    output = io.StringIO(newline='')
    success = True
    # Redirect stderr to the stdout or output
    tmp_stderr = sys.stdout if doctest_warnings else output

    from scipy._lib._util import _fixed_default_rng

    @contextmanager
    def temp_cwd():
        # Run the doctests in a throwaway directory so file-writing
        # examples cannot pollute the working tree.
        cwd = os.getcwd()
        tmpdir = tempfile.mkdtemp()
        try:
            os.chdir(tmpdir)
            yield tmpdir
        finally:
            os.chdir(cwd)
            shutil.rmtree(tmpdir)

    # Run tests, trying to restore global state afterward
    cwd = os.getcwd()
    with np.errstate(), np.printoptions(), temp_cwd(), \
            redirect_stderr(tmp_stderr), \
            _fixed_default_rng():
        # try to ensure random seed is NOT reproducible
        np.random.seed(None)

        for t in tests:
            # Shorten the reported filename before it appears in output.
            t.filename = short_path(t.filename, cwd)
            fails, successes = runner.run(t, out=output.write)
            if fails > 0:
                success = False

    output.seek(0)
    return success, output.read()
def check_doctests(module, verbose, ns=None,
                   dots=True, doctest_warnings=False):
    """Check code in docstrings of the module's public symbols.

    Returns: list of [(item_name, success_flag, output), ...]
    """
    if ns is None:
        ns = dict(DEFAULT_NAMESPACE)

    results = []

    def record_failure(item, header):
        # Append a failure entry carrying the current traceback.
        import traceback
        results.append((item, False, header + "\n" + traceback.format_exc()))

    # Loop over non-deprecated items
    for name in get_all_dict(module)[0]:
        full_name = module.__name__ + '.' + name
        if full_name in DOCTEST_SKIPLIST:
            continue

        try:
            obj = getattr(module, name)
        except AttributeError:
            record_failure(full_name, "Missing item!")
            continue

        try:
            tests = doctest.DocTestFinder().find(obj, name, globs=dict(ns))
        except Exception:
            record_failure(full_name, "Failed to get doctests!")
            continue

        success, output = _run_doctests(tests, full_name, verbose,
                                        doctest_warnings)
        if dots:
            output_dot('.' if success else 'F')
        results.append((full_name, success, output))

    if HAVE_MATPLOTLIB:
        import matplotlib.pyplot as plt
        plt.close('all')

    return results
def check_doctests_testfile(fname, verbose, ns=None,
                            dots=True, doctest_warnings=False):
    """Doctest the examples embedded in a text (e.g. .rst) file.

    Mirrors `check_doctests`, differing mostly in test discovery, which
    is borrowed from stdlib's doctest.testfile.

    Returns: list of [(item_name, success_flag, output), ...]

    Notes
    -----
    A line can opt out of testing with ``#doctest: +SKIP``; varying output
    can be marked with ``# may vary`` or ``# random``::

        >>> plt.plot(...)  # doctest: +SKIP
        >>> random.randint(0,10)
        5  # random

    Pseudocode is weeded out block-wise: the file is split into chunks
    separated by blank lines, and any chunk whose examples contain a known
    pseudocode marker (e.g. ``some_module`` or ``ctypes.CDLL``) is skipped
    entirely, since surrounding examples in the same chunk typically depend
    on the pseudocode too.
    """
    results = []

    if ns is None:
        ns = dict(DEFAULT_NAMESPACE)

    short_name = os.path.split(fname)[1]
    if short_name in DOCTEST_SKIPLIST:
        return results

    with open(fname, encoding='utf-8') as f:
        text = f.read()

    PSEUDOCODE = set(['some_function', 'some_module', 'import example',
                      'ctypes.CDLL',  # likely needs compiling, skip it
                      'integrate.nquad(func,'  # ctypes integrate tutorial
                      ])

    # Split the text into blocks; keep only the ones free of pseudocode.
    parser = doctest.DocTestParser()
    good_parts = []
    for part in text.split('\n\n'):
        block_tests = parser.get_doctest(part, ns, fname, fname, 0)
        contains_pseudocode = any(marker in example.source
                                  for example in block_tests.examples
                                  for marker in PSEUDOCODE)
        if not contains_pseudocode:
            good_parts.append(part)

    # Reassemble the surviving blocks and doctest them as one unit.
    good_text = '\n\n'.join(good_parts)
    tests = parser.get_doctest(good_text, ns, fname, fname, 0)
    success, output = _run_doctests([tests], fname, verbose,
                                    doctest_warnings)

    if dots:
        output_dot('.' if success else 'F')
    results.append((fname, success, output))

    if HAVE_MATPLOTLIB:
        import matplotlib.pyplot as plt
        plt.close('all')

    return results
def init_matplotlib():
    """Switch matplotlib to the non-interactive Agg backend if it is
    installed, recording availability in the HAVE_MATPLOTLIB global."""
    global HAVE_MATPLOTLIB

    try:
        import matplotlib
    except ImportError:
        HAVE_MATPLOTLIB = False
    else:
        matplotlib.use('Agg')
        HAVE_MATPLOTLIB = True
def main(argv):
    """CLI driver: parse arguments, run the refguide/rst/doctest checks on
    the requested submodules (and optionally tutorial files), print a
    report, and exit 0 on success / 1 on any failure."""
    parser = ArgumentParser(usage=__doc__.lstrip())
    parser.add_argument("module_names", metavar="SUBMODULES", default=[],
                        nargs='*', help="Submodules to check (default: all public)")
    parser.add_argument("--doctests", action="store_true", help="Run also doctests")
    parser.add_argument("-v", "--verbose", action="count", default=0)
    parser.add_argument("--doctest-warnings", action="store_true",
                        help="Enforce warning checking for doctests")
    parser.add_argument("--skip-tutorial", action="store_true",
                        help="Skip running doctests in the tutorial.")
    args = parser.parse_args(argv)

    modules = []
    names_dict = {}

    if args.module_names:
        args.skip_tutorial = True
    else:
        args.module_names = list(PUBLIC_SUBMODULES)

    os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'

    # Submodules documented in a parent (OTHER_MODULE_DOCS) pull in that
    # parent so its docstring listing can be scanned.
    module_names = list(args.module_names)
    for name in list(module_names):
        if name in OTHER_MODULE_DOCS:
            name = OTHER_MODULE_DOCS[name]
            if name not in module_names:
                module_names.append(name)

    for submodule_name in module_names:
        module_name = BASE_MODULE + '.' + submodule_name
        __import__(module_name)
        module = sys.modules[module_name]

        if submodule_name not in OTHER_MODULE_DOCS:
            find_names(module, names_dict)

        # Only the explicitly requested submodules are actually checked.
        if submodule_name in args.module_names:
            modules.append(module)

    dots = True
    success = True
    results = []

    print("Running checks for %d modules:" % (len(modules),))

    if args.doctests or not args.skip_tutorial:
        init_matplotlib()

    for module in modules:
        if dots:
            if module is not modules[0]:
                sys.stderr.write(' ')
            sys.stderr.write(module.__name__ + ' ')
            sys.stderr.flush()

        all_dict, deprecated, others = get_all_dict(module)
        names = names_dict.get(module.__name__, set())

        mod_results = []
        mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
        mod_results += check_rest(module, set(names).difference(deprecated),
                                  dots=dots)
        if args.doctests:
            mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
                                          doctest_warnings=args.doctest_warnings)

        for v in mod_results:
            assert isinstance(v, tuple), v

        results.append((module, mod_results))

        if dots:
            sys.stderr.write("\n")
            sys.stderr.flush()

    if not args.skip_tutorial:
        base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
        tut_path = os.path.join(base_dir, 'doc', 'source', 'tutorial', '*.rst')
        print('\nChecking tutorial files at %s:' % os.path.relpath(tut_path, os.getcwd()))
        for filename in sorted(glob.glob(tut_path)):
            if dots:
                sys.stderr.write('\n')
                sys.stderr.write(os.path.split(filename)[1] + ' ')
                sys.stderr.flush()

            tut_results = check_doctests_testfile(filename, (args.verbose >= 2),
                                                  dots=dots, doctest_warnings=args.doctest_warnings)

            # Stub out a "module" object so tutorial results can share the
            # per-module reporting loop below (which reads only __name__).
            def scratch():
                pass  # stub out a "module", see below
            scratch.__name__ = filename
            results.append((scratch, tut_results))

        if dots:
            sys.stderr.write("\n")
            sys.stderr.flush()

    # Report results
    all_success = True

    for module, mod_results in results:
        success = all(x[1] for x in mod_results)
        all_success = all_success and success

        if success and args.verbose == 0:
            continue

        print("")
        print("=" * len(module.__name__))
        print(module.__name__)
        print("=" * len(module.__name__))
        print("")

        for name, success, output in mod_results:
            # name is None for the per-module __all__/refguide summary.
            if name is None:
                if not success or args.verbose >= 1:
                    print(output.strip())
                    print("")
            elif not success or (args.verbose >= 2 and output.strip()):
                print(name)
                print("-"*len(name))
                print("")
                print(output.strip())
                print("")

    if all_success:
        print("\nOK: refguide and doctests checks passed!")
        sys.exit(0)
    else:
        print("\nERROR: refguide or doctests have errors")
        sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
|
WarrenWeckesser/scipy
|
tools/refguide_check.py
|
Python
|
bsd-3-clause
| 31,826
|
[
"Gaussian"
] |
e53e8e5c49bf4ba9387108ec8124622d25f4e8ea526f20a49c38659775422445
|
#########################################################
#
# DO NOT EDIT THIS FILE. IT IS GENERATED AUTOMATICALLY. #
# PLEASE LOOK INTO THE README FOR MORE INFORMATION. #
#
#########################################################
# coding: utf-8
# # Image Loading and Preprocessing
#
# In this tutorial we're going to look at how we can load in images from a local file or a URL which you can then utilize in other tutorials or examples. Also, we're going to go in depth on the kinds of preprocessing that is necessary to utilize Caffe2 with images.
#
# #### Mac OSx Prerequisites
#
# If you don't already have these Python modules installed you'll need to do that now.
#
# ```
# sudo pip install scikit-image scipy matplotlib
# ```
# In[1]:
import skimage
import skimage.io as io
import skimage.transform
import sys
import numpy as np
import math
from matplotlib import pyplot
import matplotlib.image as mpimg
print("Required modules imported.")
# ## Test an Image
#
# In the code block below use IMAGE_LOCATION to load what you would like to test. Just change the comment flags to go through each round of the Tutorial. In this way, you'll get to see what happens with a variety of image formats and some tips on how you might preprocess them. If you want to try your own image, drop it in the images folder or use a remote URL. When you pick a remote URL, make it easy on yourself and try to find a URL that points to a common image file type and extension versus some long identifier or query string which might just break this next step.
#
# ## Color Issues
#
# Keep in mind when you load images from smartphone cameras that you may run into color formatting issues. Below we show an example of how flipping between RGB and BGR can impact an image. This would obviously throw off detection in your model. Make sure the image data you're passing around is what you think it is!
#
# ### Caffe Uses BGR Order
#
# Due to legacy support of OpenCV in Caffe and how it handles images in Blue-Green-Red (BGR) order instead of the more commonly used Red-Green-Blue (RGB) order, Caffe2 also expects **BGR** order. In many ways this decision helps in the long run as you use different computer vision utilities and libraries, but it also can be the source of confusion.
# In[2]:

# You can load either local IMAGE_FILE or remote URL
# For Round 1 of this tutorial, try a local image.
IMAGE_LOCATION = 'images/cat.jpg'

# For Round 2 of this tutorial, try a URL image with a flower:
# IMAGE_LOCATION = "https://cdn.pixabay.com/photo/2015/02/10/21/28/flower-631765_1280.jpg"
# IMAGE_LOCATION = "images/flower.jpg"

# For Round 3 of this tutorial, try another URL image with lots of people:
# IMAGE_LOCATION = "https://upload.wikimedia.org/wikipedia/commons/1/18/NASA_Astronaut_Group_15.jpg"
# IMAGE_LOCATION = "images/astronauts.jpg"

# For Round 4 of this tutorial, try a URL image with a portrait!
# IMAGE_LOCATION = "https://upload.wikimedia.org/wikipedia/commons/9/9a/Ducreux1.jpg"
# IMAGE_LOCATION = "images/Ducreux.jpg"

# Load the image as float32 in [0, 1]; per the tutorial text above it is
# read in RGB channel order with H, W, C layout.
img = skimage.img_as_float(skimage.io.imread(IMAGE_LOCATION)).astype(np.float32)

# test color reading
# show the original image
pyplot.figure()
pyplot.subplot(1,2,1)
pyplot.imshow(img)
pyplot.axis('on')
pyplot.title('Original image = RGB')
# show the image in BGR - just doing RGB->BGR temporarily for display
# (reversing the channel axis swaps R and B, leaving G in place)
imgBGR = img[:, :, (2, 1, 0)]
#pyplot.figure()
pyplot.subplot(1,2,2)
pyplot.imshow(imgBGR)
pyplot.axis('on')
pyplot.title('OpenCV, Caffe2 = BGR')
# As you can see in the example above, the difference in order is very important to keep in mind. In the code block below we'll be taking the image and converting to BGR order for Caffe to process it appropriately.
#
# But wait, there's more color fun...
#
# ### Caffe Prefers CHW Order
#
# Now what!? What's CHW, you ask? Well, there's also HWC! Both formats come up in image processing.
#
# - H: Height
# - W: Width
# - C: Channel (as in color)
#
# Digging even deeper into how image data can be stored is the memory allocation order. You might have noticed when we first loaded the image that we forced it through some interesting transformations. These were data transformations that let us play with the image as if it were a cube. What we see is on top of the cube, and manipulating the layers below can change what we view. We can tinker with its underlying properties and as you saw above, swap colors quite easily.
#
# For GPU processing, which is what Caffe2 excels at, this order needs to be CHW. For CPU processing, this order is generally HWC. Essentially, you're going to want to use CHW and make sure that step is included in your image pipeline. Tweak RGB to be BGR, which is encapsulated as this "C" payload, then tweak HWC, the "C" being the very same colors you just switched around.
#
# You may ask why! And the reason points to cuDNN which is what helps accelerate processing on GPUs. It uses only CHW, and we'll sum it up by saying it is faster.
#
# Given these two transformations, you might think that's enough, but it isn't. We still need to resize and/or crop and potentially look at things like orientation (rotation) and mirroring.
#
# ## Rotation and Mirroring
#
# This topic is usually reserved for images that are coming from a smart phone. Phones, in general, take great pictures, but do a horrible job communicating how the image was taken and what orientation it should be in. Then there's the user who does everything under the sun with their phone's cameras, making them do things its designer never expected. Cameras - right, because there are often two cameras and these two cameras take different sized pictures in both pixel count and aspect ratio, and not only that, they sometimes take them mirrored, and they sometimes take them in portrait and landscape modes, and sometimes they don't bother to tell which mode they were in.
#
# In many ways this is the first thing you need to evaluate in your pipeline, then look at sizing (described below), then figure out the color situation. If you're developing for iOS, then you're in luck, it's going to be relatively easy. If you're a super-hacker wizard developer with lead-lined shorts and developing for Android, then at least you have lead-lined shorts.
#
# The variability in the Android marketplace is wonderful and horrifying. In an ideal world, you could rely on the EXIF data in pictures coming from any camera and use that to decide orientation and mirroring and you'd have one simple case function to handle your transformations. No such luck, but you're not alone. Many have come before you and suffered for you.
#
# ### Library for Handling Mobile Images
#
# Hooray! We're going to give you something to ease your pain. These are not foolproof. Users can and will defy you and every other developer's best attempts to handle their images. Here we'll link to some resources that can be used depending on the platform.
#
# Image Preprocessing Libraries and/or Snippits
# - [iOS](#)
# - [Android](#)
# - [Python](#)
# In the meantime though, let's play with some images and show the basics for manipulations that you might need to do.
# In[3]:

# Image came in sideways - it should be a portrait image!
# How you detect this depends on the platform
# Could be a flag from the camera object
# Could be in the EXIF data
# ROTATED_IMAGE = "https://upload.wikimedia.org/wikipedia/commons/8/87/Cell_Phone_Tower_in_Ladakh_India_with_Buddhist_Prayer_Flags.jpg"
ROTATED_IMAGE = "images/cell-tower.jpg"
# Same load-as-float32 preprocessing as the main test image above.
imgRotated = skimage.img_as_float(skimage.io.imread(ROTATED_IMAGE)).astype(np.float32)
pyplot.figure()
pyplot.imshow(imgRotated)
pyplot.axis('on')
pyplot.title('Rotated image')

# Image came in flipped or mirrored - text is backwards!
# Again detection depends on the platform
# This one is intended to be read by drivers in their rear-view mirror
# MIRROR_IMAGE = "https://upload.wikimedia.org/wikipedia/commons/2/27/Mirror_image_sign_to_be_read_by_drivers_who_are_backing_up_-b.JPG"
MIRROR_IMAGE = "images/mirror-image.jpg"
imgMirror = skimage.img_as_float(skimage.io.imread(MIRROR_IMAGE)).astype(np.float32)
pyplot.figure()
pyplot.imshow(imgMirror)
pyplot.axis('on')
pyplot.title('Mirror image')
# So you can see that we kind of have some problems. If we're detecting places, landmarks, or objects, a sideways cell tower is no good. If we're detecting text and doing automatic language translation, then mirrored text is no good. But hey, maybe you want to make a model that can detect English both ways. That would be awesome, but not for this tutorial!
#
# Let's transform these babies into something Caffe2 and the standard detection models we have around can detect. Also, this little trick might save you if, say for example, you really had to detect the cell tower but there's no EXIF data to be found: then you'd cycle through every rotation, and every flip, spawning many derivatives of this photo and run them all through. When the percentage of confidence of detection is high enough, Bam!, you found the orientation you needed and that sneaky cell tower.
#
# Anyway, to the example code:
# In[4]:

# Run me to flip the image back and forth
# np.fliplr mirrors along the vertical axis (left <-> right); re-running
# this cell toggles the image between the two states.
imgMirror = np.fliplr(imgMirror)
pyplot.figure()
pyplot.imshow(imgMirror)
pyplot.axis('off')
pyplot.title('Mirror image')

# In[5]:

# Run me to rotate the image 90 degrees
# np.rot90 rotates counter-clockwise in the plane of the first two axes.
imgRotated = np.rot90(imgRotated)
pyplot.figure()
pyplot.imshow(imgRotated)
pyplot.axis('off')
pyplot.title('Rotated image')
# ## Sizing
#
# Part of preprocessing is resizing. For reasons we won't get into here, images in the Caffe2 pipeline should be square. Also, to help with performance, they should be resized to a standard height and width which is usually going to be smaller than your original source. In the example below we're resizing to 256 x 256 pixels, however you might notice that the `input_height` and `input_width` is set to 224 x 224 which is then used to specify the crop. This is what several image-based models are expecting. They were trained on images sized to 224 x 224 and in order for the model to properly identify the suspect images you throw at it, these should also be 224 x 224.
#
# ** Make sure you double-check the input sizes for the model you're using!**
# In[6]:

# Model is expecting 224 x 224, so resize/crop needed.
# Here are the steps we use to preprocess the image.
# (1) Resize the image to 256*256, and crop out the center.
input_height, input_width = 224, 224
# BUG FIX: "print(...) % (...)" is a Python-2 leftover. In Python 3, print
# is a function returning None, so the trailing "% (...)" raises
# TypeError; the formatting must happen inside the call.
print("Model's input shape is %dx%d" % (input_height, input_width))
# print("Original image is %dx%d" % img.shape[:2])
img256 = skimage.transform.resize(img, (256, 256))
pyplot.figure()
pyplot.imshow(img256)
pyplot.axis('on')
pyplot.title('Resized image to 256x256')
print("New image shape:" + str(img256.shape))
# Note the resizing has distorted the image a little bit. It is important to recognize this effect during your processing as it can have an effect on the results of your model. Flowers and animals might be ok with a little stretching or squeezing, but facial features may not.
#
# This can happen when the dimensions of the original image are not proportionally exact to your desired size. In this particular example it would have been better to just resize to 224x224 and not bother cropping. Let's try another strategy of rescaling the image and maintaining the aspect ratio.
#
# ### Rescaling
#
# If you imagine portrait images versus landscape images you'll know that there are a lot of things that can get messed up by doing a sloppy resize. Rescaling is assuming that you're locking down the aspect ratio to prevent distortion in the image. In this case, we'll scale down the image to the shortest side that matches with the model's input size.
#
# In our example here, the model size is 224 x 224. As you look at your monitor in 1920x1080, it is longer in width than height and if you shrunk it down to 224, you'd run out of height before you ran out of width, so...
#
# - Landscape: limit resize by the height
# - Portrait: limit resize by the width
# In[7]:
print("Original image shape:" + str(img.shape) + " and remember it should be in H, W, C!")
# BUG FIX: formatting moved inside print's parentheses (the original
# print("...") % (...) pattern breaks under Python 3).
print("Model's input shape is %dx%d" % (input_height, input_width))
# Aspect ratio = width / height (the image is in H, W, C order).
aspect = img.shape[1] / float(img.shape[0])
print("Original aspect ratio: " + str(aspect))
# The three cases are mutually exclusive, so use a single if/elif/else chain.
if aspect > 1:
    # landscape orientation - wide image: limit the resize by height
    res = int(aspect * input_height)
    imgScaled = skimage.transform.resize(img, (input_height, res))
elif aspect < 1:
    # portrait orientation - tall image: limit the resize by width
    res = int(input_width / aspect)
    imgScaled = skimage.transform.resize(img, (res, input_width))
else:
    # already square: resize straight to the model's input size
    imgScaled = skimage.transform.resize(img, (input_height, input_width))
pyplot.figure()
pyplot.imshow(imgScaled)
pyplot.axis('on')
pyplot.title('Rescaled image')
print("New image shape:" + str(imgScaled.shape) + " in HWC")
# At this point only one dimension is set to what the model's input requires. We still need to crop one side to make a square.
#
# ### Cropping
#
# There are a variety of strategies we could utilize. In fact, we could backpedal and decide to do a center crop. So instead of scaling down to the smallest we could get on at least one side, we take a chunk out of the middle. If we had done that without scaling we would have ended up with just part of a flower petal, so we still needed some resizing of the image.
#
# Below we'll try a few strategies for cropping:
#
# 1. Just grab the exact dimensions you need from the middle!
# 2. Resize to a square that's pretty close then grab from the middle.
# 3. Use the rescaled image and grab the middle.
# In[8]:
# Compare the images and cropping strategies.
# First, try a center crop on the original (unresized) image for comparison.
print("Original image shape:" + str(img.shape) + " and remember it should be in H, W, C!")
def crop_center(img, cropx, cropy):
    """Return the centered cropx-wide by cropy-tall window of an HWC image.

    All channels are kept; only the spatial axes are cropped.
    """
    rows, cols, _channels = img.shape
    x0 = cols // 2 - cropx // 2
    y0 = rows // 2 - cropy // 2
    return img[y0:y0 + cropy, x0:x0 + cropx]
# NOTE: for symmetry with skimage.transform.resize this could accept a
# (height, width) tuple instead of two separate arguments.
pyplot.figure()
# Strategy 1: center crop taken straight from the original image (no resize).
imgCenter = crop_center(img,224,224)
pyplot.subplot(1,3,1)
pyplot.imshow(imgCenter)
pyplot.axis('on')
pyplot.title('Original')
# Strategy 2: center crop of the distorted image (squeezed to 256x256 earlier).
img256Center = crop_center(img256,224,224)
pyplot.subplot(1,3,2)
pyplot.imshow(img256Center)
pyplot.axis('on')
pyplot.title('Squeezed')
# Strategy 3: center crop of the aspect-preserving rescaled image.
imgScaledCenter = crop_center(imgScaled,224,224)
pyplot.subplot(1,3,3)
pyplot.imshow(imgScaledCenter)
pyplot.axis('on')
pyplot.title('Scaled')
# As you can see that didn't work out so well, except for maybe the last one. The middle one may be just fine too, but you won't know until you try on the model and test a lot of candidate images.
# At this point we can look at the difference we have, split it in half and remove some pixels from each side. This does have a drawback, however, as an off-center subject of interest would get clipped.
# If you've run this tutorial a few times now and are on Round 3, you'll notice a pretty big problem. You're missing astronauts! You can still see the issue with the flower from Round 2 as well. Things are missing after the cropping and that could cause you problems. Think of it this way: if you don't know how the model you're using was prepared then you don't know how to conform your images, so take care to test results! If the model used a lot of different aspect ratio images and just squeezed them to conform to a square then there's a good chance that over time and lots of samples it "learned" what things look like squeezed and can make a match. However, if you're looking for details like facial features and landmarks, or really nuanced elements in any image, this could be dangerous and error-prone.
#
# #### Further Strategies?
#
# Another strategy would be to rescale to the best size you can, with real data, but then pad the rest of the image with information that you can safely ignore in your model. We'll save that for another tutorial though since you've been through enough here!
#
# ### Upscaling
#
# What do you do when the images you want to run are "tiny"? In our example we've been prepping for Input Images with the spec of 224x224. Consider this 128x128 image below.
# 
# Now we're not talking about super-resolution or the CSI-effect where we can take blurry ATM photos and identify the tattoo on a perp's neck. Although, there are [some advances](https://github.com/david-gpu/srez) along these lines that deep learning has provided, and if you're reading this in time (before 3/1/17), go [check this out](https://developer.nvidia.com/zoom-enhance-magic-image-upscaling-using-deep-learning). What we want to do is simple, but, like cropping, it does have a variety of strategies you should consider.
#
# The most basic approach is going from a small square to a bigger square and using the defaults skimage provides for you. This `resize` method defaults the interpolation order parameter to 1 which happens to be bi-linear if you even cared, but it is worth mentioning because these might be the fine-tuning knobs you need later to fix problems, such as strange visual artifacts, that can be introduced in upscaling images.
# In[9]:
# Load a small 128x128 PNG and upscale it to the model's 224x224 input size.
imgTiny = "images/Cellsx128.png"
imgTiny = skimage.img_as_float(skimage.io.imread(imgTiny)).astype(np.float32)
print "Original image shape: ", imgTiny.shape
# resize defaults to order=1 interpolation (bi-linear), as discussed above.
imgTiny224 = skimage.transform.resize(imgTiny, (224, 224))
print "Upscaled image shape: ", imgTiny224.shape
# Plot original
pyplot.figure()
pyplot.subplot(1, 2, 1)
pyplot.imshow(imgTiny)
pyplot.axis('on')
pyplot.title('128x128')
# Plot upscaled
pyplot.subplot(1, 2, 2)
pyplot.imshow(imgTiny224)
pyplot.axis('on')
pyplot.title('224x224')
# Great, it worked! You can see in the shape outputs that you had (128, 128, 4) and you received (224, 224, 4). Wait a minute! 4? In every example so far the last value in shape has been 3! When we used a png file we entered a new reality; one where transparency is possible. This 4th value describes opacity, or transparency, depending if you're a glass-half-empty type. Anyway, we can handle it just fine, but keep an eye on that number.
#
# It's appropriate to put this discussion towards the end, but before we do further manipulations to the image, it's data order, and its overall payload. You can really mess up your data and the image if you do a simple resample on the image in its current format. Remember that it is currently a cube of data and that there's more going on in there right now than just Red, Green, and Blue (and opacity). Depending on when you decide to resize you'll have to account for that extra data.
#
# Let's break stuff! Try upscaling the image after you've switched the image to CHW.
# In[10]:
imgTiny = "images/Cellsx128.png"
imgTiny = skimage.img_as_float(skimage.io.imread(imgTiny)).astype(np.float32)
print "Image shape before HWC --> CHW conversion: ", imgTiny.shape
# swapping the axes to go from HWC to CHW
# uncomment the next line and run this block!
imgTiny = imgTiny.swapaxes(1, 2).swapaxes(0, 1)
print "Image shape after HWC --> CHW conversion: ", imgTiny.shape
# Resizing AFTER the CHW conversion is deliberately wrong: resize treats the
# first two axes as (rows, cols), so the channel axis gets resized too and
# the trailing axis keeps the old width (hence the (224, 224, 128) result).
imgTiny224 = skimage.transform.resize(imgTiny, (224, 224))
print "Image shape after resize: ", imgTiny224.shape
# we know this is going to go wrong, so...
try:
    # Plot original
    pyplot.figure()
    pyplot.subplot(1, 2, 1)
    pyplot.imshow(imgTiny)
    pyplot.axis('on')
    pyplot.title('128x128')
except:
    print "Here come bad things!"
    # hands up if you want to see the error (uncomment next line)
    #raise
# Epic fail, right? If you let the code block above swap the axes, then resize the image, you will see this output:
#
# `Image shape after resize: (224, 224, 128)`
#
# Now you have 128 where you should still have 4. Oops. Let's revert in the code block below and try something else. We'll show an example where the image is smaller than your Input specification, and not square. Like maybe it came from a new microscope that can only take imagery in rectangular bands.
# In[11]:
imgTiny = "images/Cellsx128.png"
imgTiny = skimage.img_as_float(skimage.io.imread(imgTiny)).astype(np.float32)
# Cut a 128-wide by 56-tall rectangular band out of the middle of the image,
# simulating a sensor that only captures non-square frames.
imgTinySlice = crop_center(imgTiny, 128, 56)
# Plot original
pyplot.figure()
pyplot.subplot(2, 1, 1)
pyplot.imshow(imgTiny)
pyplot.axis('on')
pyplot.title('Original')
# Plot slice
pyplot.figure()
pyplot.subplot(2, 2, 1)
pyplot.imshow(imgTinySlice)
pyplot.axis('on')
pyplot.title('128x56')
# Upscale? Stretching the rectangular slice to a square badly distorts it.
print "Slice image shape: ", imgTinySlice.shape
imgTiny224 = skimage.transform.resize(imgTinySlice, (224, 224))
print "Upscaled slice image shape: ", imgTiny224.shape
# Plot upscaled
pyplot.subplot(2, 2, 2)
pyplot.imshow(imgTiny224)
pyplot.axis('on')
pyplot.title('224x224')
# Alright, this was a bit of a stretch for an example of how upscaling can fail. Get it? Stretch? This could be a life-or-death kind of failure though. What if normal cells are circular and diseased cells are elongated and bent? Sickle cell anemia for example:
# 
# In this situation, what do you do? It really depends on the model and how it was trained. In some cases it may be ok to pad the rest of the image with white, or maybe black, or maybe noise, or maybe even use png and transparencies and set a mask for the images so the model ignores transparent areas. See how much fun you can have figuring this out and you get to make medical breakthroughs too!
# Let's move on to the last step which we've already mentioned and that is to adjust the image input to be in BGR order. There's also another feature that Caffe2 uses, which is a `batch term`. We've already talked about CHW. This is the N, for number of images in NCHW.
#
# ### Final Preprocessing and the Batch Term
#
# In the last steps below we are going to switch the image's data order to BGR, stuff that into the Color column, then reorder the columns for GPU processing (HWC-->CHW) and then add a fourth dimension (N) to the image to track the number of images. In theory, you can just keep adding dimensions to your data, but this one is required for Caffe2 as it relays to Caffe how many images to expect in this batch. We set it to one (1) to indicate there's only one image going into Caffe in this batch. Note that in the final output when we check `img.shape` the order is quite different. We've added N for number of images, and changed the order like so: `N, C, H, W`
# In[12]:
# this next line helps with being able to rerun this section
# if you want to try the outputs of the different crop strategies above
# swap out imgScaled with img (original) or img256 (squeezed)
imgCropped = crop_center(imgScaled,224,224)
print "Image shape before HWC --> CHW conversion: ", imgCropped.shape
# (1) Since Caffe expects CHW order and the current image is HWC,
# we will need to change the order.
imgCropped = imgCropped.swapaxes(1, 2).swapaxes(0, 1)
print "Image shape after HWC --> CHW conversion: ", imgCropped.shape
pyplot.figure()
for i in range(3):
    # For some reason, pyplot subplot follows Matlab's indexing
    # convention (starting with 1). Well, we'll just follow it...
    pyplot.subplot(1, 3, i+1)
    pyplot.imshow(imgCropped[i])
    pyplot.axis('off')
    pyplot.title('RGB channel %d' % (i+1))
# (2) Caffe uses a BGR order due to legacy OpenCV issues, so we
# will change RGB to BGR by reversing the channel axis with
# integer-array (fancy) indexing.
imgCropped = imgCropped[(2, 1, 0), :, :]
print "Image shape after BGR conversion: ", imgCropped.shape
# for discussion later - not helpful at this point
# (3) We will subtract the mean image. Note that skimage loads
# image in the [0, 1] range so we multiply the pixel values
# first to get them into [0, 255].
#mean_file = os.path.join(CAFFE_ROOT, 'python/caffe/imagenet/ilsvrc_2012_mean.npy')
#mean = np.load(mean_file).mean(1).mean(1)
#img = img * 255 - mean[:, np.newaxis, np.newaxis]
pyplot.figure()
for i in range(3):
    # Same 1-based subplot indexing convention as above.
    pyplot.subplot(1, 3, i+1)
    pyplot.imshow(imgCropped[i])
    pyplot.axis('off')
    pyplot.title('BGR channel %d' % (i+1))
# (4) finally, since caffe2 expect the input to have a batch term
# so we can feed in multiple images, we will simply prepend a
# batch dimension of size 1. Also, we will make sure image is
# of type np.float32.
imgCropped = imgCropped[np.newaxis, :, :, :].astype(np.float32)
print 'Final input shape is:', imgCropped.shape
# In the output above you should note these alterations:
# 1. Before and after of the HWC to CHW change. The 3, which is the number of color channels moved to the beginning.
# 2. In the pictures above you can see that the color order was switched too. RGB became BGR. Blue and Red switched places.
# 3. The final input shape, meaning the last change to the image was to add the batch field to the beginning, so that now you have (1, 3, 224, 224) for:
# - 1 image in the batch,
# - 3 color channels (in BGR),
# - 224 height,
# - 224 width.
|
Yangqing/caffe2
|
caffe2/python/tutorials/py_gen/Image_Pre-Processing_Pipeline.py
|
Python
|
apache-2.0
| 25,119
|
[
"TINKER"
] |
b305fdee59e7dab521f731cc394640a65801c12ebf712caf55bcee3cbd8fb579
|
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green (whgreen@mit.edu),
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import os
import distutils.spawn
import external.cclib as cclib
import itertools
import logging
from subprocess import Popen
import re
from rmgpy.molecule import Molecule
from qmdata import parseCCLibData
from molecule import QMMolecule
class Gaussian:
    """
    A base class for all QM calculations that use Gaussian.

    Classes such as :class:`GaussianMol` will inherit from this class.
    """

    inputFileExtension = '.gjf'
    outputFileExtension = '.log'

    # Locate a Gaussian executable. This search runs once, at
    # class-definition time, and leaves `executablePath` as a class attribute.
    # Newer g09 is tried before the older g03.
    executablesToTry = ('g09', 'g03')
    for exe in executablesToTry:
        try:
            executablePath = distutils.spawn.find_executable(exe)
        except:
            executablePath = None
        if executablePath is not None:
            break
    else:  # didn't break: nothing found on PATH, so try the Gaussian env vars
        logging.debug("Did not find Gaussian on path, checking if it exists in a declared GAUSS_EXEDIR, g09root or g03root...")
        gaussEnv = os.getenv('GAUSS_EXEDIR') or os.getenv('g09root') or os.getenv('g03root') or ""
        possibleDirs = gaussEnv.split(':')  # GAUSS_EXEDIR may be a list like "path1:path2:path3"
        for exe, possibleDir in itertools.product(executablesToTry, possibleDirs):
            executablePath = os.path.join(possibleDir, exe)
            if os.path.exists(executablePath):
                break
        else:  # didn't break: leave a placeholder path that testReady() will reject
            executablePath = os.path.join(gaussEnv, '(Gaussian 2003 or 2009)')

    usePolar = False

    #: List of phrases that indicate failure
    #: NONE of these must be present in a succesful job.
    failureKeys = [
        'ERROR TERMINATION',
        'IMAGINARY FREQUENCIES'
    ]

    #: List of phrases to indicate success.
    #: ALL of these must be present in a successful job.
    successKeys = [
        'Normal termination of Gaussian'
    ]

    def testReady(self):
        """Raise an exception if the Gaussian executable path does not exist."""
        if not os.path.exists(self.executablePath):
            raise Exception("Couldn't find Gaussian executable at {0}. Try setting your GAUSS_EXEDIR environment variable.".format(self.executablePath))

    def run(self):
        """Run Gaussian on the input file and return the verification result."""
        self.testReady()
        # submits the input file to Gaussian
        process = Popen([self.executablePath, self.inputFilePath, self.outputFilePath])
        process.communicate()  # necessary to wait for executable termination!
        return self.verifyOutputFile()

    def verifyOutputFile(self):
        """
        Check's that an output file exists and was successful.

        Returns a boolean flag that states whether a successful GAUSSIAN simulation already exists for the molecule with the
        given (augmented) InChI Key.

        The definition of finding a successful simulation is based on these criteria:
        1) finding an output file with the file name equal to the InChI Key
        2) NOT finding any of the keywords that are denote a calculation failure
        3) finding all the keywords that denote a calculation success.
        4) finding a match between the InChI of the given molecule and the InchI found in the calculation files
        5) checking that the optimized geometry, when connected by single bonds, is isomorphic with self.molecule (converted to single bonds)

        If any of the above criteria is not matched, False will be returned.
        If all are satisfied, it will return True.
        """
        if not os.path.exists(self.outputFilePath):
            logging.info("Output file {0} does not exist.".format(self.outputFilePath))
            return False
        InChIMatch=False #flag (1 or 0) indicating whether the InChI in the file matches InChIaug this can only be 1 if InChIFound is also 1
        InChIFound=False #flag (1 or 0) indicating whether an InChI was found in the log file
        # Initialize dictionary with "False"s
        successKeysFound = dict([(key, False) for key in self.successKeys])
        with open(self.outputFilePath) as outputFile:
            for line in outputFile:
                line = line.strip()
                for element in self.failureKeys: #search for failure keywords
                    if element in line:
                        logging.error("Gaussian output file contains the following error: {0}".format(element) )
                        return False
                for element in self.successKeys: #search for success keywords
                    if element in line:
                        successKeysFound[element] = True
                if line.startswith("InChI="):
                    logFileInChI = line #output files should take up to 240 characters of the name in the input file
                    InChIFound = True
                    if self.uniqueIDlong in logFileInChI:
                        InChIMatch = True
                    elif self.uniqueIDlong.startswith(logFileInChI):
                        # Gaussian truncates long names, so a prefix match is accepted.
                        logging.info("InChI too long to check, but beginning matches so assuming OK.")
                        InChIMatch = True
                    else:
                        logging.warning("InChI in log file ({0}) didn't match that in geometry ({1}).".format(logFileInChI, self.geometry.uniqueIDlong))
                        if self.geometry.uniqueIDlong.startswith(logFileInChI):
                            logging.warning("but the beginning matches so it's probably just a truncation problem.")
                            InChIMatch = True
        # Check that ALL 'success' keywords were found in the file.
        if not all( successKeysFound.values() ):
            logging.error('Not all of the required keywords for success were found in the output file!')
            return False
        if not InChIFound:
            logging.error("No InChI was found in the Gaussian output file {0}".format(self.outputFilePath))
            return False
        if not InChIMatch:
            #InChIs do not match (most likely due to limited name length mirrored in log file (240 characters), but possibly due to a collision)
            return self.checkForInChiKeyCollision(logFileInChI)  # Not yet implemented!
        # Compare the optimized geometry to the original molecule.
        qmData = self.parse()
        cclibMol = Molecule()
        cclibMol.fromXYZ(qmData.atomicNumbers, qmData.atomCoords.value)
        testMol = self.molecule.toSingleBonds()
        if not cclibMol.isIsomorphic(testMol):
            logging.info("Incorrect connectivity for optimized geometry in file {0}".format(self.outputFilePath))
            return False
        logging.info("Successful {1} quantum result in {0}".format(self.outputFilePath, self.__class__.__name__))
        return True

    def parse(self):
        """
        Parses the results of the Gaussian calculation, and returns a QMData object.
        """
        parser = cclib.parser.Gaussian(self.outputFilePath)
        parser.logger.setLevel(logging.ERROR)  #cf. http://cclib.sourceforge.net/wiki/index.php/Using_cclib#Additional_information
        cclibData = parser.parse()
        radicalNumber = sum([i.radicalElectrons for i in self.molecule.atoms])
        # Spin multiplicity passed to the parser is radical electrons + 1.
        qmData = parseCCLibData(cclibData, radicalNumber+1)
        return qmData
class GaussianMol(QMMolecule, Gaussian):
    """
    A base Class for calculations of molecules using Gaussian.

    Inherits from both :class:`QMMolecule` and :class:`Gaussian`.
    """

    def inputFileKeywords(self, attempt):
        """
        Return the top keywords for attempt number `attempt`.

        NB. `attempt` begins at 1, not 0.
        """
        assert attempt <= self.maxAttempts
        if attempt > self.scriptAttempts:
            # Wrap around so extra attempts reuse the scripted keyword list.
            attempt -= self.scriptAttempts
        return self.keywords[attempt-1]

    def writeInputFile(self, attempt):
        """
        Using the :class:`Geometry` object, write the input file
        for the `attempt`.
        """
        molfile = self.getMolFilePathForCalculation(attempt)
        # Matches "x y z Symbol" coordinate lines in the mol file.
        # FIX: raw string literal so the \s and \- escapes reach the regex
        # engine intact (the original non-raw string relied on deprecated
        # invalid escape sequences passing through unchanged).
        atomline = re.compile(r'\s*([\- ][0-9.]+\s+[\-0-9.]+\s+[\-0-9.]+)\s+([A-Za-z]+)')
        output = ['', self.geometry.uniqueIDlong, '']
        output.append("{charge} {mult}".format(charge=0, mult=(self.molecule.getRadicalCount() + 1)))
        atomCount = 0
        with open(molfile) as molinput:
            for line in molinput:
                match = atomline.match(line)
                if match:
                    # Gaussian wants "Symbol  x y z"; the mol file has them reversed.
                    output.append("{0:8s} {1}".format(match.group(2), match.group(1)))
                    atomCount += 1
        assert atomCount == len(self.molecule.atoms)
        output.append('')
        input_string = '\n'.join(output)
        top_keys = self.inputFileKeywords(attempt)
        with open(self.inputFilePath, 'w') as gaussianFile:
            gaussianFile.write(top_keys)
            gaussianFile.write('\n')
            gaussianFile.write(input_string)
            gaussianFile.write('\n')
            if self.usePolar:
                gaussianFile.write('\n\n\n')
                raise NotImplementedError("Not sure what should be here, if anything.")
                #gaussianFile.write(polar_keys)

    def generateQMData(self):
        """
        Calculate the QM data and return a QMData object.
        """
        # NOTE(review): molecules containing N5* atom types are skipped —
        # presumably unsupported by these semiempirical methods; confirm.
        for atom in self.molecule.vertices:
            if atom.atomType.label in ('N5s', 'N5d', 'N5dd', 'N5t', 'N5b'):
                return None
        if self.verifyOutputFile():
            logging.info("Found a successful output file already; using that.")
            source = "QM {0} calculation found from previous run.".format(self.__class__.__name__)
        else:
            self.createGeometry()
            success = False
            for attempt in range(1, self.maxAttempts+1):
                self.writeInputFile(attempt)
                logging.info('Trying {3} attempt {0} of {1} on molecule {2}.'.format(attempt, self.maxAttempts, self.molecule.toSMILES(), self.__class__.__name__))
                success = self.run()
                if success:
                    logging.info('Attempt {0} of {1} on species {2} succeeded.'.format(attempt, self.maxAttempts, self.molecule.toAugmentedInChI()))
                    source = "QM {0} calculation attempt {1}".format(self.__class__.__name__, attempt)
                    break
            else:
                # for/else: every attempt failed without breaking.
                logging.error('QM thermo calculation failed for {0}.'.format(self.molecule.toAugmentedInChI()))
                return None
        result = self.parse()  # parsed in cclib
        result.source = source
        return result  # a CCLibData object

    def getParser(self, outputFile):
        """
        Returns the appropriate cclib parser.
        """
        return cclib.parser.Gaussian(outputFile)
class GaussianMolPM3(GaussianMol):
    """
    Gaussian PM3 calculations for molecules

    This is a class of its own in case you wish to do anything differently,
    but for now it's only the 'pm3' in the keywords that differs.
    """
    #: Keywords that will be added at the top of the qm input file.
    #: They are tried in order, one per attempt, until a calculation succeeds.
    keywords = [
        # The combinations of keywords were derived by Greg Magoon for pm3 in Gaussian. His comments are attached to each combination.
        "# pm3 opt=(verytight,gdiis) freq IOP(2/16=3)",  # added IOP option to avoid aborting when symmetry changes; 3 is supposed to be default according to documentation, but it seems that 0 (the default) is the only option that doesn't work from 0-4; also, it is interesting to note that all 4 options seem to work for test case with z-matrix input rather than xyz coords; cf. http://www.ccl.net/cgi-bin/ccl/message-new?2006+10+17+005 for original idea for solution
        "# pm3 opt=(verytight,gdiis) freq IOP(2/16=3) IOP(4/21=2)",  # use different SCF method; this addresses at least one case of failure for a C4H7J species
        "# pm3 opt=(verytight,calcfc,maxcyc=200) freq IOP(2/16=3) nosymm",  # try multiple different options (no gdiis, use calcfc, nosymm); 7/21/09: added maxcyc option to fix case of MPTBUKVAJYJXDE-UHFFFAOYAPmult3 (InChI=1/C4H10O5Si/c1-3-7-9-10(5,6)8-4-2/h4-5H,3H2,1-2H3/mult3) (file manually copied to speed things along)
        "# pm3 opt=(verytight,calcfc,maxcyc=200) freq=numerical IOP(2/16=3) nosymm",  # numerical frequency keyword version of keyword #3; used to address GYFVJYRUZAKGFA-UHFFFAOYALmult3 (InChI=1/C6H14O6Si/c1-3-10-13(8,11-4-2)12-6-5-9-7/h6-7H,3-5H2,1-2H3/mult3) case; (none of the existing Gaussian or MOPAC combinations worked with it)
        "# pm3 opt=(verytight,gdiis,small) freq IOP(2/16=3)",  # somehow, this worked for problematic case of ZGAWAHRALACNPM-UHFFFAOYAF (InChI=1/C8H17O5Si/c1-3-11-14(10,12-4-2)13-8-5-7(9)6-8/h7-9H,3-6H2,1-2H3); (was otherwise giving l402 errors); even though I had a keyword that worked for this case, I manually copied the fixed log file to QMfiles folder to speed things along; note that there are a couple of very low frequencies (~5-6 cm^-1 for this case)
        "# pm3 opt=(verytight,nolinear,calcfc,small) freq IOP(2/16=3)",  # used for troublesome C5H7J2 case (similar error to C5H7J below); calcfc is not necessary for this particular species, but it speeds convergence and probably makes it more robust for other species
        "# pm3 opt=(verytight,gdiis,maxcyc=200) freq=numerical IOP(2/16=3)",  # use numerical frequencies; this takes a relatively long time, so should only be used as one of the last resorts; this seemed to address at least one case of failure for a C6H10JJ species; 7/15/09: maxcyc=200 added to address GVCMURUDAUQXEY-UHFFFAOYAVmult3 (InChI=1/C3H4O7Si/c1-2(9-6)10-11(7,8)3(4)5/h6-7H,1H2/mult3)...however, result was manually pasted in QMfiles folder to speed things along
        "# pm3 opt=tight freq IOP(2/16=3)",  # this worked for problematic case of SZSSHFMXPBKYPR-UHFFFAOYAF (InChI=1/C7H15O5Si/c1-3-10-13(8,11-4-2)12-7-5-6-9-7/h7H,3-6H2,1-2H3) (otherwise, it had l402.exe errors); corrected log file was manually copied to QMfiles to speed things along; we could also add a freq=numerical version of this keyword combination for added robustness; UPDATE: see below
        "# pm3 opt=tight freq=numerical IOP(2/16=3)",  # used for problematic case of CIKDVMUGTARZCK-UHFFFAOYAImult4 (InChI=1/C8H15O6Si/c1-4-12-15(10,13-5-2)14-7-6-11-8(7,3)9/h7H,3-6H2,1-2H3/mult4 (most other cases had l402.exe errors); corrected log file was manually copied to QMfiles to speed things along
        "# pm3 opt=(tight,nolinear,calcfc,small,maxcyc=200) freq IOP(2/16=3)",  # similar to existing #5, but uses tight rather than verytight; used for ADMPQLGIEMRGAT-UHFFFAOYAUmult3 (InChI=1/C6H14O5Si/c1-4-9-12(8,10-5-2)11-6(3)7/h6-7H,3-5H2,1-2H3/mult3)
        "# pm3 opt freq IOP(2/16=3)",  # use default (not verytight) convergence criteria; use this as last resort
        "# pm3 opt=(verytight,gdiis) freq=numerical IOP(2/16=3) IOP(4/21=200)",  # to address problematic C10H14JJ case
        "# pm3 opt=(calcfc,verytight,newton,notrustupdate,small,maxcyc=100,maxstep=100) freq=(numerical,step=10) IOP(2/16=3) nosymm",  # for very troublesome RRMZRNPRCUANER-UHFFFAOYAQ (InChI=1/C5H7/c1-3-5-4-2/h3H,1-2H3) case...there were troubles with negative frequencies, where I don't think they should have been; step size of numerical frequency was adjusted to give positive result; accuracy of result is questionable; it is possible that not all of these keywords are needed; note that for this and other nearly free rotor cases, I think heat capacity will be overestimated by R/2 (R vs. R/2) (but this is a separate issue)
        "# pm3 opt=(tight,gdiis,small,maxcyc=200,maxstep=100) freq=numerical IOP(2/16=3) nosymm",  # for troublesome QDERTVAGQZYPHT-UHFFFAOYAHmult3(InChI=1/C6H14O4Si/c1-4-8-11(7,9-5-2)10-6-3/h4H,5-6H2,1-3H3/mult3); key aspects appear to be tight (rather than verytight) convergence criteria, no calculation of frequencies during optimization, use of numerical frequencies, and probably also the use of opt=small
        "# pm3 opt=(verytight,gdiis,calcall) IOP(2/16=3)",  # used for troublesome C5H7J case; note that before fixing, I got errors like the following: "Incomplete coordinate system. Try restarting with Geom=Check Guess=Read Opt=(ReadFC,NewRedundant) Incomplete coordinate system. Error termination via Lnk1e in l103.exe"; we could try to restart, but it is probably preferrable to have each keyword combination standalone; another keyword that may be helpful if additional problematic cases are encountered is opt=small; 6/9/09 note: originally, this had # pm3 opt=(verytight,gdiis,calcall) freq IOP(2/16=3)" (with freq keyword), but I discovered that in this case, there are two thermochemistry sections and cclib parses frequencies twice, giving twice the number of desired frequencies and hence produces incorrect thermo; this turned up on C5H6JJ isomer
        "# pm3 opt=(verytight,gdiis,calcall,small,maxcyc=200) IOP(2/16=3) IOP(4/21=2) nosymm",  # worked for troublesome ketene case: CCGKOQOJPYTBIH-UHFFFAOYAO (InChI=1/C2H2O/c1-2-3/h1H2) (could just increase number of iterations for similar keyword combination above (#6 at the time of this writing), allowing symmetry, but nosymm seemed to reduce # of iterations; I think one of nosymm or higher number of iterations would allow the similar keyword combination to converge; both are included here for robustness)
        "# pm3 opt=(verytight,gdiis,calcall,small) IOP(2/16=3) nosymm",  # added for case of ZWMVZWMBTVHPBS-UHFFFAOYAEmult3 (InChI=1/C4H4O2/c1-3-5-6-4-2/h1-2H2/mult3)
        "# pm3 opt=(calcall,small,maxcyc=100) IOP(2/16=3)",  # used to address troublesome FILUFGAZMJGNEN-UHFFFAOYAImult3 case (InChI=1/C5H6/c1-3-5-4-2/h3H,1H2,2H3/mult3)
    ]
class GaussianMolPM6(GaussianMol):
    """
    Gaussian PM6 calculations for molecules.

    This is a class of its own in case you wish to do anything differently,
    but for now it's only the 'pm6' in the keywords that differs from the
    PM3 variant.
    """
    #: Keywords that will be added at the top of the qm input file.
    #: NOTE(review): these appear to be tried in order as successive fallbacks
    #: when a calculation fails (see the per-keyword troubleshooting comments
    #: on the pm3 list above) — confirm against the retry logic in GaussianMol.
    keywords = [
        # The combinations of keywords were derived by Greg Magoon for pm3. For now, we assume similar ones will work for pm6:
        "# pm6 opt=(verytight,gdiis) freq IOP(2/16=3)",
        "# pm6 opt=(verytight,gdiis) freq IOP(2/16=3) IOP(4/21=2)",
        "# pm6 opt=(verytight,calcfc,maxcyc=200) freq IOP(2/16=3) nosymm" ,
        "# pm6 opt=(verytight,calcfc,maxcyc=200) freq=numerical IOP(2/16=3) nosymm",
        "# pm6 opt=(verytight,gdiis,small) freq IOP(2/16=3)",
        "# pm6 opt=(verytight,nolinear,calcfc,small) freq IOP(2/16=3)",
        "# pm6 opt=(verytight,gdiis,maxcyc=200) freq=numerical IOP(2/16=3)",
        "# pm6 opt=tight freq IOP(2/16=3)",
        "# pm6 opt=tight freq=numerical IOP(2/16=3)",
        "# pm6 opt=(tight,nolinear,calcfc,small,maxcyc=200) freq IOP(2/16=3)",
        "# pm6 opt freq IOP(2/16=3)",
        "# pm6 opt=(verytight,gdiis) freq=numerical IOP(2/16=3) IOP(4/21=200)",
        "# pm6 opt=(calcfc,verytight,newton,notrustupdate,small,maxcyc=100,maxstep=100) freq=(numerical,step=10) IOP(2/16=3) nosymm",
        "# pm6 opt=(tight,gdiis,small,maxcyc=200,maxstep=100) freq=numerical IOP(2/16=3) nosymm",
        "# pm6 opt=(verytight,gdiis,calcall) IOP(2/16=3)",
        "# pm6 opt=(verytight,gdiis,calcall,small,maxcyc=200) IOP(2/16=3) IOP(4/21=2) nosymm",
        "# pm6 opt=(verytight,gdiis,calcall,small) IOP(2/16=3) nosymm",
        "# pm6 opt=(calcall,small,maxcyc=100) IOP(2/16=3)",
    ]
|
Molecular-Image-Recognition/Molecular-Image-Recognition
|
code/rmgpy/qm/gaussian.py
|
Python
|
mit
| 21,266
|
[
"Gaussian",
"MOPAC",
"cclib"
] |
ab4676492c0dab364e08c2de31a8f61b1107189cb952483d821cde06e9f9c79f
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.2
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_gslbgeodbprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of GslbGeoDbProfile Avi RESTful Object
description:
- This module is used to configure GslbGeoDbProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- Field introduced in 17.1.1.
entries:
description:
- List of geodb entries.
- An entry can either be a geodb file or an ip address group with geo properties.
- Field introduced in 17.1.1.
is_federated:
description:
- This field indicates that this object is replicated across gslb federation.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
name:
description:
- A user-friendly name for the geodb profile.
- Field introduced in 17.1.1.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the geodb profile.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create GslbGeoDbProfile object
avi_gslbgeodbprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_gslbgeodbprofile
"""
RETURN = '''
obj:
description: GslbGeoDbProfile (api/gslbgeodbprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """
    Ansible entry point: configure a GslbGeoDbProfile object on an Avi
    controller, creating/updating/deleting it according to ``state``.
    """
    # Module-specific options; one entry per field of the REST object.
    argument_specs = {
        'state': dict(default='present',
                      choices=['absent', 'present']),
        'description': dict(type='str',),
        'entries': dict(type='list',),
        'is_federated': dict(type='bool',),
        'name': dict(type='str', required=True),
        'tenant_ref': dict(type='str',),
        'url': dict(type='str',),
        'uuid': dict(type='str',),
    }
    # Common Avi connection options (controller, username, password, ...)
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=argument_specs,
                           supports_check_mode=True)
    # Bail out with a clear message when the SDK import failed at load time.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Delegate the actual REST interaction to the shared helper.
    return avi_ansible_api(module, 'gslbgeodbprofile',
                           set([]))
if __name__ == '__main__':
main()
|
bearstech/ansible
|
lib/ansible/modules/network/avi/avi_gslbgeodbprofile.py
|
Python
|
gpl-3.0
| 4,055
|
[
"VisIt"
] |
a9e17eb46a0f6ca981c9ef9e68bc3feb5bef06c382cd5e3880a50b95365017ac
|
#!/usr/bin/env python
"""
This is a script to converge the geometry of a system
"""
from __future__ import division
__author__ = "Stephen Dacek"
__version__ = "0.1"
__maintainer__ = "Stephen Dacek"
__email__ = "sdacek@mit.edu"
__status__ = "Beta"
__date__ = "11/3/13"
import logging
from custodian.custodian import Custodian
from custodian.vasp.handlers import VaspErrorHandler, UnconvergedErrorHandler, \
MeshSymmetryErrorHandler, NonConvergingErrorHandler, PotimErrorHandler
from custodian.vasp.jobs import VaspJob
from pymatgen.io.vasp.outputs import Vasprun
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO, filename="run.log")
def get_runs(args):
    """
    Generator of :class:`VaspJob` relaxations, yielded one at a time until
    the geometry converges within the first ionic step or ``args.max_relax``
    jobs have been produced.

    :param args: Parsed command-line arguments providing ``command``,
        ``max_relax`` and ``gzip``.
    """
    vasp_command = args.command.split()
    converged = False
    job_number = 0
    while not converged and job_number < args.max_relax:
        suffix = ".{}{}".format('relax', job_number + 1)
        # Only back up the original inputs for the very first job.
        backup = job_number == 0
        if backup:
            # Assume the initial guess is poor: start with conjugate
            # gradients (IBRION=2).
            settings = [
                {"dict": "INCAR",
                 "action": {"_set": {"IBRION": 2}}},
            ]
        else:
            # Converged when the previous run finished in a single ionic step.
            run = Vasprun("vasprun.xml")
            converged = len(run.ionic_steps) == 1
            if not converged and job_number < 2:
                # Keep conjugate gradients for the first couple of runs,
                # restarting from the previous wavefunction and geometry.
                settings = [
                    {"dict": "INCAR",
                     "action": {"_set": {"ISTART": 1}}},
                    {"file": "CONTCAR",
                     "action": {"_file_copy": {"dest": "POSCAR"}}},
                ]
            else:
                # Switch to RMM-DIIS (IBRION=1) once we are near the local
                # minimum (assumed after 2 runs of CG).
                settings = [
                    {"dict": "INCAR",
                     "action": {"_set": {"ISTART": 1, "IBRION": 1}}},
                    {"file": "CONTCAR",
                     "action": {"_file_copy": {"dest": "POSCAR"}}},
                ]
        job_number += 1
        yield VaspJob(vasp_command, final=converged, backup=backup,
                      suffix=suffix, settings_override=settings)
def do_run(args):
    """
    Run the geometry-convergence workflow under Custodian supervision.

    :param args: Parsed command-line arguments (see the argparse setup below).
    """
    # Error handlers that Custodian uses to detect and repair failed runs.
    error_handlers = [
        VaspErrorHandler(),
        MeshSymmetryErrorHandler(),
        UnconvergedErrorHandler(),
        NonConvergingErrorHandler(),
        PotimErrorHandler(),
    ]
    runner = Custodian(error_handlers, get_runs(args), max_errors=10,
                       gzipped_output=args.gzip)
    runner.run()
    logging.info("Geometry optimization complete")
# Script entry point: parse command-line options and hand them to do_run().
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="""
    converge_geometry performs a geometry optimization. What this script will do
    is run a particular VASP relaxation repeatedly until the geometry
    is converged within the first ionic step. This is a common practice for
    converging molecular geometries in VASP, especially in situations where
    the geometry needs to be precise: such as frequency calculations.
    """,
    epilog="""
    Author: Stephen Dacek
    Version: {}
    Last updated: {}""".format(__version__, __date__))
    # VASP launcher; may include an MPI wrapper (e.g. "mpirun pvasp").
    parser.add_argument(
        "-c", "--command", dest="command", nargs="?",
        default="pvasp", type=str,
        help="VASP command. Defaults to pvasp. If you are using mpirun, "
        "set this to something like \"mpirun pvasp\".", )
    parser.add_argument(
        "-z", "--gzip", dest="gzip", action="store_true",
        help="Add this option to gzip the final output. Do not gzip if you "
        "are going to perform an additional static run.")
    # NOTE(review): the long option is spelled "--max_relaxtions" (sic,
    # missing an 'a'); renaming it to "--max_relaxations" would break
    # existing invocations, so the typo is documented rather than fixed.
    parser.add_argument(
        "-mr", "--max_relaxtions", dest="max_relax",
        default=10, type=int,
        help="Maximum number of relaxations to allow")
    args = parser.parse_args()
    do_run(args)
|
specter119/custodian
|
custodian/cli/converge_geometry.py
|
Python
|
mit
| 3,807
|
[
"VASP",
"pymatgen"
] |
aa62e56548667c06816212787bea1f2b955dded562c806b35934c0ae9d4c0395
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Functions Analysis Module """
import json
import os
from operator import itemgetter, attrgetter, mul
from functools import reduce
from itertools import chain
from collections.abc import Mapping
from enum import IntEnum
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
import holoviews as hv
from lisa.utils import FrozenDict, memoized, unzip_into
from lisa.datautils import df_merge
from lisa.analysis.base import TraceAnalysisBase, AnalysisHelpers
from lisa.analysis.load_tracking import LoadTrackingAnalysis
from lisa.trace import MissingTraceEventError, requires_one_event_of
from lisa.conf import ConfigKeyError
from lisa.stats import Stats
from lisa.pelt import PELT_SCALE
class FunctionsAnalysis(TraceAnalysisBase):
    """
    Support for ftrace events-based kernel functions profiling and analysis
    """
    # Name under which this analysis is registered (e.g. trace.ana.functions)
    name = 'functions'
    def df_resolve_ksym(self, df, addr_col, name_col='func_name', addr_map=None, exact=True):
        """
        Resolve the kernel function names.

        .. note:: If the ``addr_col`` is not of a numeric dtype, it will be
            assumed to be function names already and the content will be copied
            to ``name_col``.

        :param df: Dataframe to augment
        :type df: pandas.DataFrame

        :param addr_col: Name of the column containing a kernel address.
        :type addr_col: str

        :param name_col: Name of the column to create with symbol names
        :type name_col: str

        :param addr_map: If provided, the mapping of kernel addresses to symbol
            names. If missing, the symbols addresses from the
            :class:`lisa.platforms.platinfo.PlatformInfo` attached to the trace
            will be used.
        :type addr_map: dict(int, str)

        :param exact: If ``True``, an exact symbol address is expected. If
            ``False``, symbol addresses are sorted and paired to form
            intervals, which are then used to infer the name. This is suited to
            resolve an instruction pointer that could point anywhere inside of
            a function (but before the starting address of the next function).
        :type exact: bool
        """
        trace = self.trace
        # Shallow copy: adding columns below will not modify the caller's
        # dataframe, while the underlying data is shared.
        df = df.copy(deep=False)
        # Names already resolved, we can just copy the address column to the
        # name one
        if not is_numeric_dtype(df[addr_col].dtype):
            df[name_col] = df[addr_col]
            return df
        if addr_map is None:
            addr_map = trace.plat_info['kernel']['symbols-address']
        if exact:
            # One-to-one lookup: unmapped addresses become NaN.
            df[name_col] = df[addr_col].map(addr_map)
        # Not exact means the function addresses will be used as ranges, so
        # we can find in which function any instruction point value is
        else:
            # Sort by address, so that each consecutive pair of address
            # constitue a range of address belonging to a given function.
            addr_list = sorted(
                addr_map.items(),
                key=itemgetter(0)
            )
            bins, labels = zip(*addr_list)
            # "close" the last bucket with the highest value possible of that column
            max_addr = np.iinfo(df[addr_col].dtype).max
            bins = list(bins) + [max_addr]
            name_i = pd.cut(
                df[addr_col],
                bins=bins,
                # Since our labels are not unique, we cannot pass it here
                # directly. Instead, use an index into the labels list
                labels=range(len(labels)),
                # Include the left boundary and exclude the right one
                include_lowest=True,
                right=False,
            )
            # Map each interval index back to its symbol name.
            df[name_col] = name_i.apply(lambda x: labels[x])
        return df
def _df_with_ksym(self, event, *args, **kwargs):
df = self.trace.df_event(event)
try:
return self.df_resolve_ksym(df, *args, **kwargs)
except ConfigKeyError:
self.logger.warning(f'Missing symbol addresses, function names will not be resolved: {e}')
return df
    @requires_one_event_of('funcgraph_entry', 'funcgraph_exit')
    @TraceAnalysisBase.cache
    def df_funcgraph(self, event):
        """
        Return augmented dataframe of the event with the following column:

        * ``func_name``: Name of the calling function if it could be
          resolved.

        :param event: One of:
            * ``entry`` (``funcgraph_entry`` event)
            * ``exit`` (``funcgraph_exit`` event)
        :type event: str
        """
        event = f'funcgraph_{event}'
        # exact=False: the 'func' column holds an instruction pointer that can
        # point anywhere inside a function, so resolve it by address range.
        return self._df_with_ksym(event, 'func', 'func_name', exact=False)
    @df_funcgraph.used_events
    @LoadTrackingAnalysis.df_cpus_signal.used_events
    def _get_callgraph(self, tag_df=None, thread_root_functions=None):
        """
        Build a :class:`_CallGraph` out of the ``funcgraph_entry`` and
        ``funcgraph_exit`` events, merged with CPU capacity updates and
        optional tag events. See :meth:`df_calls` for the meaning of
        ``tag_df`` and ``thread_root_functions``.
        """
        entry_df = self.df_funcgraph(event='entry').copy(deep=False)
        entry_df['event'] = _CallGraph._EVENT.ENTRY
        exit_df = self.df_funcgraph(event='exit').copy(deep=False)
        exit_df['event'] = _CallGraph._EVENT.EXIT
        # Attempt to get the CPU capacity signal to normalize the results
        capacity_cols = ['__cpu', 'event', 'capacity']
        try:
            capacity_df = self.ana.load_tracking.df_cpus_signal('capacity')
        except MissingTraceEventError:
            # No capacity events: use an empty frame so the merge still works.
            capacity_df = pd.DataFrame(columns=capacity_cols)
        else:
            capacity_df = capacity_df.copy(deep=False)
            capacity_df['__cpu'] = capacity_df['cpu']
            capacity_df['event'] = _CallGraph._EVENT.SET_CAPACITY
            capacity_df = capacity_df[capacity_cols]
        # Set a reasonable initial capacity
        try:
            orig_capacities = self.trace.plat_info['cpu-capacities']['orig']
        except KeyError:
            pass
        else:
            # Use non-positive timestamps (-1 * cpu) so these synthetic rows
            # sort before any real trace event — presumably df_merge() orders
            # by index; verify if that assumption ever changes.
            orig_capacities_df = pd.DataFrame.from_records(
                (
                    (-1 * cpu, cpu, _CallGraph._EVENT.SET_CAPACITY, cap)
                    for cpu, cap in orig_capacities.items()
                ),
                columns=['Time', '__cpu', 'event', 'capacity'],
                index='Time',
            )
            capacity_df = pd.concat((orig_capacities_df, capacity_df))
        to_merge = [entry_df, exit_df, capacity_df]
        if tag_df is not None:
            # Fold all non-__cpu columns of each row into a single "tags" dict
            # column, as expected by _CallGraph.from_df().
            cpu = tag_df['__cpu']
            tag_df = tag_df.drop(columns=['__cpu'])
            tag_df = pd.DataFrame(dict(
                tags=tag_df.apply(pd.Series.to_dict, axis=1),
                __cpu=cpu,
            ))
            tag_df['event'] = _CallGraph._EVENT.SET_TAG
            to_merge.append(tag_df)
        df = df_merge(to_merge)
        return _CallGraph.from_df(
            df,
            thread_root_functions=thread_root_functions
        )
    @_get_callgraph.used_events
    def df_calls(self, tag_df=None, thread_root_functions=None, normalize=True):
        """
        Return a :class:`pandas.DataFrame` with a row for each function call,
        along some metrics:

        * ``cum_time``: cumulative time spent in that function. This
          includes the time spent in all children too.
        * ``self_time``: time spent in that function only. This
          excludes the time spent in all children.

        :param tag_df: Dataframe containing the tag event, which is used to tag
            paths in the callgraph. The ``__cpu`` column is mandatory in order
            to know which CPU is to be tagged at any index. Other columns will
            be used as tag keys. Tags are inherited from both parents and
            children. This allows a leaf function to emit an event and use it
            for the whole path that lead to there. Equally, if a function emits
            a tag, all the children of this call will inherit the tag too. This
            allows a top-level function to tag a whole subtree at once.
        :type tag_df: pandas.DataFrame

        :param thread_root_functions: Functions that are considered to be a
            root of threads. When they appear in the callgraph, the profiler
            will consider the current function to be preempted and will not
            register the call as a child of it and will avoid to count it in
            the cumulative time.
        :type thread_root_functions: list(str) or None

        :param normalize: Normalize metrics according to the current CPU
            capacity so that they appear to have run on the fastest CPU at
            maximum frequency. This allows merging calls regardless of their
            origin (CPU and frequency).

            .. note:: Normalization only currently takes into account the
                capacity of the CPU when the function is entered. If it changes
                during execution, the result will be somewhat wrong.
        :type normalize: bool

        .. note:: Calls during which the current function name changes are not
            accounted for. They are typically a sign of functions that did not
            properly return, for example functions triggering a context switch
            and returning to userspace.
        """
        graph = self._get_callgraph(
            tag_df=tag_df,
            thread_root_functions=thread_root_functions,
        )
        metrics = _CallGraphNode._METRICS
        def get_metric(node, metric):
            # Scale the raw time by the entry-time CPU capacity so the value
            # looks like it was measured on the fastest CPU at max frequency.
            val = node[metric]
            if normalize:
                return (node.cpu_capacity / PELT_SCALE) * val
            else:
                return val
        return pd.DataFrame.from_records(
            (
                (
                    node.entry_time, node.cpu, node.func_name, FrozenDict(node.tags), node.tagged_name,
                    *(
                        get_metric(node, metric)
                        for metric in metrics
                    )
                )
                for node in graph.all_nodes
            ),
            columns=['Time', 'cpu', 'function', 'tags', 'tagged_name'] + metrics,
            index='Time',
        )
@df_calls.used_events
def compare_with_traces(self, others, normalize=True, **kwargs):
"""
Compare the :class:`~lisa.trace.Trace` it's called on with the other
traces passed as ``others``. The reference is the trace it's called on.
:returns: a :class:`lisa.stats.Stats` object just like
:meth:`profile_stats`.
:param others: List of traces to compare against.
:type others: list(lisa.trace.Trace)
:Variable keyword arguments: Forwarded to :meth:`profile_stats`.
"""
ref = self.trace
traces = [ref] + list(others)
paths = [
trace.trace_path
for trace in traces
]
common_prefix_len = len(os.path.commonprefix(paths))
common_suffix_len = len(os.path.commonprefix(list(map(lambda x: str(reversed(x)), paths))))
def get_name(trace):
name = trace.trace_path[common_prefix_len:common_suffix_len]
if not name:
if trace is ref:
name = 'ref'
else:
name = str(traces.index(trace))
return name
def get_df(trace):
df = self.df_calls(normalize=normalize)
df = df.copy(deep=False)
df['trace'] = get_name(trace)
return df
df = df_merge(map(get_df, traces))
ref_group = {
'trace': get_name(ref)
}
return self._profile_stats_from_df(df, ref_group=ref_group, **kwargs)
    @df_calls.used_events
    def profile_stats(self, tag_df=None, normalize=True, ref_function=None, ref_tags=None, **kwargs):
        """
        Create a :class:`lisa.stats.Stats` out of profiling information of the
        trace.

        :param tag_df: Dataframe of tags, forwarded to :meth:`df_calls`
        :type tag_df: pandas.DataFrame or None

        :param normalize: Normalize execution time according to CPU capacity,
            forwarded to :meth:`df_calls`
        :type normalize: bool

        :param metric: Name of the metric to use for statistics. Can be one of:

            * ``self_time``: Time spent in the function, not accounting for
              time spent in children
            * ``cum_time``: Total time spent in the function, including the
              time spent in children.

            Defaults to ``self_time``.
        :type metric: str

        :param functions: Restrict the statistics to the given list of
            function.
        :type functions: list(str) or None

        :param ref_function: Function to compare to.
        :type ref_function: str or None

        :param ref_tags: Function tags to compare to. Ignored if ``ref_function
            is None``.
        :type ref_tags: dict(str, set(object)) or None

        :param cpus: List of CPUs where the functions were called to take into
            account. If left to ``None``, all CPUs are considered.
        :type cpus: list(int) or None

        :param per_cpu: If ``True``, the per-function statistics are separated
            for each CPU they ran on. This is useful if the frequency was fixed and
            the only variation in speed was coming from the CPU it ran on.
        :type per_cpu: bool or None

        :param tags: Restrict the statistics to the function tagged with the
            given tag values. If a function has multiple values for a given tag
            and one of the value is in ``tags``, the function is selected.
        :type tags: dict(str, object)

        :Variable keyword arguments: Forwarded to :class:`lisa.stats.Stats`.

        .. note:: Recursive calls are treated as if they were inlined in their
            callers. This means that the count of calls will be counting the
            toplevel calls only, and that the ``self_time`` for a recursive
            function is directly linked to how much time each level consumes
            multiplied by the number of levels. ``cum_time`` will also be
            tracked on the top-level call only to provide a more accurate
            result.
        """
        df = self.df_calls(tag_df=tag_df, normalize=normalize)
        if ref_function:
            ref_tags = ref_tags or {}
            # 'f' matches the display column created by _profile_stats_from_df()
            # out of 'tagged_name', so the reference group must use the same
            # formatting.
            ref_group = {
                'f': _CallGraphNode.format_name(ref_function, ref_tags)
            }
        else:
            ref_group = None
        return self._profile_stats_from_df(df, ref_group=ref_group, **kwargs)
    @staticmethod
    def _profile_stats_from_df(df, metric='self_time', functions=None, per_cpu=True, cpus=None, tags=None, **kwargs):
        """
        Build a :class:`lisa.stats.Stats` out of a dataframe of calls as
        returned by :meth:`df_calls`. See :meth:`profile_stats` for the
        meaning of ``metric``, ``functions``, ``per_cpu``, ``cpus`` and
        ``tags``; extra keyword arguments (including ``ref_group``) are
        forwarded to :class:`lisa.stats.Stats`.
        """
        metrics = _CallGraphNode._METRICS
        # Get rid of the other value columns to avoid treating them as
        # tags
        other_metrics = set(metrics) - {metric}
        if functions:
            df = df[df['function'].isin(functions)]
        if cpus is not None:
            df = df[df['cpu'].isin(cpus)]
        if tags:
            # Select all rows that are a subset of the given tags
            def select_tag(row_tags):
                return all(
                    val in row_tags.get(tag, [])
                    for tag, val in tags.items()
                )
            df = df[df['tags'].apply(select_tag)]
        df = df.copy(deep=False)
        # Use tagged_name for display
        df['f'] = df['tagged_name']
        to_drop = list(other_metrics) + ['tags', 'function', 'tagged_name']
        # Calls are already uniquely identified by their timestamp, so grouping
        # per CPU is optional
        if not per_cpu:
            to_drop.append('cpu')
        df = df.drop(columns=to_drop)
        # Stats expects a 'unit' column; all metrics here are in seconds.
        df['unit'] = 's'
        index_name = df.index.name
        df = df.reset_index()
        return Stats(
            df,
            agg_cols=[index_name],
            value_col=metric,
            **kwargs,
        )
class _CallGraph:
    """
    Per-CPU forest of function calls, built from funcgraph entry/exit events.

    ``cpu_nodes`` maps each CPU number to the synthetic root
    :class:`_CallGraphNode` of the calls observed on that CPU.
    """
    class _EVENT(IntEnum):
        """
        To be used as events for the dataframe passed to
        :meth:`from_df`.
        """
        ENTRY = 1
        """Enter the given function"""
        EXIT = 2
        """Exit the given function"""
        SET_TAG = 3
        """
        Tag the current call graph path (parents and
        children) with the given value
        """
        SET_CAPACITY = 4
        """
        Set the capacity of the current CPU. Values are between 0 and
        :attr:`lisa.pelt.PELT_SCALE`.
        """
    def __init__(self, cpu_nodes):
        # dict(cpu, _CallGraphNode): root node of each per-CPU call tree
        self.cpu_nodes = cpu_nodes
    @property
    def all_nodes(self):
        """Iterate over every node of every per-CPU tree (roots excluded)."""
        return chain.from_iterable(
            node.indirect_children
            for node in self.cpu_nodes.values()
        )
    @classmethod
    def from_df(cls, df, thread_root_functions=None, ts_cols=('calltime', 'rettime')):
        """
        Build a :class:`_CallGraph` from a :class:`pandas.DataFrame` with the
        following columns:

        * ``event``: One of :class:`_CallGraph._EVENT` enumeration.
        * ``func_name``: Name of the function for ``entry`` and ``exit``
          events.
        * ``tags``: ``dict(str, object)`` of tags for ``tag`` event.

        :param thread_root_functions: Functions that are considered to be a
            root of threads. When they appear in the callgraph, the profiler
            will consider the current function to be preempted and will not
            register the call as a child of it and will avoid to count it in
            the cumulative time.
        :type thread_root_functions: list(str) or None

        :param ts_cols: Name of the columns for the
            :attr:`_CallGraph._EVENT.EXIT` rows that contain timestamps for
            entry and exit. If they are provided, they will be used instead of
            the index.
        :type ts_cols: tuple(str) or None
        """
        thread_root_functions = set(thread_root_functions) if thread_root_functions else set()
        def make_visitor():
            # Each visitor carries its own mutable state (current node,
            # capacity, thread counter) via closures.
            _max_thread = -1
            def make_thread():
                # Allocate a fresh logical thread ID
                nonlocal _max_thread
                _max_thread += 1
                return _max_thread
            root_node = _CallGraphNode(
                func_name=None,
                parent=None,
                cpu=None,
                cpu_capacity=None,
                logical_thread=make_thread(),
            )
            curr_node = root_node
            # This is expected to be overridden right away by a SET_CAPACITY
            # event
            curr_capacity = PELT_SCALE
            event_enum = cls._EVENT
            def visit(row):
                # Process one event row, updating the tree being built.
                nonlocal curr_node, curr_capacity
                curr_event = row['event']
                if curr_event == event_enum.ENTRY:
                    func_name = row['func_name']
                    cpu = row['__cpu']
                    # If we got preempted by a function that is considered to
                    # be part of different logical thread (e.g. the toplevel
                    # function of an ISR), create a new ID
                    if func_name in thread_root_functions:
                        logical_thread = make_thread()
                    # Otherwise, just inherit it from the parent
                    else:
                        logical_thread = curr_node.logical_thread
                    child = _CallGraphNode(
                        func_name=func_name,
                        cpu=cpu,
                        parent=curr_node,
                        cpu_capacity=curr_capacity,
                        entry_time=row.name,
                        logical_thread=logical_thread,
                    )
                    curr_node._children.append(child)
                    curr_node = child
                elif curr_event == event_enum.EXIT:
                    # We are trying to exit the root, which is probably the sign of
                    # a missing entry event (could have been cropped out of the
                    # trace). We therefore just ignore it.
                    if curr_node is not root_node:
                        # That node is unusable for stats, since the function
                        # used to enter the call is not the same one as for the
                        # exit. This usually means that the kernel returned to
                        # userspace in between.
                        if row['func_name'] != curr_node.func_name:
                            curr_node.valid_metrics = False
                        if ts_cols is None:
                            curr_node.exit_time = row.name
                        else:
                            # Timestamp columns are in nanoseconds
                            entry_ts, exit_ts = ts_cols
                            curr_node.entry_time = row[entry_ts] * 1e-9
                            curr_node.exit_time = row[exit_ts] * 1e-9
                        curr_node = curr_node.parent
                elif curr_event == event_enum.SET_TAG:
                    tags = row['tags']
                    curr_node.set_tags(tags)
                elif curr_event == event_enum.SET_CAPACITY:
                    curr_capacity = row['capacity']
                else:
                    raise ValueError(f'Unknown event "{curr_event}"')
            def finalize(df):
                # Fixup the exit time if there were missing exit events
                if curr_node is not root_node:
                    last_time = df.index[-1]
                    for node in chain([curr_node], curr_node.parents):
                        node.exit_time = last_time
                        node.valid_metrics = False
                # Give the synthetic root a time span covering its children
                root_children = root_node.children
                if root_children:
                    root_node.entry_time = min(map(attrgetter('entry_time'), root_children))
                    root_node.exit_time = max(map(attrgetter('exit_time'), root_children))
                else:
                    root_node.entry_time = 0
                    root_node.exit_time = 0
            return (root_node, visit, finalize)
        def build_graph(subdf):
            # Build one tree per CPU by replaying its events in order.
            root_node, visitor, finalizer = make_visitor()
            subdf.apply(visitor, axis=1)
            finalizer(subdf)
            return root_node
        return cls(
            cpu_nodes = {
                cpu: build_graph(subdf)
                for cpu, subdf in df.groupby('__cpu', observed=True)
            }
        )
class _CallGraphNode(Mapping):
    """
    Represent a function call extracted from some profiling information.

    The node behaves as a read-only mapping from metric names (see
    :attr:`_METRICS`) to their values.
    """
    __slots__ = [
        'func_name',
        'cpu',
        'cpu_capacity',
        '_tags',
        '_children',
        'parent',
        'logical_thread',
        'entry_time',
        'exit_time',
        'valid_metrics',
        '__weakref__',
    ]
    # Metric names exposed through the Mapping interface, in sorted order.
    _METRICS = sorted((
        'cum_time',
        'self_time',
    ))
    def __init__(self, func_name, parent, logical_thread, cpu, cpu_capacity, entry_time=None, exit_time=None, valid_metrics=True):
        self.func_name = func_name
        self.cpu = cpu
        self.cpu_capacity = cpu_capacity
        self.parent = parent
        self.logical_thread = logical_thread
        self._children = []
        self._tags = {}
        self.entry_time = entry_time
        self.exit_time = exit_time
        self.valid_metrics = valid_metrics
    def __hash__(self):
        # Identity-based hashing/equality: each call is a distinct node.
        return id(self)
    def __eq__(self, other):
        return self is other
    @property
    @memoized
    def _expanded_children(self):
        """
        Children list with recursive calls "inlined": any descendant that is
        part of a recursion chain on this function is replaced by its own
        children, reparented here.
        """
        def visit(node):
            children = node._children
            children_visit = map(visit, children)
            is_recursive, children_expansion = unzip_into(2, children_visit)
            # Check if we are part of any recursive chain
            is_recursive = any(is_recursive) or node.func_name == self.func_name
            # If we are part of a recursion chain, expand all of our children
            # so that they are reparented into our caller
            if is_recursive:
                expansion = list(chain.from_iterable(children_expansion))
            else:
                expansion = [node]
            return (is_recursive, expansion)
        return visit(self)[1]
    @property
    @memoized
    def children(self):
        """Expanded children belonging to the same logical thread."""
        return [
            child
            for child in self._expanded_children
            if not self._is_preempted_by(child)
        ]
    @property
    def _preempting_children(self):
        """Expanded children that preempted this call (other logical threads)."""
        return {
            child
            for child in self._expanded_children
            if self._is_preempted_by(child)
        }
    def _is_preempted_by(self, node):
        return self.logical_thread != node.logical_thread
    def _str(self, idt):
        idt_str = idt * '    '
        if self.children:
            children = ':\n' + '\n'.join(child._str(idt + 1) for child in self.children)
        else:
            children = ''
        return f'{idt_str}{self.func_name}, self={self["self_time"]}s cum={self["cum_time"]}s tags={self.tags}{children}'
    def __str__(self):
        return self._str(0)
    @property
    def tagged_name(self):
        """Function name decorated with its tags, for display."""
        return self.format_name(self.func_name, self.tags)
    @staticmethod
    def format_name(func_name, tags):
        """
        Format ``func_name`` with a deterministic rendering of ``tags``,
        e.g. ``foo (key=val1|val2)``.
        """
        tags = tags or {}
        tags = ', '.join(
            f'{tag}={"|".join(map(str, vals))}'
            for tag, vals in sorted(tags.items())
        )
        tags = f' ({tags})' if tags else ''
        return f'{func_name}{tags}'
    @memoized
    def __getitem__(self, key):
        if not self.valid_metrics:
            # BUGFIX: np.NaN alias was removed in NumPy 2.0; np.nan is the
            # portable spelling.
            return np.nan
        delta = self.exit_time - self.entry_time
        if key == 'self_time':
            return delta - sum(
                node.exit_time - node.entry_time
                # Subtract the time spent in all the children, including the
                # ones that preempted us
                for node in self._expanded_children
            )
        elif key == 'cum_time':
            # Define cum_time in terms of self_time, so that preempting
            # children are properly accounted for recursively
            return self['self_time'] + sum(
                node['cum_time']
                for node in self.children
            )
        else:
            raise KeyError(f'Unknown metric "{key}"')
    def __iter__(self):
        return iter(self._METRICS)
    def __len__(self):
        return len(self._METRICS)
    @property
    def _inherited_tags(self):
        """Tags merged from all parents and all indirect children."""
        def merge_tags(tags1, tags2):
            common_keys = tags1.keys() & tags2.keys()
            new = {
                tag: tags1[tag] | tags2[tag]
                for tag in common_keys
            }
            for tags in (tags1, tags2):
                new.update({
                    tag: tags[tag]
                    for tag in tags.keys() - common_keys
                })
            return new
        # Since merge_tags() is commutative (merge_tags(a, b) == merge_tags(b,
        # a)), we don't need any specific ordering on the parents
        nodes = chain(self.parents, self.indirect_children)
        tags = reduce(merge_tags, map(attrgetter('_tags'), nodes), {})
        return tags
    @property
    @memoized
    def tags(self):
        """Own tags overriding inherited ones, with values frozen for hashing."""
        return dict(
            (key, frozenset(vals))
            for key, vals in {
                **self._inherited_tags,
                **self._tags,
            }.items()
        )
    @property
    def parents(self):
        """Yield the chain of ancestors, closest first."""
        parent = self.parent
        if parent is not None:
            yield parent
            yield from parent.parents
    @property
    def indirect_children(self):
        """Yield all descendants (same logical thread), depth-first."""
        for child in self.children:
            yield child
            yield from child.indirect_children
    def set_tags(self, tags):
        """Record one value per tag key; values accumulate into sets."""
        for tag, val in tags.items():
            self._tags.setdefault(tag, set()).add(val)
class JSONStatsFunctionsAnalysis(AnalysisHelpers):
    """
    Support for kernel functions profiling and analysis

    :param stats_path: Path to JSON function stats as returned by devlib
        :meth:`devlib.collector.ftrace.FtraceCollector.get_stats`
    :type stats_path: str
    """
    name = 'functions_json'
    def __init__(self, stats_path):
        self.stats_path = stats_path
        # Opening functions profiling JSON data file
        with open(self.stats_path) as f:
            stats = json.load(f)
        # Build one DataFrame of function stats per CPU
        frames = {}
        for cpu, data in stats.items():
            frames[int(cpu)] = pd.DataFrame.from_dict(data, orient='index')
        # Build and keep track of the DataFrame, indexed by (cpu, function)
        self._df = pd.concat(list(frames.values()),
                             keys=list(frames.keys()))
    def get_default_plot_path(self, **kwargs):
        """Save plots next to the JSON stats file by default."""
        return super().get_default_plot_path(
            default_dir=os.path.dirname(self.stats_path),
            **kwargs,
        )
    def df_functions_stats(self, functions=None):
        """
        Get a DataFrame of specified kernel functions profile data

        For each profiled function a DataFrame is returned which reports stats
        on kernel functions execution time. The reported stats are per-CPU and
        includes: number of times the function has been executed (hits),
        average execution time (avg), overall execution time (time) and samples
        variance (s_2).
        By default returns a DataFrame of all the functions profiled.

        :param functions: the name of the function or a list of function names
            to report
        :type functions: list(str)
        """
        df = self._df
        if functions:
            # Level 1 of the MultiIndex is the function name
            return df.loc[df.index.get_level_values(1).isin(functions)]
        else:
            return df
    @AnalysisHelpers.plot_method
    def plot_profiling_stats(self, functions: str=None, metrics: str='avg'):
        """
        Plot functions profiling metrics for the specified kernel functions.

        For each specified metric a barplot is generated which report the value
        of the metric when the kernel function has been executed on each CPU.
        By default all the kernel functions are plotted.

        :param functions: the name of list of name of kernel functions to plot
        :type functions: str or list(str)

        :param metrics: the metrics to plot
            avg - average execution time
            time - total execution time
        :type metrics: str or list(str)
        """
        df = self.df_functions_stats(functions)
        # BUGFIX: accept a single metric name as documented. Without this, the
        # default metrics='avg' was iterated character by character, turning
        # into {'a', 'v', 'g'} and always failing the availability check.
        if isinstance(metrics, str):
            metrics = [metrics]
        # Check that all the required metrics are actually available
        available_metrics = df.columns.tolist()
        if not set(metrics).issubset(set(available_metrics)):
            msg = f'Metrics {(set(metrics) - set(available_metrics))} not supported, available metrics are {available_metrics}'
            raise ValueError(msg)
        def plot_metric(metric):
            # Human-readable label for the known metrics; fall back to the raw
            # metric name so ylabel is always defined.
            if metric.upper() == 'AVG':
                ylabel = 'Average completion time [us]'
            elif metric.upper() == 'TIME':
                ylabel = 'Total execution time [us]'
            else:
                ylabel = metric
            data = df[metric.casefold()].unstack()
            return hv.Bars(data).options(
                # BUGFIX: "title" was an undefined name here, so this plot
                # method always raised NameError.
                title=f'Functions profiling stats ({metric})',
                ylabel=ylabel,
                xlabel='CPU',
                invert_axes=True,
            )
        return reduce(mul, map(plot_metric, metrics)).options(
            title='Execution time stats',
        )
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
|
ARM-software/lisa
|
lisa/analysis/functions.py
|
Python
|
apache-2.0
| 32,153
|
[
"VisIt"
] |
7a8f9739112af853f91c0dca8a8dee8061596c93a22eca3d0e6c6d9113eb22e7
|
# Copyright 2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .boost import BoostDependency
from .cuda import CudaDependency
from .hdf5 import hdf5_factory
from .base import Dependency, InternalDependency, ExternalDependency, NotFoundDependency
from .base import (
ExternalLibrary, DependencyException, DependencyMethods,
BuiltinDependency, SystemDependency)
from .cmake import CMakeDependency
from .configtool import ConfigToolDependency
from .dub import DubDependency
from .framework import ExtraFrameworkDependency
from .pkgconfig import PkgConfigDependency
from .factory import DependencyFactory
from .detect import find_external_dependency, get_dep_identifier, packages, _packages_accept_language
from .dev import (
ValgrindDependency, JNISystemDependency, JDKSystemDependency, gmock_factory, gtest_factory,
llvm_factory, zlib_factory)
from .coarrays import coarray_factory
from .mpi import mpi_factory
from .scalapack import scalapack_factory
from .misc import (
BlocksDependency, OpenMPDependency, cups_factory, curses_factory, gpgme_factory,
libgcrypt_factory, libwmf_factory, netcdf_factory, pcap_factory, python3_factory,
shaderc_factory, threads_factory, ThreadDependency, iconv_factory, intl_factory,
dl_factory, openssl_factory, libcrypto_factory, libssl_factory,
)
from .platform import AppleFrameworks
from .qt import qt4_factory, qt5_factory, qt6_factory
from .ui import GnuStepDependency, WxDependency, gl_factory, sdl2_factory, vulkan_factory
# Names re-exported as the public API of this package. The factory
# functions imported above are deliberately not listed: they are exposed
# through the `packages` registry populated at the bottom of this module.
__all__ = [
    'Dependency',
    'InternalDependency',
    'ExternalDependency',
    'SystemDependency',
    'BuiltinDependency',
    'NotFoundDependency',
    'ExternalLibrary',
    'DependencyException',
    'DependencyMethods',
    'CMakeDependency',
    'ConfigToolDependency',
    'DubDependency',
    'ExtraFrameworkDependency',
    'PkgConfigDependency',
    'DependencyFactory',
    'ThreadDependency',
    'find_external_dependency',
    'get_dep_identifier',
]
"""Dependency representations and discovery logic.
Meson attempts to largely abstract away dependency discovery information, and
to encapsulate that logic itself so that the DSL doesn't have too much direct
information. There are some cases where this is impossible/undesirable, such
as the `get_variable()` method.
Meson has four primary dependency types:
1. pkg-config
2. apple frameworks
3. CMake
4. system
Plus a few more niche ones.
When a user calls `dependency('foo')` Meson creates a list of candidates, and
tries those candidates in order to find one that matches the criteria
provided by the user (such as version requirements, or optional components
that are required.)
Except to work around bugs or handle odd corner cases, pkg-config and CMake
generally just work™, though there are exceptions. Most of this package is
concerned with dependencies that don't (always) provide CMake and/or
pkg-config files.
For these cases one needs to write a `system` dependency. These dependencies
descend directly from `ExternalDependency`, in their constructor they
manually set up the necessary link and compile args (and additional
dependencies as necessary).
For example, imagine a dependency called Foo, it uses an environment variable
called `$FOO_ROOT` to point to its install root, which looks like this:
```txt
$FOO_ROOT
→ include/
→ lib/
```
To use Foo, you need its include directory, and you need to link to
`lib/libfoo.ext`.
You could write code that looks like:
```python
class FooSystemDependency(ExternalDependency):
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
super().__init__(name, environment, kwargs)
root = os.environ.get('FOO_ROOT')
if root is None:
mlog.debug('$FOO_ROOT is unset.')
self.is_found = False
return
lib = self.clib_compiler.find_library('foo', environment, [os.path.join(root, 'lib')])
if lib is None:
mlog.debug('Could not find lib.')
self.is_found = False
return
self.compile_args.append(f'-I{os.path.join(root, "include")}')
self.link_args.append(lib)
self.is_found = True
```
This code will look for `FOO_ROOT` in the environment, handle `FOO_ROOT` being
undefined gracefully, then set its `compile_args` and `link_args` gracefully.
It will also gracefully handle not finding the required lib (hopefully that
doesn't happen, but it could if, for example, the lib is only static and
shared linking is requested).
There are a couple of things about this that still aren't ideal. For one, we
don't want to be reading random environment variables at this point. Those
should actually be added to `envconfig.Properties` and read in
`environment.Environment._set_default_properties_from_env` (see how
`BOOST_ROOT` is handled). We can also handle the `static` keyword. So
now that becomes:
```python
class FooSystemDependency(ExternalDependency):
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
super().__init__(name, environment, kwargs)
root = environment.properties[self.for_machine].foo_root
if root is None:
mlog.debug('foo_root is unset.')
self.is_found = False
return
static = Mesonlib.LibType.STATIC if kwargs.get('static', False) else Mesonlib.LibType.SHARED
lib = self.clib_compiler.find_library(
'foo', environment, [os.path.join(root, 'lib')], libtype=static)
if lib is None:
mlog.debug('Could not find lib.')
self.is_found = False
return
self.compile_args.append(f'-I{os.path.join(root, "include")}')
self.link_args.append(lib)
self.is_found = True
```
This is nicer in a couple of ways. First we can properly cross compile as we
are allowed to set `FOO_ROOT` for both the build and host machines, it also
means that users can override this in their machine files, and if that
environment variables changes during a Meson reconfigure Meson won't re-read
it, this is important for reproducibility. Finally, Meson will figure out
whether it should be finding `libfoo.so` or `libfoo.a` (or the platform
specific names). Things are looking pretty good now, so it can be added to
the `packages` dict below:
```python
packages.update({
'foo': FooSystemDependency,
})
```
Now, what if foo also provides pkg-config, but it's only shipped on Unices,
or only included in very recent versions of the dependency? We can use the
`DependencyFactory` class:
```python
foo_factory = DependencyFactory(
'foo',
[DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM],
system_class=FooSystemDependency,
)
```
This is a helper function that will generate a default pkg-config based
dependency, and use the `FooSystemDependency` as well. It can also handle
custom finders for pkg-config and cmake based dependencies that need some
extra help. You would then add the `foo_factory` to packages instead of
`FooSystemDependency`:
```python
packages.update({
'foo': foo_factory,
})
```
If you have a dependency that is very complicated, (such as having multiple
implementations) you may need to write your own factory function. There are a
number of examples in this package.
_Note_ before we moved to factory functions it was common to use an
`ExternalDependency` class that would instantiate different types of
dependencies and hold the one it found. There are a number of drawbacks to
this approach, and no new dependencies should do this.
"""
# This is a dict where the keys should be strings, and the values must be one
# of:
# - An ExternalDependency subclass
# - A DependencyFactory object
# - A callable with a signature of (Environment, MachineChoice, Dict[str, Any]) -> List[Callable[[], ExternalDependency]]
# Registry mapping a dependency name (as used in `dependency('name')`)
# to its finder -- an ExternalDependency subclass, a DependencyFactory,
# or a callable returning a list of candidate constructors (see the
# comment block above for the exact contract).
packages.update({
    # From dev:
    'gtest': gtest_factory,
    'gmock': gmock_factory,
    'llvm': llvm_factory,
    'valgrind': ValgrindDependency,
    'zlib': zlib_factory,
    'jni': JNISystemDependency,
    'jdk': JDKSystemDependency,

    'boost': BoostDependency,
    'cuda': CudaDependency,

    # From the per-dependency modules (coarrays, hdf5, mpi, scalapack):
    'coarray': coarray_factory,
    'hdf5': hdf5_factory,
    'mpi': mpi_factory,
    'scalapack': scalapack_factory,

    # From misc:
    'blocks': BlocksDependency,
    'curses': curses_factory,
    'netcdf': netcdf_factory,
    'openmp': OpenMPDependency,
    'python3': python3_factory,
    'threads': threads_factory,
    'pcap': pcap_factory,
    'cups': cups_factory,
    'libwmf': libwmf_factory,
    'libgcrypt': libgcrypt_factory,
    'gpgme': gpgme_factory,
    'shaderc': shaderc_factory,
    'iconv': iconv_factory,
    'intl': intl_factory,
    'dl': dl_factory,
    'openssl': openssl_factory,
    'libcrypto': libcrypto_factory,
    'libssl': libssl_factory,

    # From platform:
    'appleframeworks': AppleFrameworks,

    # From ui:
    'gl': gl_factory,
    'gnustep': GnuStepDependency,
    'qt4': qt4_factory,
    'qt5': qt5_factory,
    'qt6': qt6_factory,
    'sdl2': sdl2_factory,
    'wxwidgets': WxDependency,
    'vulkan': vulkan_factory,
})
# Dependencies whose lookup honours the `language` keyword of
# dependency() -- presumably because they ship per-language (C / C++ /
# Fortran) variants; see detect.py for how this set is consumed.
_packages_accept_language.update({
    'hdf5',
    'mpi',
    'netcdf',
    'openmp',
})
|
mesonbuild/meson
|
mesonbuild/dependencies/__init__.py
|
Python
|
apache-2.0
| 9,768
|
[
"NetCDF"
] |
a055966b60b9afc880e38dd55794b4cedd5638d3b6ca76ca0bef2c2adba556bb
|
################################################################################
#
# Author: Bastian Knippschild (B.Knippschild@gmx.de)
# Date: October 2014
#
# Copyright (C) 2014 Bastian Knippschild
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tmLQCD. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
#
# Function: Computation of Luescher's Zeta Function. This program is based on
# arXiv:1107.5023v2, e.g. equation (5). The parameter \Lambda is set
# to 1 in this implementation.
#
# For information on input parameters see the description of the function.
#
################################################################################
#
# Performed tests:
# 1.) Against Mathematica code provided by Liuming Liu w. and w.o. tbc in cms
# and l=0, m=0
# 2.) Against data from arXiv:1107.5023v2 w. and w.o. moving frames and l=0, m=0
# 3.) Against data from arXiv:1011.5288 w. and w.0. moving frames and linear
# combinations of l=2, m=-2,0,2.
#
# See the test function at the very end of this file for more information!
#
################################################################################
import math
import cmath
import numpy as np
import scipy.special
import scipy.integrate
################################################################################
#
# Luescher's Zeta Function
#
# This is the ONLY function which should and needs to be called from outside.
#
# input: q2 : (IMPORTANT:) SQUARED scattering momentum fraction, ONLY
# MANDATORY INPUT PARAMETER !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# gamma : Lorentz factor for moving frames, see e.g. arXiv:1011.5288
# l : orbital quantum number
# m : magnetic quantum number
# d : total three momentum of the system. (TBC: d can be used as
# a twist angle as well. The correspondance is:
# d = -theta/pi !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# m_split : coefficient when the masses of the scattering particles are
# different. It is given by: m_split = 1+(m_1^2-m_2^2)/E_cm^2
# where E_cm^2 is the interacting energy in the cms.
# precision: precision of the calculation
# verbose : 0, no output on screen; 1, detailed output with convergence
# informations
#
# return: The value of Luescher's Zeta function as a COMPLEX number.
#
# minor details: The three terms A, B, and C correspond to the three terms of
# equation (5) in arXiv:1107.5023v2.
#
################################################################################
def Z(q2, gamma = 1.0, l = 0, m = 0, d = None, \
      m_split = 1, precision = 10e-6, verbose = 0):
    """Luescher's Zeta function -- the only public entry point.

    :param q2: SQUARED scattering momentum fraction (mandatory)
    :param gamma: Lorentz factor for moving frames (must be >= 1.0)
    :param l: orbital quantum number
    :param m: magnetic quantum number
    :param d: total three-momentum of the system (also usable as a twist
        angle, d = -theta/pi); defaults to the rest frame [0, 0, 0]
    :param m_split: 1 + (m_1^2 - m_2^2)/E_cm^2 for unequal masses
    :param precision: target relative precision of the shell sums
    :param verbose: if truthy, print convergence information
    :returns: the value of the Zeta function as a COMPLEX number
    :raises ValueError: if gamma < 1.0 (previously exit(0), which
        reported *success* to the shell on invalid input)
    """
    # avoid a mutable ndarray default argument; None means rest frame
    if d is None:
        d = np.array([0., 0., 0.])
    # some small checks
    if gamma < 1.0:
        raise ValueError('Gamma must be larger or equal to 1.0')
    # precomputed three-momenta shells for the summation
    n = np.load("./momenta.npy")
    # the three terms of equation (5) in arXiv:1107.5023v2
    res = A(q2, gamma, l, m, d, precision, verbose, m_split, n) + \
          B(q2, gamma, l, precision, verbose) + \
          C(q2, gamma, l, m, d, precision, verbose, m_split, n)
    if verbose:
        print('Luescher Zeta function:', res)
    return res
################################################################################
################################################################################
################################################################################
#
# IMPLEMENTATION
#
################################################################################
# Transforms an array of 3d vectors from cartesian to spherical coordinates
################################################################################
def appendSpherical_np(xyz):
    """Convert an (N, 3) array of cartesian vectors to spherical
    coordinates (r, theta, phi), where theta is the polar angle measured
    from the z axis and phi the azimuth in the x-y plane."""
    sph = np.zeros(xyz.shape)
    rho2 = xyz[:, 0] ** 2 + xyz[:, 1] ** 2
    sph[:, 0] = np.sqrt(rho2 + xyz[:, 2] ** 2)
    sph[:, 1] = np.arctan2(np.sqrt(rho2), xyz[:, 2])
    sph[:, 2] = np.arctan2(xyz[:, 1], xyz[:, 0])
    return sph
# Computes the vector r for the sum in term A and returns it in spherical
# coordinates
################################################################################
def compute_r_in_spherical_coordinates(a, d, gamma, m_split):
    """Build the vectors r for the sum in term A and return them in
    spherical coordinates.

    In the rest frame every vector is simply contracted by gamma; in a
    moving frame only the component parallel to d is contracted, after
    being shifted by -0.5 * m_split * d.
    """
    out = np.zeros(a.shape)
    if np.linalg.norm(d) == 0.0:
        for i, vec in enumerate(a):
            out[i, :] = vec / gamma
    else:
        # split each vector into parts parallel and orthogonal to d
        for i, vec in enumerate(a):
            par = np.dot(vec, d) / np.dot(d, d) * d
            perp = vec - par
            out[i, :] = (par - 0.5 * m_split * d) / gamma + perp
    return appendSpherical_np(out)
# compute spherical harmonics
################################################################################
def sph_harm(m = 0, l = 0, phi = 0, theta = 0):
    """Spherical harmonic Y_l^m(theta, phi).

    Closed-form expressions are hard-coded for the low (l, m) values used
    most often; anything else falls back to scipy.special.sph_harm.

    BUGFIX: the original compared quantum numbers with `is` (identity),
    which only works for CPython-interned small ints and silently falls
    through to scipy for e.g. numpy integer arguments; use `==` instead.
    """
    if l == 0 and m == 0:
        return 0.28209479177387814
    elif l == 1 and m == -1:
        return 0.3454941494713355*np.sin(theta)*np.exp(-1.j*phi)
    elif l == 1 and m == 0:
        return 0.48860251190291992*np.cos(theta)
    elif l == 1 and m == 1:
        return -0.3454941494713355*np.sin(theta)*np.exp(1.j*phi)
    elif l == 2 and m == -2:
        return 0.38627420202318957*np.sin(theta)*np.sin(theta) * np.exp(-2.*1.j*phi)
    elif l == 2 and m == 0:
        return 0.31539156525252005*(3.*np.cos(theta)*np.cos(theta) - 1.)
    elif l == 2 and m == 2:
        return 0.38627420202318957*np.sin(theta)*np.sin(theta) * np.exp(2.*1.j*phi)
    else:
        return scipy.special.sph_harm(m, l, phi, theta)
# returns the part of the momentum array for a given momentum squared
################################################################################
def return_momentum_array(p, n):
    """Return the momentum vectors stored at shell index *p* of the
    precomputed array *n*, plus the index of the next shell to read.

    A single empty shell is skipped; consecutive empty shells are not
    handled (mirrors the original behaviour).

    BUGFIX: `len(...) is 0` compared identities, relying on CPython
    small-int interning; replaced with the equality test.
    """
    if len(n[p, 0]) == 0:
        p += 1
    out = n[p, 0]
    p += 1
    return out, p
# Computes a part of the sum in term A
################################################################################
def compute_summands_A(a_sph, q, l, m):
    """Evaluate one momentum shell of the sum in term A.

    The radial factor exp(-(r^2 - q)) * r^l / (r^2 - q) depends only on
    |r|, so vectors sharing a radius (within 1e-8) reuse the already
    computed value instead of re-evaluating the exponential.
    """
    radial = []
    for idx, vec in enumerate(a_sph):
        for j in range(idx):
            # reuse the factor of an earlier vector with the same radius
            if abs(float(vec[0]) - float(a_sph[j, 0])) < 1e-8:
                radial.append(radial[j])
                break
        else:
            radial.append((np.exp(-(vec[0]**2. - q)) * vec[0]**l)
                          / (vec[0]**2 - q))
    # weight each radial factor with the corresponding spherical harmonic
    total = 0.0
    for factor, vec in zip(radial, a_sph):
        total += factor * sph_harm(m, l, vec[2], vec[1])
    return total
# Computation of term A
################################################################################
def A(q, gamma, l, m, d, precision, verbose, m_split, n):
    """Term A of equation (5) in arXiv:1107.5023v2 (the lattice sum).

    Momentum shells are accumulated until the relative contribution of
    the latest shell drops below *precision*.

    BUGFIX: Python-2 print statements converted to print() calls; the
    numeric logic is unchanged.
    """
    i = 0
    r, i = return_momentum_array(i, n)
    r_sph = compute_r_in_spherical_coordinates(r, d, gamma, m_split)
    result = compute_summands_A(r_sph, q, l, m)
    if verbose:
        print('convergence in term A:')
        print('\t', i-1, result)
    # computing new sums until precision is reached
    eps = 1
    while (eps > precision):
        r, i = return_momentum_array(i, n)
        r_sph = compute_r_in_spherical_coordinates(r, d, gamma, m_split)
        result_h = compute_summands_A(r_sph, q, l, m)
        if result != 0.0:
            eps = abs(result_h/result)
        result += result_h
        if verbose:
            print('\t', i-1, result, eps)
        # if result is still zero after 4 iterations it is assumed to stay zero
        if result == 0.0 and i > 4:
            break
    if verbose:
        print('Term A:', result)
    return result
# Computation of term B
################################################################################
def B(q, gamma, l, precision, verbose):
    """Term B of equation (5) in arXiv:1107.5023v2.

    Nonzero only for l == 0.

    BUGFIX: `l is not 0` (identity test on an int literal) replaced by
    `l != 0`; Python-2 print statement converted to print().
    """
    if l != 0:
        return 0.0
    a = 2.*scipy.special.sph_harm(0, 0, 0.0, 0.0)*gamma*math.pow(math.pi, 3./2.)
    # The integral gives [2*(exp(q)*DawsonF(sqrt(q))/sqrt(q)] for Lambda = 1
    # The Dawson function is only available in scipy 0.13 or so, so it is
    # replaced by a representation with the Gaussian error function.
    dawson = -1.j*np.sqrt(math.pi) * np.exp(-q) * \
             scipy.special.erf(1.j*cmath.sqrt(q)) / 2.
    b = q * 2.*np.exp(q)*dawson/cmath.sqrt(q)
    c = math.exp(q)
    if verbose:
        print('Term B:', a*(b-c))
    return a*(b-c)
# Computes the term gamma*w and returns the result in spherical coordinates
################################################################################
def compute_gamma_w_in_spherical_coordinates(a, d, gamma):
    """Dilate the component of each vector parallel to d by gamma and
    return the resulting vectors in spherical coordinates."""
    out = np.zeros(a.shape)
    if np.linalg.norm(d) == 0.0:
        # rest frame: uniform dilation
        for i, vec in enumerate(a):
            out[i, :] = vec * gamma
    else:
        # split each vector into parts parallel and orthogonal to d
        for i, vec in enumerate(a):
            par = np.dot(vec, d) / np.dot(d, d) * d
            perp = vec - par
            out[i, :] = par * gamma + perp
    return appendSpherical_np(out)
# Just the integrand of term C
################################################################################
def integrand(t, q, l, w):
    """Integrand of the t-integral appearing in term C (Lambda = 1)."""
    return ((math.pi / t) ** (3. / 2. + l)) * np.exp(q * t - w / t)
# Computes a part of the sum in term C
################################################################################
def compute_summands_C(w_sph, w, q, gamma, l, m, d, m_split, precision):
    """Evaluate one momentum shell of the sum in term C.

    part1 is the angular/phase weight of each vector; part2 is the
    t-integral, which only depends on |w| and is therefore cached and
    reused for vectors sharing a radius (within 1e-8).

    NOTE(review): sph_harm is called here with array-valued angles; this
    only vectorizes through the hard-coded (l, m) branches of the local
    sph_harm -- confirm for other quantum numbers.
    """
    # Factor one: phase and spherical-harmonic weight per vector
    part1 = (-1.j)**l * gamma * (np.absolute(w_sph[:,0])**l) * \
            np.exp((-1.j)*m_split*math.pi*np.dot(w, d)) * \
            sph_harm(m, l, w_sph[:,2], w_sph[:,1])
    # Factor two: The integral
    part2 = []
    counter = 0
    for ww in w_sph:
        breaker = 0
        if counter > 0: # one way to avoid double computation
            for i in range(0, counter):
                # same radius as an earlier vector -> reuse its integral
                if abs(float(ww[0]) - float(w_sph[i, 0])) < 1e-8:
                    part2.append(part2[i])
                    breaker = 1
                    break
        if breaker == 0:
            # the precision in this integral might be crucial at some point but it is
            # rather high right now with a standard of 1e-8. It should be enough
            # for all computations. In doubt, please change it.
            part2.append((scipy.integrate.quadrature(integrand, 0., 1., \
                args=(q, l, (math.pi*ww[0])**2), tol = precision*0.1, \
                maxiter=1000))[0])
        counter += 1
    part2 = np.asarray(part2, dtype=float)
    # return the result
    return np.dot(part1, part2)
# Computation of term C
################################################################################
def C(q, gamma, l, m, d, precision, verbose, m_split, n):
    """Term C of equation (5) in arXiv:1107.5023v2.

    Momentum shells are accumulated until the relative contribution of
    the latest shell drops below *precision*.

    BUGFIX: Python-2 print statements converted to print() calls; the
    numeric logic is unchanged.
    """
    i = 1
    w, i = return_momentum_array(i, n)
    w_sph = compute_gamma_w_in_spherical_coordinates(w, d, gamma)
    result = compute_summands_C(w_sph, w, q, gamma, l, m, d, m_split, precision)
    if verbose:
        print('convergence in term C:')
        print('\t', i-1, result)
    # computing new sums until precision is reached
    eps = 1
    while (eps > precision):
        w, i = return_momentum_array(i, n)
        w_sph = compute_gamma_w_in_spherical_coordinates(w, d, gamma)
        result_h = compute_summands_C(w_sph, w, q, gamma, l, m, d, m_split, precision)
        if result != 0.0:
            eps = abs(result_h/result)
        result += result_h
        if verbose:
            print('\t', i-1, result, eps)
        # if result is still zero after 4 iterations it is assumed to stay zero
        if result == 0.0 and i > 4:
            break
    if verbose:
        print('Term C:', result)
    return result
def test():
    """Reproduce the published check values (see the file header) in the
    cms and in two moving frames, printing computed vs expected phase
    shifts.

    BUGFIX: Python-2 print statements converted to print() calls.
    """
    # cms ##########################
    print('\nTest in cms:')
    Pcm = np.array([0., 0., 0.])
    q = 0.1207*24/(2.*math.pi)
    gamma = 1.0
    zeta = Z(q*q, gamma, d = Pcm).real
    print('q, gamma:', q, gamma)
    delta = np.arctan(math.pi**(3./2.)*q/zeta)*180./math.pi
    if delta < 0:
        delta = 180+delta
    print('delta:', delta, 'delta should be: 136.6527')
    # mv1 ##########################
    print('\nTest in mv1:')
    Pcm = np.array([0., 0., 1.])
    L = 32
    q = 0.161*L/(2.*math.pi)
    E = 0.440
    Ecm = 0.396
    gamma = E/Ecm
    Z00 = Z(q*q, gamma, d = Pcm).real
    Z20 = Z(q*q, gamma, d = Pcm, l = 2).real
    print('q, gamma:', q, gamma)
    delta = np.arctan(gamma*math.pi**(3./2.) * q / \
                      (Z00 + (2./(q*q*math.sqrt(5)))*Z20))*180./math.pi
    if delta < 0:
        delta = 180+delta
    print('delta:', delta, 'delta should be: 115.7653')
    # mv2 ##########################
    print('\nTest in mv2:')
    Pcm = np.array([1., 1., 0.])
    L = 32
    q = 0.167*L/(2.*math.pi)
    E = 0.490
    Ecm = 0.407
    gamma = E/Ecm
    Z00 = Z(q*q, gamma, d = Pcm).real
    Z20 = Z(q*q, gamma, d = Pcm, l = 2).real
    Z22 = Z(q*q, gamma, d = Pcm, l = 2, m = 2).imag
    Z2_2 = Z(q*q, gamma, d = Pcm, l = 2, m = -2).imag
    print('q, gamma:', q, gamma)
    delta = np.arctan(gamma*math.pi**(3./2.) * q / \
                      (Z00 - (1./(q*q*math.sqrt(5)))*Z20 \
                       + ((math.sqrt(3./10.)/(q*q))*(Z22-Z2_2))))*180./math.pi
    if delta < 0:
        delta = 180+delta
    print('delta:', delta, 'delta should be: 127.9930')
|
knippsch/LueschersZetaFunction
|
zeta.py
|
Python
|
gpl-3.0
| 13,244
|
[
"Gaussian"
] |
ac5dd52704357692bbc8d3502315285a515e2f811d1a95cdd14bc702e4e90439
|
#
# This file is part of the CCP1 Graphical User Interface (ccp1gui)
#
# (C) 2002-2005 CCLRC Daresbury Laboratory
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
#
import Pmw
import Tkinter
import re
from tools import *
class FFTool(Tool):
    """A tool for defining the forcefield used by a calculation.

    Presents an option menu with the supported forcefield sources (UFF,
    Charmm data files, or a user-supplied forcefield file) and shows only
    the file-selection widgets relevant to the current choice.
    """

    def __init__(self, editor, molecule=None, **kw):
        # NOTE: apply() was removed in Python 3; calling the unbound
        # initialiser directly is the equivalent, portable form.
        Tool.__init__(self, editor, **kw)
        self.widget = Pmw.Group(self.parent, tag_text="Forcefield Selection")
        interior = self.widget.interior()
        if molecule:
            # BUGFIX: this used to call self.widget.set_molecule(molecule),
            # but Pmw.Group has no such method (AttributeError at runtime);
            # the setter defined on this class was clearly intended.
            self.set_molecule(molecule)
        self.f2 = Tkinter.Frame(interior)
        # Component tools for the various file inputs
        self.from_quanta_tool = BooleanTool(self.editor, "from_quanta", "Use files from Quanta")
        self.charmm_psf_file_tool = TextFieldTool(self.editor, "charmm_psf_file", "charmm_psf_file", action='open')
        self.charmm_pdb_file_tool = TextFieldTool(self.editor, "charmm_pdb_file", "charmm_pdb_file", action='open')
        self.charmm_parm_file_tool = TextFieldTool(self.editor, "charmm_parm_file", "charmm_parm_file", action='open')
        self.charmm_mass_file_tool = TextFieldTool(self.editor, "charmm_mass_file", "charmm_mass_file", action='open')
        self.mm_defs_file_tool = FileTool(self.editor, "mm_defs", "Forcefield File ", action='open')
        all_avail = ["UFF", "Charmm DataFiles", "User FF file"]
        self.select_ff = Pmw.OptionMenu(interior,
                                        labelpos='w',
                                        label_text='Select Forcefield',
                                        command=self.__choose_ff,
                                        items=all_avail,
                                        initialitem=all_avail[0])
        self.select_ff.pack(side='top')
        self.f2.pack(side='top', expand=1, fill='x')
        # Show the widgets matching the initial selection
        self.__choose_ff(all_avail[0])

    def __choose_ff(self, choice):
        """Handler for the option menu: show/hide the file widgets that
        match the selected forcefield source."""
        if choice == "Charmm DataFiles":
            self.from_quanta_tool.widget.pack(expand='yes', anchor='w', padx=10, in_=self.f2)
            self.charmm_psf_file_tool.widget.pack(expand=1, fill='x', in_=self.f2)
            self.charmm_pdb_file_tool.widget.pack(expand=1, fill='x', in_=self.f2)
            self.charmm_parm_file_tool.widget.pack(expand=1, fill='x', in_=self.f2)
            self.charmm_mass_file_tool.widget.pack(expand=1, fill='x', in_=self.f2)
        else:
            self.from_quanta_tool.widget.forget()
            self.charmm_psf_file_tool.widget.forget()
            self.charmm_pdb_file_tool.widget.forget()
            self.charmm_parm_file_tool.widget.forget()
            self.charmm_mass_file_tool.widget.forget()
        if choice == "User FF file":
            self.mm_defs_file_tool.widget.pack(in_=self.f2)
        else:
            self.mm_defs_file_tool.widget.forget()

    def set_molecule(self, molecule):
        # Keep a reference to the molecule the forcefield applies to
        self.molecule = molecule

    def ReadWidget(self):
        """Propagate the widget state into the calculation parameters."""
        # read any component tools (from_quanta_tool deliberately kept out
        # of this loop, mirroring the original behaviour -- TODO confirm)
        for tool in [self.charmm_psf_file_tool,
                     self.charmm_pdb_file_tool,
                     self.charmm_parm_file_tool,
                     self.charmm_mass_file_tool,
                     self.mm_defs_file_tool]:
            tool.ReadWidget()
        choice = self.select_ff.getvalue()
        if choice == "Charmm DataFiles":
            self.editor.calc.set_parameter('use_charmm', 1)
            self.editor.calc.set_parameter("forcefield", "none")
        elif choice == "User FF file":
            self.editor.calc.set_parameter('use_charmm', 0)
            self.editor.calc.set_parameter("forcefield", "none")
        else:
            self.editor.calc.set_parameter('use_charmm', 0)
            self.editor.calc.set_parameter("forcefield", choice)

    def UpdateWidget(self):
        # Nothing to refresh: the widgets are the authoritative state
        pass
if __name__ == '__main__':
    # Ad-hoc manual test: build a tiny C/H/Cl fragment, wrap it in a
    # DL_POLY calculation and open the calculation editor GUI.
    from dlpoly import *
    from zmatrix import *
    root = Tkinter.Tk()
    model = Zmatrix()
    atom = ZAtom()
    atom.symbol = 'C'
    atom.name = 'C'
    model.insert_atom(0, atom)
    atom = ZAtom()
    atom.symbol = 'Cl'
    atom.name = 'Cl'
    atom.coord = [1., 0., 0.]
    model.insert_atom(1, atom)
    atom = ZAtom()
    atom.symbol = 'H'
    atom.name = 'H'
    atom.coord = [1., 1., 0.]
    model.insert_atom(1, atom)
    calc = DLPOLYCalc()
    calc.set_input('mol_obj', model)
    jm = JobManager()
    je = JobEditor(root, jm)
    vt = DLPOLYCalcEd(root, calc, None)
    # t = FFTool(vt)
    # vt.pack()
    root.mainloop()
|
alexei-matveev/ccp1gui
|
interfaces/mmtools.py
|
Python
|
gpl-2.0
| 5,076
|
[
"CHARMM"
] |
1105b185f7a36df53629d276138ac95f055e8b7ffba86e646a39a8be16aebcd0
|
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
'''
Created on 8 Nov 2012
@author: George
'''
'''
Models a FIFO queue where entities can wait in order to get into a server
'''
import simpy
from CoreObject import CoreObject
# ===========================================================================
# the Queue object
# ===========================================================================
class Queue(CoreObject):
family='Buffer'
#===========================================================================
# the __init__ method of the Queue
#===========================================================================
def __init__(self, id='', name='', capacity=1, isDummy=False, schedulingRule="FIFO",
             level=None, gatherWipStat=False, **kw):
    """Create a Queue (FIFO buffer in front of a server).

    :param capacity: maximum entities held; negative or inf -> unbounded
    :param isDummy: True for the dummy first queue that timestamps entities
    :param schedulingRule: sorting rule name; "MC-<r1>-<r2>-..." selects
        multiple criteria (remaining criteria are validated below)
    :param level: WIP level that triggers reallocation of operators
    :param gatherWipStat: whether WIP statistics should be collected
    """
    self.type = "Queue"   # String that shows the type of object
    CoreObject.__init__(self, id, name)
    # normalise capacity: anything negative or infinite means unbounded
    capacity = float(capacity)
    if capacity < 0 or capacity == float("inf"):
        self.capacity = float("inf")
    else:
        self.capacity = int(capacity)
    self.isDummy = bool(int(isDummy))   # Boolean that shows if it is the dummy first Queue
    self.schedulingRule = schedulingRule   # the scheduling rule that the Queue follows
    self.multipleCriterionList = []   # list with the criteria used to sort the Entities in the Queue
    SRlist = [schedulingRule]
    if schedulingRule.startswith("MC"):   # if the first criterion is MC aka multiple criteria
        SRlist = schedulingRule.split("-")   # split the string of the criteria (delimiter -)
        self.schedulingRule = SRlist.pop(0)   # take the first criterion of the list
        self.multipleCriterionList = SRlist   # hold the criteria list in the property multipleCriterionList
    # validate the (remaining) scheduling criteria against the known set
    for scheduling_rule in SRlist:
        if scheduling_rule not in self.getSupportedSchedulingRules():
            raise ValueError("Unknown scheduling rule %s for %s" %
                             (scheduling_rule, id))
    self.gatherWipStat = gatherWipStat
    # trigger level for the reallocation of operators
    if level:
        assert level <= self.capacity, "the level cannot be bigger than the capacity of the queue"
        self.level = level
    # register this queue in the global model registry
    from Globals import G
    G.QueueList.append(self)
@staticmethod
def getSupportedSchedulingRules():
    """Return the names of every scheduling rule a Queue can apply."""
    return (
        "FIFO", "Priority", "EDD", "EOD", "NumStages",
        "RPC", "LPT", "SPT", "MS", "WINQ",
    )
#===========================================================================
# the initialize method of the Queue class
#===========================================================================
def initialize(self):
    """(Re)initialise the runtime state before a simulation run."""
    # using the Process __init__ and not the CoreObject __init__
    CoreObject.initialize(self)
    # internal simpy Resource modelling the finite buffer space
    self.Res = simpy.Resource(self.env, self.capacity)
    # event used by router
    self.loadOperatorAvailable = self.env.event()
    # declare which signals this object is prepared to receive
    self.expectedSignals['isRequested'] = 1
    self.expectedSignals['canDispose'] = 1
    self.expectedSignals['loadOperatorAvailable'] = 1
#===========================================================================
# run method of the queue
#===========================================================================
def run(self):
    """Main simpy process loop of the queue.

    Repeatedly waits for one of three events (an upstream object
    requesting to hand over an entity, a downstream object signalling it
    can receive, or the router announcing an available load operator),
    consumes the event, and then tries to move an entity downstream.
    Each consumed event is re-armed by replacing it with a fresh
    env.event() so it can fire again.
    """
    activeObjectQueue = self.Res.users
    # check if there is WIP and signal receiver
    self.initialSignalReceiver()
    while 1:
        self.printTrace(self.id, waitEvent='')
        # wait until the Queue can accept an entity and one predecessor requests it
        self.expectedSignals['canDispose'] = 1
        self.expectedSignals['isRequested'] = 1
        self.expectedSignals['loadOperatorAvailable'] = 1
        receivedEvent = yield self.env.any_of([self.isRequested, self.canDispose, self.loadOperatorAvailable])
        self.printTrace(self.id, received='')
        # if the event that activated the thread is isRequested then getEntity
        if self.isRequested in receivedEvent:
            transmitter, eventTime = self.isRequested.value
            self.printTrace(self.id, isRequested=transmitter.id)
            # reset the isRequested signal parameter
            self.isRequested = self.env.event()
            self.getEntity()
            # if entity just got to the dummyQ set its startTime as the current time
            if self.isDummy:
                activeObjectQueue[0].startTime = self.env.now
        # if the queue received an loadOperatorIsAvailable (from Router) with signalparam time
        if self.loadOperatorAvailable in receivedEvent:
            transmitter, eventTime = self.loadOperatorAvailable.value
            self.loadOperatorAvailable = self.env.event()
        # if the queue received an canDispose with signalparam time, this means that the signals was sent from a MouldAssemblyBuffer
        if self.canDispose in receivedEvent:
            transmitter, eventTime = self.canDispose.value
            self.printTrace(self.id, canDispose='')
            self.canDispose = self.env.event()
        # if the event that activated the thread is canDispose then signalReceiver
        if self.haveToDispose():
            if self.receiver:
                if not self.receiver.entryIsAssignedTo():
                    # try to signal receiver. In case of failure signal giver (for synchronization issues)
                    if not self.signalReceiver():
                        self.signalGiver()
                    continue
            self.signalReceiver()
        # signal the giver (for synchronization issues)
        self.signalGiver()
# =======================================================================
# checks if the Queue can accept an entity
# it checks also who called it and returns TRUE
# only to the predecessor that will give the entity.
# =======================================================================
def canAccept(self, callerObject=None):
activeObjectQueue=self.Res.users
#if we have only one predecessor just check if there is a place available
# this is done to achieve better (cpu) processing time
# then we can also use it as a filter for a yield method
if(callerObject==None):
return len(activeObjectQueue)<self.capacity
thecaller=callerObject
return len(activeObjectQueue)<self.capacity and (self.isInRouteOf(thecaller))
# =======================================================================
# checks if the Queue can dispose an entity to the following object
# it checks also who called it and returns TRUE
# only to the receiver that will give the entity.
# this is kind of slow I think got to check
# =======================================================================
def haveToDispose(self, callerObject=None):
activeObjectQueue=self.Res.users
#if we have only one possible receiver just check if the Queue holds one or more entities
if(callerObject==None):
return len(activeObjectQueue)>0
thecaller=callerObject
return len(activeObjectQueue)>0 and thecaller.isInRouteOf(self)
    # =======================================================================
    # removes an entity from the Object
    # =======================================================================
    def removeEntity(self, entity=None):
        """Remove *entity* from the queue, then propagate the freed capacity
        to the giver and any remaining work to the receiver."""
        activeEntity=CoreObject.removeEntity(self, entity) #run the default method
        # a slot was freed: a predecessor may now hand over an entity
        if self.canAccept():
            self.signalGiver()
        # TODO: disable that for the mouldAssemblyBuffer
        if not self.__class__.__name__=='MouldAssemblyBufferManaged':
            if self.haveToDispose():
#                 self.printTrace(self.id, attemptSignalReceiver='(removeEntity)')
                self.signalReceiver()
        # reset the signals for the Queue. It should be at the start of the loop for now
        # xxx consider to do this in all CoreObjects
        self.expectedSignals['isRequested']=1
        self.expectedSignals['canDispose']=1
        self.expectedSignals['loadOperatorAvailable']=1
        # check if the queue is empty, if yes then try to signal the router, operators may need reallocation
        # NOTE(review): the bare except keeps this best-effort — self.level is
        # only set when a trigger level was configured in __init__; confirm
        # before narrowing this to AttributeError
        try:
            if self.level:
                if not len(self.getActiveObjectQueue()) and self.checkForDedicatedOperators():
                    self.requestAllocation()
        except:
            pass
        return activeEntity
# =======================================================================
# checks if the Queue can accept an entity and
# there is an entity in some predecessor waiting for it
# also updates the predecessorIndex to the one that is to be taken
# =======================================================================
def canAcceptAndIsRequested(self,callerObject=None):
activeObjectQueue=self.Res.users
giverObject=callerObject
assert giverObject, 'there must be a caller for canAcceptAndIsRequested'
return len(activeObjectQueue)<self.capacity and giverObject.haveToDispose(self)
    # =======================================================================
    # gets an entity from the predecessor that
    # the predecessor index points to
    # =======================================================================
    def getEntity(self):
        """Pull an entity from the current giver into this queue."""
        activeEntity=CoreObject.getEntity(self) #run the default behavior
        # if the level is reached then try to signal the Router to reallocate the operators
        # NOTE(review): bare except keeps this best-effort — self.level only
        # exists when a trigger level was configured; confirm before narrowing
        try:
            if self.level:
                if len(self.getActiveObjectQueue())==self.level and self.checkForDedicatedOperators():
                    self.requestAllocation()
        except:
            pass
        return activeEntity
#===========================================================================
# checks whether the entity can proceed to a successor object
#===========================================================================
def canDeliver(self, entity=None):
assert self.isInActiveQueue(entity), entity.id +' not in the internalQueue of'+ self.id
activeEntity=entity
mayProceed=False
# for all the possible receivers of an entity check whether they can accept and then set accordingly the canProceed flag of the entity
for nextObject in [object for object in self.next if object.canAcceptEntity(activeEntity)]:
activeEntity.proceed=True
activeEntity.candidateReceivers.append(nextObject)
mayProceed=True
return mayProceed
# =======================================================================
# sorts the Entities of the Queue according to the scheduling rule
# =======================================================================
def sortEntities(self):
#if we have sorting according to multiple criteria we have to call the sorter many times
if self.schedulingRule=="MC":
for criterion in reversed(self.multipleCriterionList):
self.activeQSorter(criterion=criterion)
#else we just use the default scheduling rule
else:
self.activeQSorter()
    # =======================================================================
    # sorts the Entities of the Queue according to the scheduling rule
    # =======================================================================
    def activeQSorter(self, criterion=None):
        """Sort the internal queue in place according to *criterion*
        (defaults to this queue's configured schedulingRule)."""
        activeObjectQ=self.Res.users
        if criterion==None:
            criterion=self.schedulingRule
        #if the schedulingRule is first in first out
        if criterion=="FIFO":
            pass
        #if the schedulingRule is based on a pre-defined priority
        elif criterion=="Priority":
            activeObjectQ.sort(key=lambda x: x.priority)
        #if the schedulingRule is earliest due date
        elif criterion=="EDD":
            activeObjectQ.sort(key=lambda x: x.dueDate)
        #if the schedulingRule is earliest order date
        elif criterion=="EOD":
            activeObjectQ.sort(key=lambda x: x.orderDate)
        #if the schedulingRule is to sort Entities according to the stations they have to visit
        elif criterion=="NumStages":
            activeObjectQ.sort(key=lambda x: len(x.remainingRoute), reverse=True)
        #if the schedulingRule is to sort Entities according to their remaining processing time in the system
        elif criterion=="RPC":
            for entity in activeObjectQ:
                RPT=0
                for step in entity.remainingRoute:
                    processingTime=step.get('processingTime',None)
                    if processingTime:
                        # only the mean of the 'Fixed' distribution is considered here
                        RPT+=float(processingTime.get('Fixed',{}).get('mean',0))
                entity.remainingProcessingTime=RPT
            activeObjectQ.sort(key=lambda x: x.remainingProcessingTime, reverse=True)
        #if the schedulingRule is to sort Entities according to longest processing time first in the next station
        elif criterion=="LPT":
            for entity in activeObjectQ:
                processingTime = entity.remainingRoute[0].get('processingTime',None)
                if processingTime:
                    entity.processingTimeInNextStation=float(processingTime.get('Fixed',{}).get('mean',0))
                else:
                    entity.processingTimeInNextStation=0
            activeObjectQ.sort(key=lambda x: x.processingTimeInNextStation, reverse=True)
        #if the schedulingRule is to sort Entities according to shortest processing time first in the next station
        elif criterion=="SPT":
            for entity in activeObjectQ:
                processingTime = entity.remainingRoute[0].get('processingTime',None)
                if processingTime:
                    entity.processingTimeInNextStation=float(processingTime.get('Fixed',{}).get('mean',0))
                else:
                    entity.processingTimeInNextStation=0
            activeObjectQ.sort(key=lambda x: x.processingTimeInNextStation)
        #if the schedulingRule is to sort Entities based on the minimum slackness
        elif criterion=="MS":
            for entity in activeObjectQ:
                RPT=0
                for step in entity.remainingRoute:
                    processingTime=step.get('processingTime',None)
                    if processingTime:
                        RPT+=float(processingTime.get('Fixed',{}).get('mean',0))
                entity.remainingProcessingTime=RPT
            activeObjectQ.sort(key=lambda x: (x.dueDate-x.remainingProcessingTime))
        #if the schedulingRule is to sort Entities based on the length of the following Queue
        elif criterion=="WINQ":
            from Globals import G
            for entity in activeObjectQ:
                # NOTE(review): assumes each entity has at least two steps left
                # in remainingRoute (index 1 raises IndexError otherwise) and
                # that at least one object in G.ObjList matches — confirm
                nextObjIds=entity.remainingRoute[1].get('stationIdsList',[])
                for obj in G.ObjList:
                    if obj.id in nextObjIds:
                        nextObject=obj
                entity.nextQueueLength=len(nextObject.Res.users)
            activeObjectQ.sort(key=lambda x: x.nextQueueLength)
        else:
            assert False, "Unknown scheduling criterion %r" % (criterion, )
def outputResultsJSON(self):
from Globals import G
json = {'_class': 'Dream.%s' % self.__class__.__name__,
'id': str(self.id),
'family': self.family,
'results': {} }
if self.gatherWipStat:
json['results']['wip_stat_list']=self.WipStat
G.outputJSON['elementList'].append(json)
|
jerome-nexedi/dream
|
dream/simulation/Queue.py
|
Python
|
gpl-3.0
| 17,117
|
[
"VisIt"
] |
9666f205ff14edf11649c069e0fd6dd24354f553bb9fce6946dd7f256d864102
|
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# Modifications Copyright 2004/2005 James Casbon. All rights Reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Changes made by James Casbon:
# - New Astral class
# - SQL functionality for both Scop and Astral classes
# - All sunids are int not strings
#
# Code written by Jeffrey Chang to access SCOP over the internet, which
# was previously in Bio.WWW.SCOP, has now been merged into this module.
""" SCOP: Structural Classification of Proteins.
The SCOP database aims to provide a manually constructed classification of
all known protein structures into a hierarchy, the main levels of which
are family, superfamily and fold.
* "SCOP":http://scop.mrc-lmb.cam.ac.uk/scop/
* "Introduction":http://scop.mrc-lmb.cam.ac.uk/scop/intro.html
* "SCOP parsable files":http://scop.mrc-lmb.cam.ac.uk/scop/parse/
The Scop object in this module represents the entire SCOP classification. It
can be built from the three SCOP parsable files, modified if so desired, and
converted back to the same file formats. A single SCOP domain (represented
by the Domain class) can be obtained from Scop using the domain's SCOP
identifier (sid).
nodeCodeDict -- A mapping between known 2 letter node codes and a longer
description. The known node types are 'cl' (class), 'cf'
(fold), 'sf' (superfamily), 'fa' (family), 'dm' (domain),
'sp' (species), 'px' (domain). Additional node types may
be added in the future.
This module also provides code to access SCOP over the WWW.
Functions:
search -- Access the main CGI script.
_open -- Internally used function.
"""
from types import *
import os
import re
import Des
import Cla
import Hie
from Residues import *
from Bio import SeqIO
from Bio.Seq import Seq
# Mapping of the 2-letter SCOP node type codes to human-readable names.
nodeCodeDict = { 'cl':'class', 'cf':'fold', 'sf':'superfamily',
                 'fa':'family', 'dm':'protein', 'sp':'species', 'px':'domain'}

# Inverse mapping: long node type name -> 2-letter code.
_nodetype_to_code= { 'class': 'cl', 'fold': 'cf', 'superfamily': 'sf',
                     'family': 'fa', 'protein': 'dm', 'species': 'sp', 'domain': 'px'}

# Node types ordered from the root ('ro') down to single domains ('px').
nodeCodeOrder = [ 'ro', 'cl', 'cf', 'sf', 'fa', 'dm', 'sp', 'px' ]

# Percentage-identity thresholds for which ASTRAL provides domain subsets.
astralBibIds = [10,20,25,30,35,40,50,70,90,95,100]

# E-value thresholds for which ASTRAL provides domain subsets.
astralEvs = [10, 5, 1, 0.5, 0.1, 0.05, 0.01, 0.005, 0.001, 1e-4, 1e-5, 1e-10, 1e-15,
             1e-20, 1e-25, 1e-50]

# E-value -> token used in the ASTRAL subset file names.
astralEv_to_file = { 10: 'e+1', 5: 'e+0,7', 1: 'e+0', 0.5: 'e-0,3', 0.1: 'e-1',
                     0.05: 'e-1,3', 0.01: 'e-2', 0.005: 'e-2,3', 0.001: 'e-3',
                     1e-4: 'e-4', 1e-5: 'e-5', 1e-10: 'e-10', 1e-15: 'e-15',
                     1e-20: 'e-20', 1e-25: 'e-25', 1e-50: 'e-50' }

# E-value -> column name used in the SQL backend (identifier-safe spelling).
astralEv_to_sql = { 10: 'e1', 5: 'e0_7', 1: 'e0', 0.5: 'e_0_3', 0.1: 'e_1',
                    0.05: 'e_1_3', 0.01: 'e_2', 0.005: 'e_2_3', 0.001: 'e_3',
                    1e-4: 'e_4', 1e-5: 'e_5', 1e-10: 'e_10', 1e-15: 'e_15',
                    1e-20: 'e_20', 1e-25: 'e_25', 1e-50: 'e_50' }
# Compatibility shim: cmp() exists on Python 2 but was removed in Python 3.
try:
    #See if the cmp function exists (will on Python 2)
    _cmp = cmp
except NameError:
    def _cmp(a,b):
        """Implementation of cmp(x,y) for Python 3 (PRIVATE).

        Based on Python 3 docs which say if you really need the cmp()
        functionality, you could use the expression (a > b) - (a < b)
        as the equivalent for cmp(a, b)
        """
        return (a > b) - (a < b)
def cmp_sccs(sccs1, sccs2):
    """Order SCOP concise classification strings (sccs).

    a.4.5.1 < a.4.5.11 < b.1.1.1

    A sccs (e.g. a.4.5.11) compactly represents a domain's classification.
    The letter represents the class, and the numbers are the fold,
    superfamily, and family, respectively.
    """
    left = sccs1.split(".")
    right = sccs2.split(".")
    # the class letters differ: compare them alphabetically
    if left[0] != right[0]:
        return (left[0] > right[0]) - (left[0] < right[0])
    # same class: compare the numeric levels as integer sequences
    left = list(map(int, left[1:]))
    right = list(map(int, right[1:]))
    return (left > right) - (left < right)
# Matches an ASTRAL/SCOP domain header such as
# ">d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}";
# groups: (sid, sccs, residue spec, description).
_domain_re = re.compile(r">?([\w_\.]*)\s+([\w\.]*)\s+\(([^)]*)\) (.*)")
def parse_domain(str):
    """Convert an ASTRAL header string into a Scop domain.

    An ASTRAL (http://astral.stanford.edu/) header contains a concise
    description of a SCOP domain.  A very similar format is used when a
    Domain object is converted into a string.  The Domain returned by this
    method contains most of the SCOP information, but it will not be located
    within the SCOP hierarchy (i.e. the parent node will be None).  The
    description is composed of the SCOP protein and species descriptions.

    A typical ASTRAL header looks like --
    >d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}
    """
    match = _domain_re.match(str)
    if not match:
        raise ValueError("Domain: "+ str)
    dom = Domain()
    dom.sid = match.group(1)
    dom.sccs = match.group(2)
    dom.residues = Residues(match.group(3))
    # fall back to the PDB id embedded in the sid (characters 2-5) when the
    # residue specification did not carry one
    if not dom.residues.pdbid:
        dom.residues.pdbid = dom.sid[1:5]
    dom.description = match.group(4).strip()
    return dom
def _open_scop_file(scop_dir_path, version, filetype):
filename = "dir.%s.scop.txt_%s" % (filetype,version)
handle = open(os.path.join( scop_dir_path, filename))
return handle
class Scop:
    """The entire SCOP hierarchy.

    root -- The root node of the hierarchy
    """
    def __init__(self, cla_handle=None, des_handle=None, hie_handle=None,
                 dir_path=None, db_handle=None, version=None):
        """Build the SCOP hierarchy from the SCOP parsable files, or a sql backend.

        If no file handles are given, then a Scop object with a single
        empty root node is returned.

        If a directory and version are given (with dir_path=.., version=...) or
        file handles for each file, the whole scop tree will be built in memory.

        If a MySQLdb database handle is given, the tree will be built as needed,
        minimising construction times.  To build the SQL database use the methods
        write_xxx_sql to create the tables.
        """
        self._sidDict = {}
        self._sunidDict = {}
        # nothing at all supplied: leave an empty Scop object
        if cla_handle==des_handle==hie_handle==dir_path==db_handle==None: return
        if dir_path is None and db_handle is None:
            if cla_handle == None or des_handle==None or hie_handle==None:
                raise RuntimeError("Need CLA, DES and HIE files to build SCOP")
        sunidDict = {}
        self.db_handle = db_handle
        try:
            if db_handle:
                # do nothing if we have a db handle, we'll do it all on the fly
                pass
            else:
                # open SCOP parseable files
                if dir_path:
                    if not version:
                        raise RuntimeError("Need SCOP version to find parsable files in directory")
                    if cla_handle or des_handle or hie_handle:
                        raise RuntimeError("Cannot specify SCOP directory and specific files")
                    cla_handle = _open_scop_file( dir_path, version, 'cla')
                    des_handle = _open_scop_file( dir_path, version, 'des')
                    hie_handle = _open_scop_file( dir_path, version, 'hie')
                root = Node()
                domains = []
                root.sunid=0
                root.type='ro'
                sunidDict[root.sunid] = root
                self.root = root
                root.description = 'SCOP Root'
                # Build the rest of the nodes using the DES file
                records = Des.parse(des_handle)
                for record in records:
                    if record.nodetype =='px':
                        n = Domain()
                        n.sid = record.name
                        domains.append(n)
                    else :
                        n = Node()
                    n.sunid = record.sunid
                    n.type = record.nodetype
                    n.sccs = record.sccs
                    n.description = record.description
                    sunidDict[n.sunid] = n
                # Glue all of the Nodes together using the HIE file
                records = Hie.parse(hie_handle)
                for record in records:
                    if record.sunid not in sunidDict:
                        # NOTE(review): Python 2 print statement used as debug
                        # output for sunids present in HIE but missing from DES
                        print record.sunid
                    n = sunidDict[record.sunid]
                    if record.parent != '' : # Not root node
                        if record.parent not in sunidDict:
                            raise ValueError("Incomplete data?")
                        n.parent = sunidDict[record.parent]
                    for c in record.children:
                        if c not in sunidDict:
                            raise ValueError("Incomplete data?")
                        n.children.append(sunidDict[c])
                # Fill in the gaps with information from the CLA file
                sidDict = {}
                records = Cla.parse(cla_handle)
                for record in records:
                    n = sunidDict[record.sunid]
                    assert n.sccs == record.sccs
                    assert n.sid == record.sid
                    n.residues = record.residues
                    sidDict[n.sid] = n
                # Clean up
                self._sunidDict = sunidDict
                self._sidDict = sidDict
                self._domains = tuple(domains)
        finally:
            if dir_path:
                # If we opened the files, we close the files
                if cla_handle : cla_handle.close()
                if des_handle : des_handle.close()
                if hie_handle : hie_handle.close()
    def getRoot(self):
        """Return the root Node (sunid 0) of the hierarchy."""
        return self.getNodeBySunid(0)
    def getDomainBySid(self, sid):
        """Return a domain from its sid"""
        if sid in self._sidDict:
            return self._sidDict[sid]
        if self.db_handle:
            # lazily load the domain from the SQL backend, then retry
            self.getDomainFromSQL(sid=sid)
            if sid in self._sidDict:
                return self._sidDict[sid]
        else:
            return None
    def getNodeBySunid(self, sunid):
        """Return a node from its sunid"""
        if sunid in self._sunidDict:
            return self._sunidDict[sunid]
        if self.db_handle:
            # lazily load the node from the SQL backend, then retry
            self.getDomainFromSQL(sunid=sunid)
            if sunid in self._sunidDict:
                return self._sunidDict[sunid]
        else:
            return None
    def getDomains(self):
        """Returns an ordered tuple of all SCOP Domains"""
        if self.db_handle:
            return self.getRoot().getDescendents('px')
        else:
            return self._domains
    def write_hie(self, handle):
        """Build an HIE SCOP parsable file from this object"""
        nodes = self._sunidDict.values()
        # We order nodes to ease comparison with original file
        nodes.sort(key = lambda n: n.sunid)
        for n in nodes:
            handle.write(str(n.toHieRecord()))
    def write_des(self, handle):
        """Build a DES SCOP parsable file from this object"""
        nodes = self._sunidDict.values()
        # Original SCOP file is not ordered?
        nodes.sort(key = lambda n: n.sunid)
        for n in nodes:
            if n != self.root:
                handle.write(str(n.toDesRecord()))
    def write_cla(self, handle):
        """Build a CLA SCOP parsable file from this object"""
        nodes = self._sidDict.values()
        # We order nodes to ease comparison with original file
        nodes.sort(key = lambda n: n.sunid)
        for n in nodes:
            handle.write(str(n.toClaRecord()))
    def getDomainFromSQL(self, sunid=None, sid=None):
        """Load a node from the SQL backend using sunid or sid"""
        if sunid==sid==None: return None
        cur = self.db_handle.cursor()
        if sid:
            # resolve the sid to a sunid first
            cur.execute("SELECT sunid FROM cla WHERE sid=%s", sid)
            res = cur.fetchone()
            if res is None:
                return None
            sunid = res[0]
        cur.execute("SELECT * FROM des WHERE sunid=%s", sunid)
        data = cur.fetchone()
        if data is not None:
            n = None
            #determine if Node or Domain
            if data[1] != "px":
                n = Node(scop=self)
                cur.execute("SELECT child FROM hie WHERE parent=%s", sunid)
                children = []
                for c in cur.fetchall():
                    children.append(c[0])
                n.children = children
            else:
                n = Domain(scop=self)
                cur.execute("select sid, residues, pdbid from cla where sunid=%s",
                            sunid)
                [n.sid,n.residues,pdbid] = cur.fetchone()
                n.residues = Residues(n.residues)
                n.residues.pdbid=pdbid
                self._sidDict[n.sid] = n
            [n.sunid,n.type,n.sccs,n.description] = data
            if data[1] != 'ro':
                cur.execute("SELECT parent FROM hie WHERE child=%s", sunid)
                n.parent = cur.fetchone()[0]
            n.sunid = int(n.sunid)
            self._sunidDict[n.sunid] = n
    def getAscendentFromSQL(self, node, type):
        """Get ascendents using SQL backend"""
        # asking for a level at or below the node's own level makes no sense
        if nodeCodeOrder.index(type) >= nodeCodeOrder.index(node.type): return None
        cur = self.db_handle.cursor()
        cur.execute("SELECT "+type+" from cla WHERE "+node.type+"=%s", (node.sunid))
        result = cur.fetchone()
        if result is not None:
            return self.getNodeBySunid(result[0])
        else:
            return None
    def getDescendentsFromSQL(self, node, type):
        """Get descendents of a node using the database backend.  This avoids
        repeated iteration of SQL calls and is therefore much quicker than
        repeatedly calling node.getChildren().
        """
        if nodeCodeOrder.index(type) <= nodeCodeOrder.index(node.type): return []
        des_list = []
        # SQL cla table knows nothing about 'ro'
        if node.type == 'ro':
            for c in node.getChildren():
                for d in self.getDescendentsFromSQL(c,type):
                    des_list.append(d)
            return des_list
        cur = self.db_handle.cursor()
        if type != 'px':
            cur.execute("SELECT DISTINCT des.sunid,des.type,des.sccs,description FROM \
            cla,des WHERE cla."+node.type+"=%s AND cla."+type+"=des.sunid", (node.sunid))
            data = cur.fetchall()
            for d in data:
                if int(d[0]) not in self._sunidDict:
                    n = Node(scop=self)
                    [n.sunid,n.type,n.sccs,n.description] = d
                    n.sunid=int(n.sunid)
                    self._sunidDict[n.sunid] = n
                    cur.execute("SELECT parent FROM hie WHERE child=%s", n.sunid)
                    n.parent = cur.fetchone()[0]
                    cur.execute("SELECT child FROM hie WHERE parent=%s", n.sunid)
                    children = []
                    for c in cur.fetchall():
                        children.append(c[0])
                    n.children = children
                des_list.append( self._sunidDict[int(d[0])] )
        else:
            # domains carry extra columns (sid, pdbid, residues, species)
            cur.execute("SELECT cla.sunid,sid,pdbid,residues,cla.sccs,type,description,sp\
            FROM cla,des where cla.sunid=des.sunid and cla."+node.type+"=%s",
                        node.sunid)
            data = cur.fetchall()
            for d in data:
                if int(d[0]) not in self._sunidDict:
                    n = Domain(scop=self)
                    #[n.sunid, n.sid, n.pdbid, n.residues, n.sccs, n.type,
                    #n.description,n.parent] = data
                    [n.sunid,n.sid, pdbid,n.residues,n.sccs,n.type,n.description,
                     n.parent] = d[0:8]
                    n.residues = Residues(n.residues)
                    n.residues.pdbid = pdbid
                    n.sunid = int(n.sunid)
                    self._sunidDict[n.sunid] = n
                    self._sidDict[n.sid] = n
                des_list.append( self._sunidDict[int(d[0])] )
        return des_list
    def write_hie_sql(self, handle):
        """Write HIE data to SQL database"""
        cur = handle.cursor()
        cur.execute("DROP TABLE IF EXISTS hie")
        cur.execute("CREATE TABLE hie (parent INT, child INT, PRIMARY KEY (child),\
        INDEX (parent) )")
        for p in self._sunidDict.itervalues():
            for c in p.children:
                cur.execute("INSERT INTO hie VALUES (%s,%s)" % (p.sunid, c.sunid))
    def write_cla_sql(self, handle):
        """Write CLA data to SQL database"""
        cur = handle.cursor()
        cur.execute("DROP TABLE IF EXISTS cla")
        cur.execute("CREATE TABLE cla (sunid INT, sid CHAR(8), pdbid CHAR(4),\
        residues VARCHAR(50), sccs CHAR(10), cl INT, cf INT, sf INT, fa INT,\
        dm INT, sp INT, px INT, PRIMARY KEY (sunid), INDEX (SID) )")
        for n in self._sidDict.itervalues():
            c = n.toClaRecord()
            cur.execute( "INSERT INTO cla VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
                         (n.sunid, n.sid, c.residues.pdbid, c.residues, n.sccs,
                          n.getAscendent('cl').sunid, n.getAscendent('cf').sunid,
                          n.getAscendent('sf').sunid, n.getAscendent('fa').sunid,
                          n.getAscendent('dm').sunid, n.getAscendent('sp').sunid,
                          n.sunid ))
    def write_des_sql(self, handle):
        """Write DES data to SQL database"""
        cur = handle.cursor()
        cur.execute("DROP TABLE IF EXISTS des")
        cur.execute("CREATE TABLE des (sunid INT, type CHAR(2), sccs CHAR(10),\
        description VARCHAR(255),\
        PRIMARY KEY (sunid) )")
        for n in self._sunidDict.itervalues():
            cur.execute( "INSERT INTO des VALUES (%s,%s,%s,%s)",
                         ( n.sunid, n.type, n.sccs, n.description ) )
class Node:
    """ A node in the Scop hierarchy

    sunid       -- SCOP unique identifiers. e.g. '14986'
    parent      -- The parent node
    children    -- A list of child nodes
    sccs        -- SCOP concise classification string. e.g. 'a.1.1.2'
    type        -- A 2 letter node type code. e.g. 'px' for domains
    description -- Free-text description of the node
    """
    def __init__(self, scop=None):
        """Create a Node in the scop hierarchy.  If a Scop instance is provided to the
        constructor, this will be used to lookup related references using the SQL
        methods.  If no instance is provided, it is assumed the whole tree exists
        and is connected."""
        self.sunid=''
        self.parent = None
        self.children=[]
        self.sccs = ''
        self.type =''
        self.description =''
        self.scop=scop
    def __str__(self):
        s = []
        s.append(str(self.sunid))
        s.append(self.sccs)
        s.append(self.type)
        s.append(self.description)
        return " ".join(s)
    def toHieRecord(self):
        """Return an Hie.Record"""
        rec = Hie.Record()
        rec.sunid = str(self.sunid)
        if self.getParent() : #Not root node
            rec.parent = str(self.getParent().sunid)
        else:
            rec.parent = '-'
        for c in self.getChildren():
            rec.children.append(str(c.sunid))
        return rec
    def toDesRecord(self):
        """Return a Des.Record"""
        rec = Des.Record()
        rec.sunid = str(self.sunid)
        rec.nodetype = self.type
        rec.sccs = self.sccs
        rec.description = self.description
        return rec
    def getChildren(self):
        """Return a list of children of this Node"""
        if self.scop is None:
            return self.children
        else:
            # with a SQL backend, children are stored as sunids and resolved lazily
            return map ( self.scop.getNodeBySunid, self.children )
    def getParent(self):
        """Return the parent of this Node"""
        if self.scop is None:
            return self.parent
        else:
            return self.scop.getNodeBySunid( self.parent )
    def getDescendents( self, node_type):
        """ Return a list of all descendant nodes of the given type.  Node type
        can be a two letter code or longer description, e.g. 'fa' or 'family'.
        """
        if node_type in _nodetype_to_code:
            node_type = _nodetype_to_code[node_type]
        nodes = [self]
        if self.scop:
            return self.scop.getDescendentsFromSQL(self,node_type)
        # breadth-first walk down the tree until the requested level is reached
        while nodes[0].type != node_type:
            if nodes[0].type == 'px' : return [] # Fell off the bottom of the hierarchy
            child_list = []
            for n in nodes:
                for child in n.getChildren():
                    child_list.append( child )
                nodes = child_list
        return nodes
    def getAscendent( self, node_type):
        """ Return the ancestor node of the given type, or None.  Node type can
        be a two letter code or longer description, e.g. 'fa' or 'family'."""
        if node_type in _nodetype_to_code:
            node_type = _nodetype_to_code[node_type]
        if self.scop:
            return self.scop.getAscendentFromSQL(self,node_type)
        else:
            n = self
            if n.type == node_type: return None
            while n.type != node_type:
                if n.type == 'ro': return None # Fell off the top of the hierarchy
                n = n.getParent()
            return n
class Domain(Node):
    """ A SCOP domain. A leaf node in the Scop hierarchy.

    sid      -- The SCOP domain identifier. e.g. 'd5hbib_'
    residues -- A Residue object. It defines the collection
                of PDB atoms that make up this domain.
    """
    def __init__(self,scop=None):
        Node.__init__(self,scop=scop)
        self.sid = ''
        self.residues = None
    def __str__(self):
        s = []
        s.append(self.sid)
        s.append(self.sccs)
        s.append("("+str(self.residues)+")")
        if not self.getParent():
            s.append(self.description)
        else:
            # attached to the tree: show the protein description plus the
            # species description in braces (ASTRAL header style)
            sp = self.getParent()
            dm = sp.getParent()
            s.append(dm.description)
            s.append("{"+sp.description+"}")
        return " ".join(s)
    def toDesRecord(self):
        """Return a Des.Record"""
        rec = Node.toDesRecord(self)
        rec.name = self.sid
        return rec
    def toClaRecord(self):
        """Return a Cla.Record"""
        rec = Cla.Record()
        rec.sid = self.sid
        rec.residues = self.residues
        rec.sccs = self.sccs
        rec.sunid = self.sunid
        # walk up to the root collecting (type, sunid) pairs, then reverse so
        # the hierarchy reads root-first
        n = self
        while n.sunid != 0: #Not root node
            rec.hierarchy.append( (n.type, str(n.sunid)) )
            n = n.getParent()
        rec.hierarchy.reverse()
        return rec
class Astral:
    """Abstraction of the ASTRAL database, which has sequences for all the SCOP domains,
    as well as clusterings by percent id or evalue.
    """
    def __init__( self, dir_path=None, version=None, scop=None,
                  astral_file=None, db_handle=None):
        """
        Initialise the astral database.

        You must provide either a directory of SCOP files:
        dir_path - string, the path to location of the scopseq-x.xx directory
                   (not the directory itself), and
        version  - a version number.

        or, a FASTA file:
        astral_file - string, a path to a fasta file (which will be loaded in memory)

        or, a MYSQL database:
        db_handle - a database handle for a MYSQL database containing a table
                    'astral' with the astral data in it.  This can be created
                    using writeToSQL.
        """
        if astral_file==dir_path==db_handle==None:
            raise RuntimeError("Need either file handle, or (dir_path + "\
                               + "version) or database handle to construct Astral")
        if not scop:
            raise RuntimeError("Must provide a Scop instance to construct")
        self.scop = scop
        self.db_handle = db_handle
        if not astral_file and not db_handle:
            if dir_path == None or version == None:
                raise RuntimeError("must provide dir_path and version")
            # NOTE(review): self.path / self.version are only set on this
            # branch; domainsClusteredByEv/ById read self.path and will raise
            # AttributeError when constructed from astral_file alone — confirm
            self.version = version
            self.path = os.path.join( dir_path, "scopseq-%s" % version)
            astral_file = "astral-scopdom-seqres-all-%s.fa" % self.version
            astral_file = os.path.join (self.path, astral_file)
        if astral_file:
            #Build a dictionary of SeqRecord objects in the FASTA file, IN MEMORY
            self.fasta_dict = SeqIO.to_dict(SeqIO.parse(open(astral_file), "fasta"))
        self.astral_file = astral_file
        # caches: threshold -> list of Domains / dict of Domains
        self.EvDatasets = {}
        self.EvDatahash = {}
        self.IdDatasets = {}
        self.IdDatahash = {}
    def domainsClusteredByEv(self,id):
        """get domains clustered by evalue"""
        if id not in self.EvDatasets:
            if self.db_handle:
                self.EvDatasets[id] = self.getAstralDomainsFromSQL(astralEv_to_sql[id])
            else:
                if not self.path:
                    raise RuntimeError("No scopseq directory specified")
                file_prefix = "astral-scopdom-seqres-sel-gs"
                filename = "%s-e100m-%s-%s.id" % (file_prefix, astralEv_to_file[id] ,
                                                  self.version)
                filename = os.path.join(self.path,filename)
                self.EvDatasets[id] = self.getAstralDomainsFromFile(filename)
        return self.EvDatasets[id]
    def domainsClusteredById(self,id):
        """get domains clustered by percent id"""
        if id not in self.IdDatasets:
            if self.db_handle:
                self.IdDatasets[id] = self.getAstralDomainsFromSQL("id"+str(id))
            else:
                if not self.path:
                    raise RuntimeError("No scopseq directory specified")
                file_prefix = "astral-scopdom-seqres-sel-gs"
                filename = "%s-bib-%s-%s.id" % (file_prefix, id, self.version)
                filename = os.path.join(self.path,filename)
                self.IdDatasets[id] = self.getAstralDomainsFromFile(filename)
        return self.IdDatasets[id]
    def getAstralDomainsFromFile(self,filename=None,file_handle=None):
        """Get the scop domains from a file containing a list of sids"""
        if file_handle == filename == None:
            raise RuntimeError("You must provide a filename or handle")
        if not file_handle:
            file_handle = open(filename)
        doms = []
        while 1:
            line = file_handle.readline()
            if not line:
                break
            line = line.rstrip()
            doms.append(line)
        if filename:
            # only close the handle if we opened it ourselves
            file_handle.close()
        # keep only domain sids (they start with 'd') and resolve them
        doms = filter( lambda a: a[0]=='d', doms )
        doms = map( self.scop.getDomainBySid, doms )
        return doms
    def getAstralDomainsFromSQL(self, column):
        """Load a set of astral domains from a column in the astral table of a MYSQL
        database (which can be created with writeToSQL(...)"""
        cur = self.db_handle.cursor()
        cur.execute("SELECT sid FROM astral WHERE "+column+"=1")
        data = cur.fetchall()
        data = map( lambda x: self.scop.getDomainBySid(x[0]), data)
        return data
    def getSeqBySid(self,domain):
        """get the seq record of a given domain from its sid"""
        if self.db_handle is None:
            return self.fasta_dict[domain].seq
        else:
            cur = self.db_handle.cursor()
            cur.execute("SELECT seq FROM astral WHERE sid=%s", domain)
            return Seq(cur.fetchone()[0])
    def getSeq(self,domain):
        """Return seq associated with domain"""
        return self.getSeqBySid(domain.sid)
    def hashedDomainsById(self,id):
        """Get domains clustered by sequence identity in a dict"""
        if id not in self.IdDatahash:
            self.IdDatahash[id] = {}
            for d in self.domainsClusteredById(id):
                self.IdDatahash[id][d] = 1
        return self.IdDatahash[id]
    def hashedDomainsByEv(self,id):
        """Get domains clustered by evalue in a dict"""
        if id not in self.EvDatahash:
            self.EvDatahash[id] = {}
            for d in self.domainsClusteredByEv(id):
                self.EvDatahash[id][d] = 1
        return self.EvDatahash[id]
    def isDomainInId(self,dom,id):
        """Returns true if the domain is in the astral clusters for percent ID"""
        return dom in self.hashedDomainsById(id)
    def isDomainInEv(self,dom,id):
        """Returns true if the domain is in the ASTRAL clusters for evalues"""
        return dom in self.hashedDomainsByEv(id)
    def writeToSQL(self, db_handle):
        """Write the ASTRAL database to a MYSQL database"""
        cur = db_handle.cursor()
        cur.execute("DROP TABLE IF EXISTS astral")
        cur.execute("CREATE TABLE astral (sid CHAR(8), seq TEXT, PRIMARY KEY (sid))")
        for dom in self.fasta_dict:
            cur.execute("INSERT INTO astral (sid,seq) values (%s,%s)",
                        (dom, self.fasta_dict[dom].seq.data))
        # one TINYINT flag column per identity threshold
        for i in astralBibIds:
            cur.execute("ALTER TABLE astral ADD (id"+str(i)+" TINYINT)")
            for d in self.domainsClusteredById(i):
                cur.execute("UPDATE astral SET id"+str(i)+"=1  WHERE sid=%s",
                            d.sid)
        # one TINYINT flag column per e-value threshold
        for ev in astralEvs:
            cur.execute("ALTER TABLE astral ADD ("+astralEv_to_sql[ev]+" TINYINT)")
            for d in self.domainsClusteredByEv(ev):
                cur.execute("UPDATE astral SET "+astralEv_to_sql[ev]+"=1  WHERE sid=%s",
                            d.sid)
def search(pdb=None, key=None, sid=None, disp=None, dir=None, loc=None,
           cgi='http://scop.mrc-lmb.cam.ac.uk/scop/search.cgi', **keywds):
    """search(pdb=None, key=None, sid=None, disp=None, dir=None, loc=None,
    cgi='http://scop.mrc-lmb.cam.ac.uk/scop/search.cgi', **keywds)
    Access search.cgi and return a handle to the results. See the
    online help file for an explanation of the parameters:
    http://scop.mrc-lmb.cam.ac.uk/scop/help.html
    Raises an IOError if there's a network error.
    """
    params = {'pdb' : pdb, 'key' : key, 'sid' : sid, 'disp' : disp,
              'dir' : dir, 'loc' : loc}
    variables = {}
    # BUG FIX: 'iteritmes' was a typo that raised AttributeError on every
    # call; items() is correct and also works on both Python 2 and 3.
    for k, v in params.items():
        if v is not None:
            variables[k] = v
    variables.update(keywds)
    return _open(cgi, variables)
def _open(cgi, params={}, get=1):
    """_open(cgi, params={}, get=1) -> UndoHandle
    Open a handle to SCOP. cgi is the URL for the cgi script to access.
    params is a dictionary with the options to pass to it. get is a boolean
    that describes whether a GET should be used. Does some
    simple error checking, and will raise an IOError if it encounters one.
    """
    # NOTE(review): mutable default `params={}` is harmless here (never
    # mutated), but `params=None` would be the safer convention.
    # Python 2 only: urllib.urlencode/urlopen moved in Python 3.
    import urllib
    from Bio import File
    # Open a handle to SCOP.
    options = urllib.urlencode(params)
    if get: # do a GET
        fullcgi = cgi
        if options:
            # Append the urlencoded options as a query string.
            fullcgi = "%s?%s" % (cgi, options)
        handle = urllib.urlopen(fullcgi)
    else: # do a POST
        # Passing a data argument to urlopen switches it to POST.
        handle = urllib.urlopen(cgi, options)
    # Wrap the handle inside an UndoHandle.
    uhandle = File.UndoHandle(handle)
    # Should I check for 404? timeout? etc?
    return uhandle
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/SCOP/__init__.py
|
Python
|
gpl-2.0
| 32,657
|
[
"Biopython"
] |
b9f9a872f69e582ee74154eb320ea91403c825a8223ea2863b920e5bce70cec0
|
#Brian Windle
# Reads a whitespace-separated device list and a comma-separated key/value
# detail file, then serializes the combined structure to YAML and JSON.
import yaml
import json
yaml_file = 'myyaml.yml'
json_file = 'myjson.json'
# NOTE(review): this handle is never closed; a with-block would be safer.
file = open('/home/bwindle/device', 'r')
device = list(file.read().split())
with open('/home/bwindle/detail', 'r') as document:
    detail = {}
    for line in document:
        x = line.split(",")
        a=x[0]  # key: text before the first comma
        b=x[1]  # value: text after the first comma
        c=len(b)-1
        b=b[0:c]  # drops the last character — presumably the '\n'; rstrip would be clearer
        detail[a]=b
        # NOTE(review): append happens inside the loop, so the same dict
        # object is appended once per input line; this looks like it was
        # meant to run once after the loop — confirm intent.
        device.append(detail)
with open(yaml_file, "w") as f:
    f.write(yaml.dump(device, default_flow_style=False))
with open(json_file, "w") as f:
    json.dump(device, f)
|
bkwin66/pyclass
|
week1/ex6.py
|
Python
|
apache-2.0
| 552
|
[
"Brian"
] |
02f2f1cceacaab5ee87b3411cdb572c663d6ecbcd2494c8de99fa12733501816
|
# coding=utf8
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Collection of subprocess wrapper functions.
In theory you shouldn't need anything else in subprocess, or this module failed.
"""
import cStringIO
import errno
import logging
import os
import Queue
import subprocess
import sys
import time
import threading
# Constants forwarded from subprocess.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
# Sends stdout or stderr to os.devnull.
VOID = object()
# Error code when a process was killed because it timed out.
TIMED_OUT = -2001
# Globals.
# Set to True if you somehow need to disable this hack.
SUBPROCESS_CLEANUP_HACKED = False
class CalledProcessError(subprocess.CalledProcessError):
  """Augment the standard exception with more data.

  Carries the working directory plus both captured streams so error
  messages can show the full context of the failed command.
  """
  def __init__(self, returncode, cmd, cwd, stdout, stderr):
    super(CalledProcessError, self).__init__(returncode, cmd, output=stdout)
    self.stdout = self.output  # for backward compatibility.
    self.stderr = stderr
    self.cwd = cwd
  def __str__(self):
    message = 'Command %r returned non-zero exit status %s' % (
        ' '.join(self.cmd), self.returncode)
    if self.cwd:
      message = message + ' in ' + self.cwd
    # Append whichever captured streams are non-empty.
    parts = (message, self.stdout, self.stderr)
    return '\n'.join(p for p in parts if p)
# Raised by Popen.__init__ when fork() fails with EAGAIN on cygwin.
class CygwinRebaseError(CalledProcessError):
  """Occurs when cygwin's fork() emulation fails due to rebased dll."""
## Utility functions
def kill_pid(pid):
  """Kills a process by its process id."""
  try:
    # Unable to import 'module'
    # pylint: disable=E1101,F0401
    import signal
  except ImportError:
    return
  return os.kill(pid, signal.SIGKILL)
def kill_win(process):
  """Kills a process with its windows handle.
  Has no effect on other platforms.
  """
  try:
    # Unable to import 'module'
    # pylint: disable=F0401
    import win32process
  except ImportError:
    # Not on Windows (or pywin32 missing): silently do nothing.
    return
  # Access to a protected member _handle of a client class
  # pylint: disable=W0212
  return win32process.TerminateProcess(process._handle, -1)
def add_kill():
  """Adds kill() method to subprocess.Popen for python <2.6"""
  if not hasattr(subprocess.Popen, 'kill'):
    if sys.platform == 'win32':
      subprocess.Popen.kill = kill_win
    else:
      subprocess.Popen.kill = lambda proc: kill_pid(proc.pid)
def hack_subprocess():
  """subprocess functions may throw exceptions when used in multiple threads.
  See http://bugs.python.org/issue1731717 for more information.
  """
  global SUBPROCESS_CLEANUP_HACKED
  if SUBPROCESS_CLEANUP_HACKED:
    return
  if threading.activeCount() == 1:
    # With a single thread the race can't happen, so don't leak needlessly.
    return
  subprocess._cleanup = lambda: None
  SUBPROCESS_CLEANUP_HACKED = True
def get_english_env(env):
  """Forces LANG and/or LANGUAGE to be English.
  Forces encoding to utf-8 for subprocesses.
  Returns None if it is unnecessary.
  """
  if sys.platform == 'win32':
    return None
  env = env or os.environ
  def is_english(name):
    # Missing variables default to 'en', i.e. are treated as English.
    return env.get(name, 'en').startswith('en')
  if is_english('LANG') and is_english('LANGUAGE'):
    # Nothing to override.
    return None
  # Copy before mutating so the caller's mapping is untouched.
  env = env.copy()
  for name in ('LANG', 'LANGUAGE'):
    if not is_english(name):
      env[name] = 'en_US.UTF-8'
  return env
class NagTimer(object):
  """
  Triggers a callback when a time interval passes without an event being fired.
  For example, the event could be receiving terminal output from a subprocess;
  and the callback could print a warning to stderr that the subprocess appeared
  to be hung.
  """
  def __init__(self, interval, cb):
    # interval: seconds of silence tolerated before cb fires.
    # cb: callable invoked with the elapsed seconds since the last event.
    self.interval = interval
    self.cb = cb
    # Timer is created here but only armed by start().
    self.timer = threading.Timer(self.interval, self.fn)
    # 0 is a sentinel meaning "never started"; start() stamps real times.
    self.last_output = self.previous_last_output = 0
  def start(self):
    # Begin the watch: both timestamps start equal, so if no event() arrives
    # before the first timer firing, the callback triggers.
    self.last_output = self.previous_last_output = time.time()
    self.timer.start()
  def event(self):
    # Record activity; resets the "silence" measured by the next fn() firing.
    self.last_output = time.time()
  def fn(self):
    now = time.time()
    # No event since the previous firing -> report how long it's been quiet.
    if self.last_output == self.previous_last_output:
      self.cb(now - self.previous_last_output)
    # Use 0.1 fudge factor, just in case
    # (self.last_output - now) is very close to zero.
    # Reschedule so the next check lands roughly `interval` after the most
    # recent event (modulo keeps the delay within one interval).
    sleep_time = (self.last_output - now - 0.1) % self.interval
    self.previous_last_output = self.last_output
    # threading.Timer objects are single-shot; a fresh one is needed each time.
    self.timer = threading.Timer(sleep_time + 0.1, self.fn)
    self.timer.start()
  def cancel(self):
    # Stop the pending timer; no further callbacks will fire.
    self.timer.cancel()
class Popen(subprocess.Popen):
  """Wraps subprocess.Popen() with various workarounds.
  - Forces English output since it's easier to parse the stdout if it is always
    in English.
  - Sets shell=True on windows by default. You can override this by forcing
    shell parameter to a value.
  - Adds support for VOID to not buffer when not needed.
  - Adds self.start property.
  Note: Popen() can throw OSError when cwd or args[0] doesn't exist. Translate
  exceptions generated by cygwin when it fails trying to emulate fork().
  """
  def __init__(self, args, **kwargs):
    # Make sure we hack subprocess if necessary.
    hack_subprocess()
    add_kill()
    # Force English locale for parseable output (None when no change needed).
    env = get_english_env(kwargs.get('env'))
    if env:
      kwargs['env'] = env
    if kwargs.get('shell') is None:
      # *Sigh*: Windows needs shell=True, or else it won't search %PATH% for
      # the executable, but shell=True makes subprocess on Linux fail when it's
      # called with a list because it only tries to execute the first item in
      # the list.
      kwargs['shell'] = bool(sys.platform=='win32')
    # Build a printable form of the command for logging and error messages.
    if isinstance(args, basestring):
      tmp_str = args
    elif isinstance(args, (list, tuple)):
      tmp_str = ' '.join(args)
    else:
      raise CalledProcessError(None, args, kwargs.get('cwd'), None, None)
    if kwargs.get('cwd', None):
      tmp_str += '; cwd=%s' % kwargs['cwd']
    logging.debug(tmp_str)
    # Per-stream callback and VOID bookkeeping, consumed by _tee_threads().
    self.stdout_cb = None
    self.stderr_cb = None
    self.stdin_is_void = False
    self.stdout_is_void = False
    self.stderr_is_void = False
    self.cmd_str = tmp_str
    if kwargs.get('stdin') is VOID:
      kwargs['stdin'] = open(os.devnull, 'r')
      self.stdin_is_void = True
    for stream in ('stdout', 'stderr'):
      # VOID (or an explicit os.devnull) means "discard this stream".
      if kwargs.get(stream) in (VOID, os.devnull):
        kwargs[stream] = open(os.devnull, 'w')
        setattr(self, stream + '_is_void', True)
      # A callable means "stream data to this callback"; swap in a PIPE.
      if callable(kwargs.get(stream)):
        setattr(self, stream + '_cb', kwargs[stream])
        kwargs[stream] = PIPE
    # NOTE: shadows any base-class 'start'; records process launch time.
    self.start = time.time()
    self.timeout = None
    self.nag_timer = None
    self.nag_max = None
    self.shell = kwargs.get('shell', None)
    # Silence pylint on MacOSX
    self.returncode = None
    try:
      super(Popen, self).__init__(args, **kwargs)
    except OSError, e:
      if e.errno == errno.EAGAIN and sys.platform == 'cygwin':
        # Convert fork() emulation failure into a CygwinRebaseError().
        raise CygwinRebaseError(
            e.errno,
            args,
            kwargs.get('cwd'),
            None,
            'Visit '
            'http://code.google.com/p/chromium/wiki/CygwinDllRemappingFailure '
            'to learn how to fix this error; you need to rebase your cygwin '
            'dlls')
      # Popen() can throw OSError when cwd or args[0] doesn't exist.
      raise OSError('Execution failed with error: %s.\n'
                    'Check that %s or %s exist and have execution permission.'
                    % (str(e), kwargs.get('cwd'), args[0]))
  def _tee_threads(self, input): # pylint: disable=W0622
    """Does I/O for a process's pipes using threads.
    It's the simplest and slowest implementation. Expect very slow behavior.
    If there is a callback and it doesn't keep up with the calls, the timeout
    effectiveness will be delayed accordingly.
    """
    # Queue of either of <threadname> when done or (<threadname>, data). In
    # theory we would like to limit to ~64kb items to not cause large memory
    # usage when the callback blocks. It is not done because it slows down
    # processing on OSX10.6 by a factor of 2x, making it even slower than
    # Windows! Revisit this decision if it becomes a problem, e.g. crash
    # because of memory exhaustion.
    queue = Queue.Queue()
    done = threading.Event()
    nag = None
    def write_stdin():
      # Feed `input` to the child's stdin in 1KB chunks, then close it.
      try:
        stdin_io = cStringIO.StringIO(input)
        while True:
          data = stdin_io.read(1024)
          if data:
            self.stdin.write(data)
          else:
            self.stdin.close()
            break
      finally:
        queue.put('stdin')
    def _queue_pipe_read(pipe, name):
      """Queues characters read from a pipe into a queue."""
      try:
        while True:
          # One byte at a time: slow, but delivers output promptly.
          data = pipe.read(1)
          if not data:
            break
          if nag:
            nag.event()
          queue.put((name, data))
      finally:
        # Bare name (no data) signals this reader thread is done.
        queue.put(name)
    def timeout_fn():
      try:
        done.wait(self.timeout)
      finally:
        queue.put('timeout')
    def wait_fn():
      try:
        self.wait()
      finally:
        queue.put('wait')
    # Starts up to 5 threads:
    # Wait for the process to quit
    # Read stdout
    # Read stderr
    # Write stdin
    # Timeout
    threads = {
        'wait': threading.Thread(target=wait_fn),
    }
    if self.timeout is not None:
      threads['timeout'] = threading.Thread(target=timeout_fn)
    if self.stdout_cb:
      threads['stdout'] = threading.Thread(
          target=_queue_pipe_read, args=(self.stdout, 'stdout'))
    if self.stderr_cb:
      threads['stderr'] = threading.Thread(
          target=_queue_pipe_read, args=(self.stderr, 'stderr'))
    if input:
      threads['stdin'] = threading.Thread(target=write_stdin)
    elif self.stdin:
      # Pipe but no input, make sure it's closed.
      self.stdin.close()
    for t in threads.itervalues():
      t.start()
    if self.nag_timer:
      def _nag_cb(elapsed):
        logging.warn(' No output for %.0f seconds from command:' % elapsed)
        logging.warn(' %s' % self.cmd_str)
        # After nag_max silent intervals, treat the process as timed out.
        if (self.nag_max and
            int('%.0f' % (elapsed / self.nag_timer)) >= self.nag_max):
          queue.put('timeout')
          done.set() # Must do this so that timeout thread stops waiting.
      nag = NagTimer(self.nag_timer, _nag_cb)
      nag.start()
    timed_out = False
    try:
      # This thread needs to be optimized for speed.
      while threads:
        item = queue.get()
        if item[0] == 'stdout':
          self.stdout_cb(item[1])
        elif item[0] == 'stderr':
          self.stderr_cb(item[1])
        else:
          # A thread terminated.
          if item in threads:
            threads[item].join()
            del threads[item]
          if item == 'wait':
            # Terminate the timeout thread if necessary.
            done.set()
          elif item == 'timeout' and not timed_out and self.poll() is None:
            logging.debug('Timed out after %.0fs: killing' % (
                time.time() - self.start))
            self.kill()
            timed_out = True
    finally:
      # Stop the threads.
      done.set()
      if nag:
        nag.cancel()
      if 'wait' in threads:
        # Accelerate things, otherwise it would hang until the child process is
        # done.
        logging.debug('Killing child because of an exception')
        self.kill()
      # Join threads.
      for thread in threads.itervalues():
        thread.join()
      if timed_out:
        self.returncode = TIMED_OUT
  # pylint: disable=W0221,W0622
  def communicate(self, input=None, timeout=None, nag_timer=None,
                  nag_max=None):
    """Adds timeout and callbacks support.
    Returns (stdout, stderr) like subprocess.Popen().communicate().
    - The process will be killed after |timeout| seconds and returncode set to
      TIMED_OUT.
    - If the subprocess runs for |nag_timer| seconds without producing terminal
      output, print a warning to stderr.
    """
    self.timeout = timeout
    self.nag_timer = nag_timer
    self.nag_max = nag_max
    # Fast path: no extra features requested, defer to the stock
    # implementation.
    if (not self.timeout and not self.nag_timer and
        not self.stdout_cb and not self.stderr_cb):
      return super(Popen, self).communicate(input)
    if self.timeout and self.shell:
      raise TypeError(
          'Using timeout and shell simultaneously will cause a process leak '
          'since the shell will be killed instead of the child process.')
    stdout = None
    stderr = None
    # Convert to a lambda to workaround python's deadlock.
    # http://docs.python.org/library/subprocess.html#subprocess.Popen.wait
    # When the pipe fills up, it would deadlock this process.
    if self.stdout and not self.stdout_cb and not self.stdout_is_void:
      stdout = []
      self.stdout_cb = stdout.append
    if self.stderr and not self.stderr_cb and not self.stderr_is_void:
      stderr = []
      self.stderr_cb = stderr.append
    self._tee_threads(input)
    # Collapse the accumulated per-byte chunks into single strings.
    if stdout is not None:
      stdout = ''.join(stdout)
    if stderr is not None:
      stderr = ''.join(stderr)
    return (stdout, stderr)
def communicate(args, timeout=None, nag_timer=None, nag_max=None, **kwargs):
  """Wraps subprocess.Popen().communicate() and add timeout support.
  Returns ((stdout, stderr), returncode).
  - The process will be killed after |timeout| seconds and returncode set to
    TIMED_OUT.
  - If the subprocess runs for |nag_timer| seconds without producing terminal
    output, print a warning to stderr.
  - Automatically passes stdin content as input so do not specify stdin=PIPE.
  """
  stdin = kwargs.pop('stdin', None)
  if stdin is not None:
    if isinstance(stdin, basestring):
      # When stdin is passed as an argument, use it as the actual input data and
      # set the Popen() parameter accordingly.
      kwargs['stdin'] = PIPE
    else:
      kwargs['stdin'] = stdin
      stdin = None
  proc = Popen(args, **kwargs)
  # BUG FIX: nag_max was accepted but silently dropped; forward it so the
  # "kill after N silent nag intervals" feature actually works.
  if stdin:
    return proc.communicate(stdin, timeout, nag_timer, nag_max), proc.returncode
  else:
    return proc.communicate(None, timeout, nag_timer, nag_max), proc.returncode
def call(args, **kwargs):
  """Emulates subprocess.call().
  Automatically convert stdout=PIPE or stderr=PIPE to VOID.
  In no case they can be returned since no code path raises
  subprocess2.CalledProcessError.
  """
  for stream in ('stdout', 'stderr'):
    if kwargs.get(stream) == PIPE:
      kwargs[stream] = VOID
  return communicate(args, **kwargs)[1]
def check_call_out(args, **kwargs):
  """Improved version of subprocess.check_call().
  Returns (stdout, stderr), unlike subprocess.check_call().
  """
  out, returncode = communicate(args, **kwargs)
  if not returncode:
    return out
  raise CalledProcessError(
      returncode, args, kwargs.get('cwd'), out[0], out[1])
def check_call(args, **kwargs):
  """Emulate subprocess.check_call()."""
  # Captured output is discarded; a failure surfaces as CalledProcessError.
  check_call_out(args, **kwargs)
  return 0
def capture(args, **kwargs):
  """Captures stdout of a process call and returns it.
  Returns stdout.
  - Discards returncode.
  - Blocks stdin by default if not specified since no output will be visible.
  """
  kwargs.setdefault('stdin', VOID)
  # communicate() returns ((stdout, stderr), returncode); keep only stdout.
  (stdout, _), _ = communicate(args, stdout=PIPE, **kwargs)
  return stdout
def check_output(args, **kwargs):
  """Emulates subprocess.check_output().
  Captures stdout of a process call and returns stdout only.
  - Throws if return code is not 0.
  - Works even prior to python 2.7.
  - Blocks stdin by default if not specified since no output will be visible.
  - As per doc, "The stdout argument is not allowed as it is used internally."
  """
  if 'stdout' in kwargs:
    raise ValueError('stdout argument not allowed, it would be overridden.')
  kwargs.setdefault('stdin', VOID)
  stdout, _ = check_call_out(args, stdout=PIPE, **kwargs)
  return stdout
|
chinmaygarde/depot_tools
|
subprocess2.py
|
Python
|
bsd-3-clause
| 15,980
|
[
"VisIt"
] |
a18d4d522e5989a361ab8b3470aeaa4e9f197ffaa259aca6343f4c95b116bb1b
|
# -*- coding: utf-8 -*-
"""
Bok choy acceptance tests for problems in the LMS
See also old lettuce tests in lms/djangoapps/courseware/features/problems.feature
"""
from textwrap import dedent
from ..helpers import UniqueCourseTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.problem import ProblemPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ..helpers import EventsTestMixin
class ProblemsTest(UniqueCourseTest):
    """
    Base class for tests of problems in the LMS.

    Subclasses supply the problem XBlock via get_problem(); setUp installs
    a minimal chapter/subsection hierarchy containing it and registers a
    non-staff test user in the course.
    """
    # Fixed credentials for the auto-auth test user.
    USERNAME = "joe_student"
    EMAIL = "joe@example.com"
    def setUp(self):
        super(ProblemsTest, self).setUp()
        self.xqueue_grade_response = None
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        # Install a course with a hierarchy and problems
        course_fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        problem = self.get_problem()
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(problem)
            )
        ).install()
        # Auto-auth register for the course.
        AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
                     course_id=self.course_id, staff=False).visit()
    def get_problem(self):
        """ Subclasses should override this to complete the fixture """
        raise NotImplementedError()
class ProblemClarificationTest(ProblemsTest):
    """
    Tests the <clarification> element that can be used in problem XML.
    """
    def get_problem(self):
        """
        Create a problem with a <clarification>
        """
        xml = dedent("""
            <problem markdown="null">
                <text>
                    <p>
                        Given the data in Table 7 <clarification>Table 7: "Example PV Installation Costs",
                        Page 171 of Roberts textbook</clarification>, compute the ROI
                        <clarification>Return on Investment <strong>(per year)</strong></clarification> over 20 years.
                    </p>
                    <numericalresponse answer="6.5">
                        <textline label="Enter the annual ROI" trailing_text="%" />
                    </numericalresponse>
                </text>
            </problem>
        """)
        return XBlockFixtureDesc('problem', 'TOOLTIP TEST PROBLEM', data=xml)
    def test_clarification(self):
        """
        Test that we can see the <clarification> tooltips.
        """
        self.courseware_page.visit()
        problem_page = ProblemPage(self.browser)
        self.assertEqual(problem_page.problem_name, 'TOOLTIP TEST PROBLEM')
        # Each click_clarification(i) opens the i-th tooltip on the page.
        problem_page.click_clarification(0)
        self.assertIn('"Example PV Installation Costs"', problem_page.visible_tooltip_text)
        problem_page.click_clarification(1)
        tooltip_text = problem_page.visible_tooltip_text
        self.assertIn('Return on Investment', tooltip_text)
        self.assertIn('per year', tooltip_text)
        # The <strong> markup must be rendered, not shown as literal text.
        self.assertNotIn('strong', tooltip_text)
class ProblemExtendedHintTest(ProblemsTest, EventsTestMixin):
    """
    Test that extended hint features plumb through to the page html and tracking log.
    """
    def get_problem(self):
        """
        Problem with extended hint features.
        """
        xml = dedent("""
            <problem>
            <p>question text</p>
            <stringresponse answer="A">
                <stringequalhint answer="B">hint</stringequalhint>
                <textline size="20"/>
            </stringresponse>
            <demandhint>
              <hint>demand-hint1</hint>
              <hint>demand-hint2</hint>
            </demandhint>
            </problem>
        """)
        return XBlockFixtureDesc('problem', 'TITLE', data=xml)
    def test_check_hint(self):
        """
        Test clicking Check shows the extended hint in the problem message.
        """
        self.courseware_page.visit()
        problem_page = ProblemPage(self.browser)
        self.assertEqual(problem_page.problem_text[0], u'question text')
        # 'B' matches the stringequalhint answer, triggering the hint text.
        problem_page.fill_answer('B')
        problem_page.click_check()
        self.assertEqual(problem_page.message_text, u'Incorrect: hint')
        # Check for corresponding tracking event
        actual_events = self.wait_for_events(
            event_filter={'event_type': 'edx.problem.hint.feedback_displayed'},
            number_of_matches=1
        )
        self.assert_events_match(
            [{'event': {'hint_label': u'Incorrect',
                        'trigger_type': 'single',
                        'student_answer': [u'B'],
                        'correctness': False,
                        'question_type': 'stringresponse',
                        'hints': [{'text': 'hint'}]}}],
            actual_events)
    def test_demand_hint(self):
        """
        Test clicking hint button shows the demand hint in its div.
        """
        self.courseware_page.visit()
        problem_page = ProblemPage(self.browser)
        # The hint button rotates through multiple hints
        problem_page.click_hint()
        self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): demand-hint1')
        problem_page.click_hint()
        self.assertEqual(problem_page.hint_text, u'Hint (2 of 2): demand-hint2')
        # A third click wraps around to the first hint again.
        problem_page.click_hint()
        self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): demand-hint1')
        # Check corresponding tracking events
        actual_events = self.wait_for_events(
            event_filter={'event_type': 'edx.problem.hint.demandhint_displayed'},
            number_of_matches=3
        )
        self.assert_events_match(
            [
                {'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'demand-hint1'}},
                {'event': {u'hint_index': 1, u'hint_len': 2, u'hint_text': u'demand-hint2'}},
                {'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'demand-hint1'}}
            ],
            actual_events)
class ProblemWithMathjax(ProblemsTest):
    """
    Tests the <MathJax> used in problem
    """
    def get_problem(self):
        """
        Create a problem with a <MathJax> in body and hint
        """
        xml = dedent(r"""
            <problem>
                <p>Check mathjax has rendered [mathjax]E=mc^2[/mathjax]</p>
                <multiplechoiceresponse>
                  <choicegroup label="Answer this?" type="MultipleChoice">
                    <choice correct="true">Choice1 <choicehint>Correct choice message</choicehint></choice>
                    <choice correct="false">Choice2<choicehint>Wrong choice message</choicehint></choice>
                  </choicegroup>
                </multiplechoiceresponse>
                <demandhint>
                  <hint>mathjax should work1 \(E=mc^2\) </hint>
                  <hint>mathjax should work2 [mathjax]E=mc^2[/mathjax]</hint>
                </demandhint>
            </problem>
        """)
        return XBlockFixtureDesc('problem', 'MATHJAX TEST PROBLEM', data=xml)
    def test_mathjax_in_hint(self):
        """
        Test that MathJax have successfully rendered in problem hint
        """
        self.courseware_page.visit()
        problem_page = ProblemPage(self.browser)
        self.assertEqual(problem_page.problem_name, "MATHJAX TEST PROBLEM")
        # Verify Mathjax have been rendered
        self.assertTrue(problem_page.mathjax_rendered_in_problem, "MathJax did not rendered in body")
        # The hint button rotates through multiple hints
        problem_page.click_hint()
        self.assertIn("Hint (1 of 2): mathjax should work1", problem_page.hint_text)
        self.assertTrue(problem_page.mathjax_rendered_in_hint, "MathJax did not rendered in problem hint")
        # Rotate the hint and check the problem hint
        problem_page.click_hint()
        self.assertIn("Hint (2 of 2): mathjax should work2", problem_page.hint_text)
        self.assertTrue(problem_page.mathjax_rendered_in_hint, "MathJax did not rendered in problem hint")
class ProblemPartialCredit(ProblemsTest):
    """
    Makes sure that the partial credit is appearing properly.
    """
    def get_problem(self):
        """
        Create a problem with partial credit.
        """
        xml = dedent("""
            <problem>
                <p>The answer is 1. Partial credit for -1.</p>
                <numericalresponse answer="1" partial_credit="list">
                    <formulaequationinput label="How many miles away from Earth is the sun? Use scientific notation to answer." />
                    <responseparam type="tolerance" default="0.01" />
                    <responseparam partial_answers="-1" />
                </numericalresponse>
            </problem>
        """)
        return XBlockFixtureDesc('problem', 'PARTIAL CREDIT TEST PROBLEM', data=xml)
    def test_partial_credit(self):
        """
        Test that we can see the partial credit value and feedback.
        """
        self.courseware_page.visit()
        problem_page = ProblemPage(self.browser)
        self.assertEqual(problem_page.problem_name, 'PARTIAL CREDIT TEST PROBLEM')
        # -1 is listed in partial_answers, so checking it yields partial credit.
        problem_page.fill_answer_numerical('-1')
        problem_page.click_check()
        self.assertTrue(problem_page.simpleprob_is_partially_correct())
|
tiagochiavericosta/edx-platform
|
common/test/acceptance/tests/lms/test_lms_problems.py
|
Python
|
agpl-3.0
| 9,654
|
[
"VisIt"
] |
1d95edd6207490bf81ef4f7f8d83c5dc2e66e11c0ef4a5135162056d76770f3e
|
from nose.tools import assert_equal, assert_not_equal #@UnresolvedImport
import copy
from whoosh import fields, query
from whoosh.compat import u
from whoosh.filedb.filestore import RamStorage
from whoosh.qparser import QueryParser
from whoosh.query import (And, AndMaybe, ConstantScoreQuery, Every, DateRange,
DisjunctionMax, FuzzyTerm, Not, NullQuery,
NumericRange, Or, Phrase, Prefix, Require, Term,
TermRange, Variations, Wildcard)
from whoosh.spans import SpanContains, SpanFirst, SpanNear, SpanNot, SpanOr
def test_all_terms():
    """all_terms() yields (field, word) pairs; phrases=True also expands phrase words."""
    q = QueryParser("a", None).parse(u('hello b:there c:"my friend"'))
    ts = q.all_terms(phrases=False)
    assert_equal(sorted(ts), [("a", "hello"), ("b", "there")])
    ts = q.all_terms(phrases=True)
    assert_equal(sorted(ts), [("a", "hello"), ("b", "there"), ("c", "friend"), ("c", "my")])
def test_existing_terms():
    """existing_terms() keeps only query terms present in the index reader."""
    s = fields.Schema(key=fields.ID, value=fields.TEXT)
    ix = RamStorage().create_index(s)
    w = ix.writer()
    w.add_document(key=u("a"), value=u("alfa bravo charlie delta echo"))
    w.add_document(key=u("b"), value=u("foxtrot golf hotel india juliet"))
    w.commit()
    r = ix.reader()
    q = QueryParser("value", None).parse(u('alfa hotel tango "sierra bravo"'))
    # Without phrases, only the plain terms are considered.
    ts = q.existing_terms(r, phrases=False)
    assert_equal(sorted(ts), [("value", "alfa"), ("value", "hotel")])
    ts = q.existing_terms(r)
    assert_equal(sorted(ts), [("value", "alfa"), ("value", "bravo"), ("value", "hotel")])
    # reverse=True collects the terms NOT found in the index.
    ts = set()
    q.existing_terms(r, ts, reverse=True)
    assert_equal(sorted(ts), [("value", "sierra"), ("value", "tango")])
def test_wildcard_existing_terms():
    """Multi-term queries report no existing terms unless expand=True."""
    s = fields.Schema(key=fields.ID, value=fields.TEXT)
    ix = RamStorage().create_index(s)
    w = ix.writer()
    # NOTE(review): both documents use key "a" — presumably intentional since
    # only the 'value' field is inspected; confirm.
    w.add_document(key=u("a"), value=u("alfa bravo bear charlie delta"))
    w.add_document(key=u("a"), value=u("boggle echo render rendering renders"))
    w.commit()
    r = ix.reader()
    qp = QueryParser("value", ix.schema)
    def words(terms):
        # Flatten a set of ("value", word) pairs into a sorted word string.
        z = []
        for t in terms:
            assert t[0] == "value"
            z.append(t[1])
        return " ".join(sorted(z))
    q = qp.parse(u("b*"))
    ts = q.existing_terms(r)
    assert_equal(ts, set())
    ts = q.existing_terms(r, expand=True)
    assert_equal(words(ts), "bear boggle bravo")
    q = qp.parse(u("[a TO f]"))
    ts = q.existing_terms(r)
    assert_equal(ts, set())
    ts = q.existing_terms(r, expand=True)
    assert_equal(words(ts), "alfa bear boggle bravo charlie delta echo")
    q = query.Variations("value", "render")
    ts = q.existing_terms(r, expand=False)
    assert_equal(ts, set())
    ts = q.existing_terms(r, expand=True)
    assert_equal(words(ts), "render rendering renders")
def test_replace():
    """replace() rewrites matching term text only in the named field, keeping boosts."""
    q = And([Or([Term("a", "b"), Term("b", "c")], boost=1.2), Variations("a", "b", boost=2.0)])
    q = q.replace("a", "b", "BB")
    assert_equal(q, And([Or([Term("a", "BB"), Term("b", "c")], boost=1.2),
                         Variations("a", "BB", boost=2.0)]))
def test_apply():
    """apply() maps a function over subqueries, enabling recursive rewrites."""
    def visit(q):
        # Uppercase leaf term text; recurse into compound queries.
        if isinstance(q, (Term, Variations, FuzzyTerm)):
            q.text = q.text.upper()
            return q
        return q.apply(visit)
    before = And([Not(Term("a", u("b"))), Variations("a", u("c")), Not(FuzzyTerm("a", u("d")))])
    after = visit(before)
    assert_equal(after, And([Not(Term("a", u("B"))), Variations("a", u("C")),
                             Not(FuzzyTerm("a", u("D")))]))
    def term2var(q):
        # Replace every Term leaf with an equivalent Variations query.
        if isinstance(q, Term):
            return Variations(q.fieldname, q.text)
        else:
            return q.apply(term2var)
    q = And([Term("f", "alfa"), Or([Term("f", "bravo"), Not(Term("f", "charlie"))])])
    q = term2var(q)
    assert_equal(q, And([Variations('f', 'alfa'),
                         Or([Variations('f', 'bravo'), Not(Variations('f', 'charlie'))])]))
def test_accept():
    """accept() applies a function to every subquery, including nested ones."""
    def boost_phrases(q):
        # Double the boost of Phrase queries; leave everything else alone.
        if isinstance(q, Phrase):
            q.boost *= 2.0
        return q
    before = And([Term("a", u("b")), Or([Term("c", u("d")), Phrase("a", [u("e"), u("f")])]),
                  Phrase("a", [u("g"), u("h")], boost=0.25)])
    after = before.accept(boost_phrases)
    assert_equal(after, And([Term("a", u("b")),
                             Or([Term("c", u("d")), Phrase("a", [u("e"), u("f")], boost=2.0)]),
                             Phrase("a", [u("g"), u("h")], boost=0.5)]))
    # accept() also works when the top-level query itself matches.
    before = Phrase("a", [u("b"), u("c")], boost=2.5)
    after = before.accept(boost_phrases)
    assert_equal(after, Phrase("a", [u("b"), u("c")], boost=5.0))
def test_simplify():
    """simplify() expands multi-term queries (Prefix) into an Or of matching index terms."""
    s = fields.Schema(k=fields.ID, v=fields.TEXT)
    ix = RamStorage().create_index(s)
    w = ix.writer()
    w.add_document(k=u("1"), v=u("aardvark apple allan alfa bear bee"))
    w.add_document(k=u("2"), v=u("brie glue geewhiz goop julia"))
    w.commit()
    r = ix.reader()
    q1 = And([Prefix("v", "b", boost=2.0), Term("v", "juliet")])
    # The prefix's boost is propagated to each expanded term.
    q2 = And([Or([Term('v', u('bear'), boost=2.0), Term('v', u('bee'), boost=2.0),
                  Term('v', u('brie'), boost=2.0)]), Term('v', 'juliet')])
    assert_equal(q1.simplify(r), q2)
def test_merge_ranges():
    """normalize() merges overlapping ranges and collapses exhaustive queries to Every."""
    # Two half-open ranges combine into one closed range.
    q = And([TermRange("f1", u("a"), None), TermRange("f1", None, u("z"))])
    assert_equal(q.normalize(), TermRange("f1", u("a"), u("z")))
    # Disjoint numeric ranges cannot merge.
    q = And([NumericRange("f1", None, u("aaaaa")), NumericRange("f1", u("zzzzz"), None)])
    assert_equal(q.normalize(), q)
    q = And([TermRange("f1", u("a"), u("z")), TermRange("f1", "b", "x")])
    assert_equal(q.normalize(), TermRange("f1", u("a"), u("z")))
    # And of overlapping ranges keeps the intersection; Or keeps the union.
    q = And([TermRange("f1", u("a"), u("m")), TermRange("f1", u("f"), u("q"))])
    assert_equal(q.normalize(), TermRange("f1", u("f"), u("m")))
    q = Or([TermRange("f1", u("a"), u("m")), TermRange("f1", u("f"), u("q"))])
    assert_equal(q.normalize(), TermRange("f1", u("a"), u("q")))
    # Ranges that jointly cover everything reduce to Every.
    q = Or([TermRange("f1", u("m"), None), TermRange("f1", None, u("n"))])
    assert_equal(q.normalize(), Every("f1"))
    q = And([Every("f1"), Term("f1", "a"), Variations("f1", "b")])
    assert_equal(q.normalize(), Every("f1"))
    q = Or([Term("f1", u("q")), TermRange("f1", u("m"), None), TermRange("f1", None, u("n"))])
    assert_equal(q.normalize(), Every("f1"))
    q = And([Or([Term("f1", u("a")), Term("f1", u("b"))]), Every("f1")])
    assert_equal(q.normalize(), Every("f1"))
    q = And([Term("f1", u("a")), And([Or([Every("f1")])])])
    assert_equal(q.normalize(), Every("f1"))
def test_normalize_compound():
    """normalize() flattens deeply nested duplicate Or trees to a single level."""
    def oq():
        return Or([Term("a", u("a")), Term("a", u("b"))])
    def nq(level):
        # Build a ternary tree of Or queries `level` deep, all identical leaves.
        if level == 0:
            return oq()
        else:
            return Or([nq(level - 1), nq(level - 1), nq(level - 1)])
    q = nq(7)
    q = q.normalize()
    assert_equal(q, Or([Term("a", u("a")), Term("a", u("b"))]))
def test_duplicates():
    """normalize() deduplicates equal subqueries but keeps differently-boosted ones."""
    q = And([Term("a", u("b")), Term("a", u("b"))])
    assert_equal(q.normalize(), Term("a", u("b")))
    q = And([Prefix("a", u("b")), Prefix("a", u("b"))])
    assert_equal(q.normalize(), Prefix("a", u("b")))
    q = And([Variations("a", u("b")), And([Variations("a", u("b")), Term("a", u("b"))])])
    assert_equal(q.normalize(), And([Variations("a", u("b")), Term("a", u("b"))]))
    # A different boost makes the term a distinct query, so nothing collapses.
    q = And([Term("a", u("b")), Prefix("a", u("b")), Term("a", u("b"), boost=1.1)])
    assert_equal(q.normalize(), q)
    # Wildcard without * or ? normalizes to Term
    q = And([Wildcard("a", u("b")), And([Wildcard("a", u("b")), Term("a", u("b"))])])
    assert_equal(q.normalize(), Term("a", u("b")))
# TODO: FIX THIS
def test_query_copy_hash():
    """
    Deep-copying a query must yield an equal object with the same hash,
    while a near-miss query (different boost, term, bound, or option)
    must compare unequal.
    """
    def do(q1, q2):
        # q1 must survive a deepcopy round-trip with equality and hash
        # intact; q2 differs from q1 in exactly one attribute.
        q1a = copy.deepcopy(q1)
        assert_equal(q1, q1a)
        assert_equal(hash(q1), hash(q1a))
        assert_not_equal(q1, q2)

    do(Term("a", u("b"), boost=1.1), Term("a", u("b"), boost=1.5))
    do(And([Term("a", u("b")), Term("c", u("d"))], boost=1.1),
       And([Term("a", u("b")), Term("c", u("d"))], boost=1.5))
    do(Or([Term("a", u("b"), boost=1.1), Term("c", u("d"))]),
       Or([Term("a", u("b"), boost=1.8), Term("c", u("d"))], boost=1.5))
    do(DisjunctionMax([Term("a", u("b"), boost=1.8), Term("c", u("d"))]),
       DisjunctionMax([Term("a", u("b"), boost=1.1), Term("c", u("d"))], boost=1.5))
    do(Not(Term("a", u("b"), boost=1.1)), Not(Term("a", u("b"), boost=1.5)))
    do(Prefix("a", u("b"), boost=1.1), Prefix("a", u("b"), boost=1.5))
    do(Wildcard("a", u("b*x?"), boost=1.1), Wildcard("a", u("b*x?"), boost=1.5))
    do(FuzzyTerm("a", u("b"), constantscore=True),
       FuzzyTerm("a", u("b"), constantscore=False))
    do(FuzzyTerm("a", u("b"), boost=1.1), FuzzyTerm("a", u("b"), boost=1.5))
    do(TermRange("a", u("b"), u("c")), TermRange("a", u("b"), u("d")))
    do(TermRange("a", None, u("c")), TermRange("a", None, None))
    do(TermRange("a", u("b"), u("c"), boost=1.1),
       TermRange("a", u("b"), u("c"), boost=1.5))
    do(TermRange("a", u("b"), u("c"), constantscore=True),
       TermRange("a", u("b"), u("c"), constantscore=False))
    do(NumericRange("a", 1, 5), NumericRange("a", 1, 6))
    do(NumericRange("a", None, 5), NumericRange("a", None, None))
    do(NumericRange("a", 3, 6, boost=1.1), NumericRange("a", 3, 6, boost=1.5))
    do(NumericRange("a", 3, 6, constantscore=True),
       NumericRange("a", 3, 6, constantscore=False))
    # do(DateRange)
    do(Variations("a", u("render")), Variations("a", u("renders")))
    do(Variations("a", u("render"), boost=1.1),
       Variations("a", u("renders"), boost=1.5))
    do(Phrase("a", [u("b"), u("c"), u("d")]), Phrase("a", [u("b"), u("c"), u("e")]))
    do(Phrase("a", [u("b"), u("c"), u("d")], boost=1.1),
       Phrase("a", [u("b"), u("c"), u("d")], boost=1.5))
    do(Phrase("a", [u("b"), u("c"), u("d")], slop=1),
       Phrase("a", [u("b"), u("c"), u("d")], slop=2))
    # do(Ordered)
    do(Every(), Every("a"))
    do(Every("a"), Every("b"))
    do(Every("a", boost=1.1), Every("a", boost=1.5))
    do(NullQuery, Term("a", u("b")))
    do(ConstantScoreQuery(Term("a", u("b"))), ConstantScoreQuery(Term("a", u("c"))))
    do(ConstantScoreQuery(Term("a", u("b")), score=2.0),
       ConstantScoreQuery(Term("a", u("c")), score=2.1))
    do(Require(Term("a", u("b")), Term("c", u("d"))),
       Require(Term("a", u("b"), boost=1.1), Term("c", u("d"))))
    # do(Require)
    # do(AndMaybe)
    # do(AndNot)
    # do(Otherwise)
    do(SpanFirst(Term("a", u("b")), limit=1), SpanFirst(Term("a", u("b")), limit=2))
    do(SpanNear(Term("a", u("b")), Term("c", u("d"))),
       SpanNear(Term("a", u("b")), Term("c", u("e"))))
    do(SpanNear(Term("a", u("b")), Term("c", u("d")), slop=1),
       SpanNear(Term("a", u("b")), Term("c", u("d")), slop=2))
    do(SpanNear(Term("a", u("b")), Term("c", u("d")), mindist=1),
       SpanNear(Term("a", u("b")), Term("c", u("d")), mindist=2))
    do(SpanNear(Term("a", u("b")), Term("c", u("d")), ordered=True),
       SpanNear(Term("a", u("b")), Term("c", u("d")), ordered=False))
    do(SpanNot(Term("a", u("b")), Term("a", u("c"))),
       SpanNot(Term("a", u("b")), Term("a", u("d"))))
    do(SpanOr([Term("a", u("b")), Term("a", u("c")), Term("a", u("d"))]),
       SpanOr([Term("a", u("b")), Term("a", u("c")), Term("a", u("e"))]))
    do(SpanContains(Term("a", u("b")), Term("a", u("c"))),
       SpanContains(Term("a", u("b")), Term("a", u("d"))))
    # do(SpanBefore)
    # do(SpanCondition)
def test_requires():
    """requires() reports which sub-queries every matching document must satisfy."""
    left = Term("f", u("a"))
    right = Term("f", u("b"))
    # And requires all children; Or requires none; AndMaybe only its first arm.
    assert_equal(And([left, right]).requires(), set([left, right]))
    assert_equal(Or([left, right]).requires(), set())
    assert_equal(AndMaybe(left, right).requires(), set([left]))
    assert_equal(left.requires(), set([left]))
def test_highlight_daterange():
    """Highlights work for a term search and are empty for a date-range search."""
    from datetime import datetime

    schema = fields.Schema(id=fields.ID(unique=True, stored=True),
                           title=fields.TEXT(stored=True),
                           content=fields.TEXT(stored=True),
                           released=fields.DATETIME(stored=True))
    ix = RamStorage().create_index(schema)

    writer = ix.writer()
    writer.update_document(
        id=u('1'),
        title=u('Life Aquatic'),
        content=u('A nautic film crew sets out to kill a gigantic shark.'),
        released=datetime(2004, 12, 25)
    )
    writer.update_document(
        id=u('2'),
        title=u('Darjeeling Limited'),
        content=u('Three brothers meet in India for a life changing train journey.'),
        released=datetime(2007, 10, 27)
    )
    writer.commit()

    searcher = ix.searcher()

    # The term search matches only the second document and highlights the hit.
    results = searcher.search(Term('content', u('train')), terms=True)
    assert_equal(len(results), 1)
    assert_equal(results[0]["id"], "2")
    assert_equal(results[0].highlights("content"),
                 'for a life changing <b class="match term0">train</b> journey')

    # A date-range search has no matched terms, so there is nothing to highlight.
    results = searcher.search(DateRange('released', datetime(2007, 1, 1), None))
    assert_equal(len(results), 1)
    assert_equal(results[0].highlights("content"), '')
|
mzdaniel/oh-mainline
|
vendor/packages/whoosh/tests/test_queries.py
|
Python
|
agpl-3.0
| 12,920
|
[
"VisIt"
] |
e807451012edf90a378189270cada306ecf609300df6dd0fd807c9a7b317a074
|
#!/usr/bin/env python
#! -*- coding: utf-8 -*-
###
# Copyright (c) Rice University 2012-13
# This software is subject to
# the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
###
"""
This is to test the user service once it is up and running on http.
We *should* be able to traverse with just the base URL, but I am not yet supplying in responses the navigation bits we need for REST.
"""
from urlparse import urljoin
import requests
import os
###### config - should be replaced with generic runner?
# Resolve the service configuration file relative to this test module.
HERE = os.path.abspath(os.path.dirname(__file__))
CONFD_PATH = os.path.join(HERE, "../../local.ini")
from rhaptos2.common.configuration import (
    find_configuration_file,
    Configuration,
)
config = Configuration.from_file(CONFD_PATH)
# Base URL of the running user service under test.
userhost = config['globals']['bamboo_global']['userserviceurl']
############################
def test_viewall():
    """The user collection endpoint returns a non-empty listing."""
    response = requests.get(urljoin(userhost, "users/"))
    users = response.json  # a property in the pinned (pre-1.0) requests API
    assert len(users) > 0
def test_get_known_userid():
    """A known fixture user can be fetched by its full user id."""
    response = requests.get(
        urljoin(userhost, "user/org.cnx.user-75e06194-baee-4395-8e1a-566b656f6920"))
    user = response.json  # a property in the pinned (pre-1.0) requests API
    assert user['fullname'] == 'Paul Brian'
def test_get_known_user_by_openid():
    """A known fixture user can be looked up via its OpenID URL."""
    response = requests.get(
        urljoin(userhost, "openid/?user=https://paulbrian.myopenid.com"))
    user = response.json  # a property in the pinned (pre-1.0) requests API
    assert user['fullname'] == 'Paul Brian'
def test_put():
    """Updating a user via PUT is reflected in a subsequent lookup.

    Bug fix: `payload` was built but never sent with the PUT request,
    so the update was a no-op and the assertion below could only pass
    against stale server state. The payload is now passed as the
    request body.
    """
    payload = {'email': 'testput-email', 'fullname': 'testput-fullname'}
    requests.put(
        urljoin(userhost, "user/org.cnx.user-75e06194-baee-4395-8e1a-566b656f6920"),
        data=payload)
    r = requests.get(urljoin(userhost, "openid/?user=https://paulbrian.myopenid.com"))
    d = r.json  # a property in the pinned (pre-1.0) requests API
    assert d['fullname'] == 'testput-fullname'
|
jbarmash/rhaptos2.user
|
rhaptos2/user/test_client.py
|
Python
|
agpl-3.0
| 1,721
|
[
"Brian"
] |
9d67d017c008bd798c02a7f495c33bbfffb91cb74969f6556e9ba80a74053bed
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the courseware unit bookmarks.
"""
import json
from unittest import skip
import requests
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common import BASE_URL
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.lms.bookmarks import BookmarksPage
from common.test.acceptance.pages.lms.course_home import CourseHomePage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage as StudioCourseOutlinePage
from common.test.acceptance.tests.helpers import EventsTestMixin, UniqueCourseTest, is_404_page
class BookmarksTestMixin(EventsTestMixin, UniqueCourseTest):
    """
    Mixin with helper methods for testing Bookmarks.
    """
    # Credentials of the auto-auth test user shared by every scenario.
    USERNAME = "STUDENT"
    EMAIL = "student@example.com"

    def setUp(self):
        super(BookmarksTestMixin, self).setUp()
        self.studio_course_outline_page = StudioCourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        self.bookmarks_page = BookmarksPage(self.browser, self.course_id)

        # Get session to be used for bookmarking units
        self.session = requests.Session()
        params = {'username': self.USERNAME, 'email': self.EMAIL, 'course_id': self.course_id}
        response = self.session.get(BASE_URL + "/auto_auth", params=params)
        self.assertTrue(response.ok, "Failed to get session")

    def setup_test(self, num_chapters=2):
        """
        Setup test settings.

        Arguments:
            num_chapters: number of chapters to create in course
        """
        self.create_course_fixture(num_chapters)

        # Auto-auth register for the course.
        AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id).visit()
        self.courseware_page.visit()

    def create_course_fixture(self, num_chapters):
        """
        Create course fixture

        Arguments:
            num_chapters: number of chapters to create
        """
        self.course_fixture = CourseFixture(  # pylint: disable=attribute-defined-outside-init
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        # One section/subsection/unit triple per chapter, named by index.
        xblocks = []
        for index in range(num_chapters):
            xblocks += [
                XBlockFixtureDesc('chapter', 'TestSection{}'.format(index)).add_children(
                    XBlockFixtureDesc('sequential', 'TestSubsection{}'.format(index)).add_children(
                        XBlockFixtureDesc('vertical', 'TestVertical{}'.format(index))
                    )
                )
            ]
        self.course_fixture.add_children(*xblocks).install()

    def verify_event_data(self, event_type, event_data):
        """
        Verify emitted event data.

        Arguments:
            event_type: expected event type
            event_data: expected event data
        """
        actual_events = self.wait_for_events(event_filter={'event_type': event_type}, number_of_matches=1)
        self.assert_events_match(event_data, actual_events)

    def _bookmark_unit(self, location):
        """
        Bookmark a unit directly through the REST API (bypasses the UI).

        Arguments:
            location (str): unit location
        """
        _headers = {
            'Content-type': 'application/json',
            'X-CSRFToken': self.session.cookies['csrftoken'],
        }
        params = {'course_id': self.course_id}
        data = json.dumps({'usage_id': location})
        response = self.session.post(
            BASE_URL + '/api/bookmarks/v1/bookmarks/',
            data=data,
            params=params,
            headers=_headers
        )
        self.assertTrue(response.ok, "Failed to bookmark unit")

    def bookmark_units(self, num_units):
        """
        Bookmark first `num_units` units

        Arguments:
            num_units(int): Number of units to bookmarks
        """
        xblocks = self.course_fixture.get_nested_xblocks(category="vertical")
        for index in range(num_units):
            self._bookmark_unit(xblocks[index].locator)
@attr(shard=8)
class BookmarksTest(BookmarksTestMixin):
    """
    Tests to verify bookmarks functionality.
    """

    def _breadcrumb(self, num_units, modified_name=None):
        """
        Creates breadcrumbs for the first `num_units`

        Arguments:
            num_units(int): Number of units for which we want to create breadcrumbs
            modified_name(str): optional replacement for the unit display name

        Returns:
            list of breadcrumbs
        """
        breadcrumbs = []
        for index in range(num_units):
            breadcrumbs.append(
                [
                    'TestSection{}'.format(index),
                    'TestSubsection{}'.format(index),
                    modified_name if modified_name else 'TestVertical{}'.format(index)
                ]
            )
        return breadcrumbs

    def _delete_section(self, index):
        """ Delete a section at index `index` """
        # Logout and login as staff
        LogoutPage(self.browser).visit()
        AutoAuthPage(
            self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id, staff=True
        ).visit()

        # Visit course outline page in studio.
        self.studio_course_outline_page.visit()
        self.studio_course_outline_page.wait_for_page()
        self.studio_course_outline_page.section_at(index).delete()

        # Logout and login as a student.
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id).visit()

        # Visit courseware as a student.
        self.courseware_page.visit()
        self.courseware_page.wait_for_page()

    def _toggle_bookmark_and_verify(self, bookmark_icon_state, bookmark_button_state, bookmarked_count):
        """
        Bookmark/Un-Bookmark a unit and then verify the icon/button state
        and the resulting count on the bookmarks page.
        """
        self.assertTrue(self.courseware_page.bookmark_button_visible)
        self.courseware_page.click_bookmark_unit_button()
        self.assertEqual(self.courseware_page.bookmark_icon_visible, bookmark_icon_state)
        self.assertEqual(self.courseware_page.bookmark_button_state, bookmark_button_state)
        self.bookmarks_page.visit()
        self.assertEqual(self.bookmarks_page.count(), bookmarked_count)

    def _verify_pagination_info(
            self,
            bookmark_count_on_current_page,
            header_text,
            previous_button_enabled,
            next_button_enabled,
            current_page_number,
            total_pages
    ):
        """
        Verify pagination info
        """
        self.assertEqual(self.bookmarks_page.count(), bookmark_count_on_current_page)
        self.assertEqual(self.bookmarks_page.get_pagination_header_text(), header_text)
        self.assertEqual(self.bookmarks_page.is_previous_page_button_enabled(), previous_button_enabled)
        self.assertEqual(self.bookmarks_page.is_next_page_button_enabled(), next_button_enabled)
        self.assertEqual(self.bookmarks_page.get_current_page_number(), current_page_number)
        # NOTE(review): accessed as a property despite the get_ prefix --
        # confirm against the BookmarksPage page object.
        self.assertEqual(self.bookmarks_page.get_total_pages, total_pages)

    def _verify_breadcrumbs(self, num_units, modified_name=None):
        """
        Verifies the breadcrumb trail.
        """
        bookmarked_breadcrumbs = self.bookmarks_page.breadcrumbs()

        # Verify bookmarked breadcrumbs.
        breadcrumbs = self._breadcrumb(num_units=num_units, modified_name=modified_name)
        breadcrumbs.reverse()
        self.assertEqual(bookmarked_breadcrumbs, breadcrumbs)

    def update_and_publish_block_display_name(self, modified_name):
        """
        Update and publish the block/unit display name.
        """
        self.studio_course_outline_page.visit()
        self.studio_course_outline_page.wait_for_page()
        self.studio_course_outline_page.expand_all_subsections()
        section = self.studio_course_outline_page.section_at(0)
        container_page = section.subsection_at(0).unit_at(0).go_to()

        self.course_fixture._update_xblock(container_page.locator, {  # pylint: disable=protected-access
            "metadata": {
                "display_name": modified_name
            }
        })

        container_page.visit()
        container_page.wait_for_page()

        self.assertEqual(container_page.name, modified_name)
        container_page.publish_action.click()

    def test_bookmark_button(self):
        """
        Scenario: Bookmark unit button toggles correctly

        Given that I am a registered user
        And I visit my courseware page
        For first 2 units
        I visit the unit
        And I can see the Bookmark button
        When I click on Bookmark button
        Then unit should be bookmarked
        Then I click again on the bookmark button
        And I should see a unit un-bookmarked
        """
        self.setup_test()
        for index in range(2):
            self.course_home_page.visit()
            self.course_home_page.outline.go_to_section('TestSection{}'.format(index), 'TestSubsection{}'.format(index))
            self._toggle_bookmark_and_verify(True, 'bookmarked', 1)
            self.course_home_page.visit()
            self.course_home_page.outline.go_to_section('TestSection{}'.format(index), 'TestSubsection{}'.format(index))
            self._toggle_bookmark_and_verify(False, '', 0)

    # TODO: TNL-6546: Remove this test
    def test_courseware_bookmarks_button(self):
        """
        Scenario: (Temporarily) test that the courseware's "Bookmarks" button works.
        """
        self.setup_test()
        self.bookmark_units(2)
        self.courseware_page.visit()
        self.courseware_page.click_bookmarks_button()
        self.assertTrue(self.bookmarks_page.is_browser_on_page())

    def test_empty_bookmarks_list(self):
        """
        Scenario: An empty bookmarks list is shown if there are no bookmarked units.

        Given that I am a registered user
        And I visit my bookmarks page
        Then I should see an empty bookmarks list
        And empty bookmarks list content is correct
        """
        self.setup_test()
        self.bookmarks_page.visit()
        empty_list_text = (
            'Use bookmarks to help you easily return to courseware pages. '
            'To bookmark a page, click "Bookmark this page" under the page title.')
        self.assertEqual(self.bookmarks_page.empty_list_text(), empty_list_text)

    def test_bookmarks_list(self):
        """
        Scenario: A bookmarks list is shown if there are bookmarked units.

        Given that I am a registered user
        And I have bookmarked 2 units
        And I visit my bookmarks page
        Then I should see a bookmarked list with 2 bookmark links
        And breadcrumb trail is correct for a bookmark
        When I click on bookmarked link
        Then I can navigate to correct bookmarked unit
        """
        self.setup_test()
        self.bookmark_units(2)
        self.bookmarks_page.visit()

        self._verify_breadcrumbs(num_units=2)

        self._verify_pagination_info(
            bookmark_count_on_current_page=2,
            header_text='Showing 1-2 out of 2 total',
            previous_button_enabled=False,
            next_button_enabled=False,
            current_page_number=1,
            total_pages=1
        )

        # get usage ids for units
        xblocks = self.course_fixture.get_nested_xblocks(category="vertical")
        xblock_usage_ids = [xblock.locator for xblock in xblocks]
        # Verify link navigation
        for index in range(2):
            self.bookmarks_page.visit()
            self.bookmarks_page.click_bookmarked_block(index)
            self.courseware_page.wait_for_page()
            self.assertIn(self.courseware_page.active_usage_id(), xblock_usage_ids)

    def test_bookmark_shows_updated_breadcrumb_after_publish(self):
        """
        Scenario: A bookmark breadcrumb trail is updated after publishing the changed display name.

        Given that I am a registered user
        And I visit my courseware page
        And I can see bookmarked unit
        Then I visit unit page in studio
        Then I change unit display_name
        And I publish the changes
        Then I visit my bookmarks page
        When I see the bookmark
        Then I can see the breadcrumb trail has the updated display_name.
        """
        self.setup_test(num_chapters=1)
        self.bookmark_units(num_units=1)
        self.bookmarks_page.visit()
        self._verify_breadcrumbs(num_units=1)

        # Re-authenticate as staff to edit the unit in Studio.
        LogoutPage(self.browser).visit()
        AutoAuthPage(
            self.browser,
            username=self.USERNAME,
            email=self.EMAIL,
            course_id=self.course_id,
            staff=True
        ).visit()

        modified_name = "Updated name"
        self.update_and_publish_block_display_name(modified_name)

        # Back to the student account to check the bookmarks page.
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id).visit()
        self.bookmarks_page.visit()
        self._verify_breadcrumbs(num_units=1, modified_name=modified_name)

    @skip("andya: 10/19/17: potentially flaky test")
    def test_unreachable_bookmark(self):
        """
        Scenario: We should get a HTTP 404 for an unreachable bookmark.

        Given that I am a registered user
        And I have bookmarked 2 units
        And I delete a bookmarked unit
        And I visit my bookmarks page
        Then I should see a bookmarked list
        When I click on the deleted bookmark
        Then I should navigated to 404 page
        """
        self.setup_test(num_chapters=1)
        self.bookmark_units(1)
        self._delete_section(0)

        self.bookmarks_page.visit()

        self._verify_pagination_info(
            bookmark_count_on_current_page=1,
            header_text='Showing 1 out of 1 total',
            previous_button_enabled=False,
            next_button_enabled=False,
            current_page_number=1,
            total_pages=1
        )

        self.bookmarks_page.click_bookmarked_block(0)
        self.assertTrue(is_404_page(self.browser))

    def test_page_size_limit(self):
        """
        Scenario: We can't get bookmarks more than default page size.

        Given that I am a registered user
        And I have bookmarked all the 11 units available
        And I visit my bookmarks page
        Then I should see a bookmarked list
        And the bookmark list should contain 10 bookmarked items
        """
        self.setup_test(11)
        self.bookmark_units(11)
        self.bookmarks_page.visit()

        self._verify_pagination_info(
            bookmark_count_on_current_page=10,
            header_text='Showing 1-10 out of 11 total',
            previous_button_enabled=False,
            next_button_enabled=True,
            current_page_number=1,
            total_pages=2
        )

    def test_pagination_with_single_page(self):
        """
        Scenario: Bookmarks list pagination is working as expected for single page

        Given that I am a registered user
        And I have bookmarked all the 2 units available
        And I visit my bookmarks page
        Then I should see a bookmarked list with 2 bookmarked items
        And I should see paging header and footer with correct data
        And previous and next buttons are disabled
        """
        self.setup_test(num_chapters=2)
        self.bookmark_units(num_units=2)
        self.bookmarks_page.visit()
        self.assertTrue(self.bookmarks_page.results_present())

        self._verify_pagination_info(
            bookmark_count_on_current_page=2,
            header_text='Showing 1-2 out of 2 total',
            previous_button_enabled=False,
            next_button_enabled=False,
            current_page_number=1,
            total_pages=1
        )

    def test_next_page_button(self):
        """
        Scenario: Next button is working as expected for bookmarks list pagination

        Given that I am a registered user
        And I have bookmarked all the 12 units available
        And I visit my bookmarks page
        Then I should see a bookmarked list of 10 items
        And I should see paging header and footer with correct info

        Then I click on next page button in footer
        And I should be navigated to second page
        And I should see a bookmarked list with 2 items
        And I should see paging header and footer with correct info
        """
        self.setup_test(num_chapters=12)
        self.bookmark_units(num_units=12)
        self.bookmarks_page.visit()
        self.assertTrue(self.bookmarks_page.results_present())

        self._verify_pagination_info(
            bookmark_count_on_current_page=10,
            header_text='Showing 1-10 out of 12 total',
            previous_button_enabled=False,
            next_button_enabled=True,
            current_page_number=1,
            total_pages=2
        )

        self.bookmarks_page.press_next_page_button()
        self._verify_pagination_info(
            bookmark_count_on_current_page=2,
            header_text='Showing 11-12 out of 12 total',
            previous_button_enabled=True,
            next_button_enabled=False,
            current_page_number=2,
            total_pages=2
        )

    def test_previous_page_button(self):
        """
        Scenario: Previous button is working as expected for bookmarks list pagination

        Given that I am a registered user
        And I have bookmarked all the 12 units available
        And I visit my bookmarks page

        Then I click on next page button in footer
        And I should be navigated to second page
        And I should see a bookmarked list with 2 items
        And I should see paging header and footer with correct info

        Then I click on previous page button
        And I should be navigated to first page
        And I should see paging header and footer with correct info
        """
        self.setup_test(num_chapters=12)
        self.bookmark_units(num_units=12)
        self.bookmarks_page.visit()
        self.assertTrue(self.bookmarks_page.results_present())
        self.bookmarks_page.press_next_page_button()

        self._verify_pagination_info(
            bookmark_count_on_current_page=2,
            header_text='Showing 11-12 out of 12 total',
            previous_button_enabled=True,
            next_button_enabled=False,
            current_page_number=2,
            total_pages=2
        )

        self.bookmarks_page.press_previous_page_button()

        self._verify_pagination_info(
            bookmark_count_on_current_page=10,
            header_text='Showing 1-10 out of 12 total',
            previous_button_enabled=False,
            next_button_enabled=True,
            current_page_number=1,
            total_pages=2
        )

    def test_pagination_with_valid_page_number(self):
        """
        Scenario: Bookmarks list pagination works as expected for valid page number

        Given that I am a registered user
        And I have bookmarked all the 11 units available
        And I visit my bookmarks page
        Then I should see a bookmarked list
        And I should see total page value is 2
        Then I enter 2 in the page number input
        And I should be navigated to page 2
        """
        self.setup_test(num_chapters=11)
        self.bookmark_units(num_units=11)
        self.bookmarks_page.visit()
        self.assertTrue(self.bookmarks_page.results_present())
        # NOTE(review): accessed as a property despite the get_ prefix --
        # confirm against the BookmarksPage page object.
        self.assertEqual(self.bookmarks_page.get_total_pages, 2)

        self.bookmarks_page.go_to_page(2)

        self._verify_pagination_info(
            bookmark_count_on_current_page=1,
            header_text='Showing 11-11 out of 11 total',
            previous_button_enabled=True,
            next_button_enabled=False,
            current_page_number=2,
            total_pages=2
        )

    def test_pagination_with_invalid_page_number(self):
        """
        Scenario: Bookmarks list pagination works as expected for invalid page number

        Given that I am a registered user
        And I have bookmarked all the 11 units available
        And I visit my bookmarks page
        Then I should see a bookmarked list
        And I should see total page value is 2
        Then I enter 3 in the page number input
        And I should stay at page 1
        """
        self.setup_test(num_chapters=11)
        self.bookmark_units(num_units=11)
        self.bookmarks_page.visit()
        self.assertTrue(self.bookmarks_page.results_present())
        self.assertEqual(self.bookmarks_page.get_total_pages, 2)

        self.bookmarks_page.go_to_page(3)

        self._verify_pagination_info(
            bookmark_count_on_current_page=10,
            header_text='Showing 1-10 out of 11 total',
            previous_button_enabled=False,
            next_button_enabled=True,
            current_page_number=1,
            total_pages=2
        )

    def test_bookmarked_unit_accessed_event(self):
        """
        Scenario: Bookmark events are emitted with correct data when we access/visit a bookmarked unit.

        Given that I am a registered user
        And I visit my courseware page
        And I have bookmarked a unit
        When I click on bookmarked unit
        Then `edx.bookmark.accessed` event is emitted
        """
        self.setup_test(num_chapters=1)
        self.reset_event_tracking()

        # create expected event data
        xblocks = self.course_fixture.get_nested_xblocks(category="vertical")
        event_data = [
            {
                'event': {
                    'bookmark_id': '{},{}'.format(self.USERNAME, xblocks[0].locator),
                    'component_type': xblocks[0].category,
                    'component_usage_id': xblocks[0].locator,
                }
            }
        ]
        self.bookmark_units(num_units=1)
        self.bookmarks_page.visit()

        self._verify_pagination_info(
            bookmark_count_on_current_page=1,
            header_text='Showing 1 out of 1 total',
            previous_button_enabled=False,
            next_button_enabled=False,
            current_page_number=1,
            total_pages=1
        )

        self.bookmarks_page.click_bookmarked_block(0)
        self.verify_event_data('edx.bookmark.accessed', event_data)
@attr('a11y')
class BookmarksA11yTests(BookmarksTestMixin):
    """
    Tests for checking the a11y of the bookmarks page.
    """

    def test_view_a11y(self):
        """
        Verify the basic accessibility of the bookmarks page while paginated.
        """
        # 11 bookmarks exceeds the 10-per-page default, so pagination
        # controls are rendered and included in the audit.
        self.setup_test(num_chapters=11)
        self.bookmark_units(num_units=11)
        self.bookmarks_page.visit()
        self.bookmarks_page.a11y_audit.check_for_accessibility_errors()
|
lduarte1991/edx-platform
|
common/test/acceptance/tests/lms/test_bookmarks.py
|
Python
|
agpl-3.0
| 23,481
|
[
"VisIt"
] |
161ad193e37905df6d261954a0eeed20b087c398581fd903091659a2f1fa2155
|
import numpy as np
from scipy.stats import norm as ndist
from ...tests.instance import gaussian_instance
from ..screening import marginal_screening
from ..lasso import lasso
def test_marginal(n=500,
                  p=50,
                  s=5,
                  sigma=3,
                  rho=0.4,
                  randomizer_scale=0.5,
                  use_MLE=True,
                  marginal=False):
    """
    Check selective inference after randomized marginal screening on a
    correlated Gaussian score.

    Repeats the simulation until screening selects at least one variable,
    then returns (null p-values, alternative p-values, coverage indicators,
    confidence intervals) for the selected targets.
    """
    while True:
        X = gaussian_instance(n=n,
                              p=p,
                              equicorrelated=False,
                              rho=rho)[0]
        # AR(rho) covariance of the score and its Cholesky factor.
        W = rho**(np.fabs(np.subtract.outer(np.arange(p), np.arange(p))))
        sqrtW = np.linalg.cholesky(W)
        # NOTE(review): the `sigma` parameter (default 3) is overwritten
        # here, so the argument is effectively ignored -- confirm intended.
        sigma = 0.15
        Z = np.random.standard_normal(p).dot(sqrtW.T) * sigma
        # s nonzero signals of magnitude 5*sigma with random signs/positions.
        beta = (2 * np.random.binomial(1, 0.5, size=(p,)) - 1) * 5 * sigma
        beta[s:] = 0
        np.random.shuffle(beta)
        true_mean = W.dot(beta)
        score = Z + true_mean
        idx = np.arange(p)  # NOTE(review): unused
        n, p = X.shape
        q = 0.1  # marginal screening level
        marginal_select = marginal_screening.type1(score,
                                                   W * sigma**2,
                                                   q,
                                                   randomizer_scale * sigma)
        boundary = marginal_select.fit()
        nonzero = boundary != 0

        if nonzero.sum() > 0:
            # Choose between marginal and multivariate (full-model) targets.
            if marginal:
                (observed_target,
                 cov_target,
                 crosscov_target_score,
                 alternatives) = marginal_select.marginal_targets(nonzero)
            else:
                (observed_target,
                 cov_target,
                 crosscov_target_score,
                 alternatives) = marginal_select.multivariate_targets(nonzero, dispersion=sigma**2)

            if use_MLE:
                estimate, _, _, pval, intervals, _ = marginal_select.selective_MLE(observed_target,
                                                                                   cov_target,
                                                                                   crosscov_target_score)
                # run summary
            else:
                _, pval, intervals = marginal_select.summary(observed_target,
                                                             cov_target,
                                                             crosscov_target_score,
                                                             alternatives,
                                                             compute_intervals=True)
            print(pval)

            # The target parameter depends on which parameterization was used.
            if marginal:
                beta_target = true_mean[nonzero]
            else:
                beta_target = beta[nonzero]
            print("beta_target and intervals", beta_target, intervals)

            coverage = (beta_target > intervals[:, 0]) * (beta_target < intervals[:, 1])
            print("coverage for selected target", coverage.sum()/float(nonzero.sum()))
            return pval[beta[nonzero] == 0], pval[beta[nonzero] != 0], coverage, intervals
def test_simple(n=100,
                p=20,
                s=3,
                use_MLE=False):
    """
    Marginal screening with an identity covariance, cross-checked against
    a randomized LASSO: both procedures should induce the same affine
    constraints on the randomization.
    """
    while True:
        Z = np.random.standard_normal(p)
        beta = (2 * np.random.binomial(1, 0.5, size=(p,)) - 1) * 5
        beta[s:] = 0
        np.random.shuffle(beta)
        true_mean = beta
        score = Z + true_mean
        idx = np.arange(p)  # NOTE(review): unused
        q = 0.1  # marginal screening level
        marginal_select = marginal_screening.type1(score,
                                                   np.identity(p),
                                                   q,
                                                   1.)
        boundary = marginal_select.fit()
        nonzero = boundary != 0

        # compare to LASSO
        # should have same affine constraints
        perturb = marginal_select._initial_omega  # randomization used
        randomized_lasso = lasso.gaussian(np.identity(p),
                                          score,
                                          marginal_select.threshold,
                                          randomizer_scale=1.,
                                          ridge_term=0.)
        randomized_lasso.fit(perturb = perturb)

        # Same randomization must give identical selection constraints.
        np.testing.assert_allclose(randomized_lasso.sampler.affine_con.mean,
                                   marginal_select.sampler.affine_con.mean)
        np.testing.assert_allclose(randomized_lasso.sampler.affine_con.covariance,
                                   marginal_select.sampler.affine_con.covariance)
        np.testing.assert_allclose(randomized_lasso.sampler.affine_con.linear_part,
                                   marginal_select.sampler.affine_con.linear_part)
        np.testing.assert_allclose(randomized_lasso.sampler.affine_con.offset,
                                   marginal_select.sampler.affine_con.offset)

        if nonzero.sum() > 0:
            (observed_target,
             cov_target,
             crosscov_target_score,
             alternatives) = marginal_select.marginal_targets(nonzero)

            if use_MLE:
                estimate, _, _, pval, intervals, _ = marginal_select.selective_MLE(observed_target,
                                                                                   cov_target,
                                                                                   crosscov_target_score)
                # run summary
            else:
                _, pval, intervals = marginal_select.summary(observed_target,
                                                             cov_target,
                                                             crosscov_target_score,
                                                             alternatives,
                                                             compute_intervals=True)
            print(pval)
            beta_target = cov_target.dot(true_mean[nonzero])
            print("beta_target and intervals", beta_target, intervals)

            coverage = (beta_target > intervals[:, 0]) * (beta_target < intervals[:, 1])
            print("coverage for selected target", coverage.sum()/float(nonzero.sum()))
            return pval[beta[nonzero] == 0], pval[beta[nonzero] != 0], coverage, intervals
def test_both():
    """Exercise test_marginal under both target parameterizations."""
    for use_marginal_targets in (True, False):
        test_marginal(marginal=use_marginal_targets)
def main(nsim=1000, test_fn=test_marginal, use_MLE=False):
    """
    Run `test_fn` repeatedly, accumulating coverage and p-values, and
    periodically plot empirical CDFs of the null/alternative p-values
    to 'marginal_screening_pvals.pdf'.
    """
    import matplotlib.pyplot as plt
    import statsmodels.api as sm

    U = np.linspace(0, 1, 101)  # evaluation grid for the ECDFs
    # NOTE(review): length_int is collected but never filled or used.
    P0, PA, cover, length_int = [], [], [], []
    for i in range(nsim):
        p0, pA, cover_, intervals = test_fn(use_MLE=use_MLE)
        cover.extend(cover_)
        P0.extend(p0)
        PA.extend(pA)
        print(np.mean(cover), 'coverage so far')

        # Refresh the diagnostic plot every 50 simulations.
        if i % 50 == 0 and i > 0:
            plt.clf()
            plt.plot(U, sm.distributions.ECDF(P0)(U), 'b', label='null')
            plt.plot(U, sm.distributions.ECDF(PA)(U), 'r', label='alt')
            plt.plot([0, 1], [0, 1], 'k--')
            plt.savefig('marginal_screening_pvals.pdf')
|
selective-inference/selective-inference
|
selectinf/randomized/tests/test_marginal_screening.py
|
Python
|
bsd-3-clause
| 7,138
|
[
"Gaussian"
] |
9ac981222361a708faaaa45d329f7d99175b7e6fff7af60bc0f1f105ce3dde57
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfbase/cidfonts.py
#$Header $
__version__=''' $Id: cidfonts.py,v 1.1 2006/05/26 19:19:46 thomas Exp $ '''
__doc__="""CID (Asian multi-byte) font support.
This defines classes to represent CID fonts. They know how to calculate
their own width and how to write themselves into PDF files."""
import os
from types import ListType, TupleType, DictType
from string import find, split, strip
import marshal
import md5
import time
import reportlab
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase._cidfontdata import allowedTypeFaces, allowedEncodings, CIDFontInfo
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfdoc
from reportlab.rl_config import CMapSearchPath
def findCMapFile(name):
    """Search CMapSearchPath for *name* and return the full path.

    Raises IOError when the file is not present in any search directory.
    """
    for directory in CMapSearchPath:
        candidate = directory + os.sep + name
        if os.path.isfile(candidate):
            return candidate
    raise IOError('CMAP file for encodings "%s" not found!' % name)
def structToPDF(structure):
    "Converts deeply nested structure to PDFdoc dictionary/array objects"
    # Recurse into mappings and sequences; anything else passes through.
    if type(structure) is DictType:
        converted = {}
        for key in structure.keys():
            converted[key] = structToPDF(structure[key])
        return pdfdoc.PDFDictionary(converted)
    elif type(structure) in (ListType, TupleType):
        return pdfdoc.PDFArray([structToPDF(item) for item in structure])
    else:
        return structure
class CIDEncoding(pdfmetrics.Encoding):
    """Multi-byte encoding. These are loaded from CMAP files.
    A CMAP file is like a mini-codec. It defines the correspondence
    between code points in the (multi-byte) input data and Character
    IDs. """
    # aims to do similar things to Brian Hooper's CMap class,
    # but I could not get it working and had to rewrite.
    # also, we should really rearrange our current encoding
    # into a SingleByteEncoding since many of its methods
    # should not apply here.
    def __init__(self, name, useCache=1):
        # name: CMAP name, e.g. '90ms-RKSJ-H'; resolved to a file via
        # findCMapFile().  useCache: when true, try a marshalled
        # '<name>.fastmap' in the reportlab temp dir before parsing,
        # and write one after a successful parse.
        self.name = name
        self._mapFileHash = None
        self._codeSpaceRanges = []
        self._notDefRanges = []
        self._cmap = {}
        self.source = None
        if useCache:
            from reportlab.lib.utils import get_rl_tempdir
            fontmapdir = get_rl_tempdir('FastCMAPS')
            if os.path.isfile(fontmapdir + os.sep + name + '.fastmap'):
                self.fastLoad(fontmapdir)
                self.source = fontmapdir + os.sep + name + '.fastmap'
            else:
                self.parseCMAPFile(name)
                self.source = 'CMAP: ' + name
                self.fastSave(fontmapdir)
        else:
            self.parseCMAPFile(name)
    def _hash(self, text):
        # MD5 digest of the raw CMAP text; recorded so a cached fastmap
        # could (in principle) be validated against the file it came from.
        hasher = md5.new()
        hasher.update(text)
        return hasher.digest()
    def parseCMAPFile(self, name):
        """This is a tricky one as CMAP files are Postscript
        ones. Some refer to others with a 'usecmap'
        command"""
        # NOTE(review): time.clock() was removed in Python 3.8; fine for
        # this legacy Python 2 module but relevant if ever ported.
        started = time.clock()
        cmapfile = findCMapFile(name)
        # this will CRAWL with the unicode encodings...
        rawdata = open(cmapfile, 'r').read()
        self._mapFileHash = self._hash(rawdata)
        #if it contains the token 'usecmap', parse the other
        #cmap file first....
        usecmap_pos = find(rawdata, 'usecmap')
        if usecmap_pos > -1:
            #they tell us to look in another file
            #for the code space ranges. The one
            # to use will be the previous word.
            chunk = rawdata[0:usecmap_pos]
            words = split(chunk)
            otherCMAPName = words[-1]
            #print 'referred to another CMAP %s' % otherCMAPName
            self.parseCMAPFile(otherCMAPName)
            # now continue parsing this, as it may
            # override some settings
        # Tokenize the whole file and scan for the three section types we
        # care about; every other token is skipped one word at a time.
        words = split(rawdata)
        while words <> []:
            if words[0] == 'begincodespacerange':
                words = words[1:]
                while words[0] <> 'endcodespacerange':
                    # Tokens look like <00> or <8140>: strip the angle
                    # brackets and parse as hex.
                    strStart, strEnd, words = words[0], words[1], words[2:]
                    start = int(strStart[1:-1], 16)
                    end = int(strEnd[1:-1], 16)
                    self._codeSpaceRanges.append((start, end),)
            elif words[0] == 'beginnotdefrange':
                words = words[1:]
                while words[0] <> 'endnotdefrange':
                    strStart, strEnd, strValue = words[0:3]
                    start = int(strStart[1:-1], 16)
                    end = int(strEnd[1:-1], 16)
                    value = int(strValue)
                    self._notDefRanges.append((start, end, value),)
                    words = words[3:]
            elif words[0] == 'begincidrange':
                words = words[1:]
                while words[0] <> 'endcidrange':
                    strStart, strEnd, strValue = words[0:3]
                    start = int(strStart[1:-1], 16)
                    end = int(strEnd[1:-1], 16)
                    value = int(strValue)
                    # this means that 'start' corresponds to 'value',
                    # start+1 corresponds to value+1 and so on up
                    # to end
                    offset = 0
                    while start + offset <= end:
                        self._cmap[start + offset] = value + offset
                        offset = offset + 1
                    words = words[3:]
            else:
                words = words[1:]
        finished = time.clock()
        print 'parsed CMAP %s in %0.4f seconds' % (self.name, finished - started)
    def translate(self, text):
        "Convert a string into a list of CIDs"
        # Walks the byte string keeping one byte of pending state
        # (lastChar): when the previous byte did not complete a code on
        # its own, it is combined with the current byte into a two-byte
        # code before being looked up.
        output = []
        cmap = self._cmap
        lastChar = ''
        for char in text:
            if lastChar <> '':
                #print 'convert character pair "%s"' % (lastChar + char)
                num = ord(lastChar) * 256 + ord(char)
            else:
                #print 'convert character "%s"' % char
                num = ord(char)
            lastChar = char
            found = 0
            # NOTE(review): 'low < num < high' excludes the endpoints of
            # the codespace range; CMap ranges are normally inclusive,
            # so this looks like it should be 'low <= num <= high'
            # (same for the notdef ranges below) -- confirm against the
            # Adobe CMap spec before changing.
            for low, high in self._codeSpaceRanges:
                if low < num < high:
                    try:
                        cid = cmap[num]
                        #print '%d -> %d' % (num, cid)
                    except KeyError:
                        #not defined.  Try to find the appropriate
                        # notdef character, or failing that return
                        # zero
                        cid = 0
                        for low2, high2, notdef in self._notDefRanges:
                            if low2 < num < high2:
                                cid = notdef
                                break
                    output.append(cid)
                    found = 1
                    break
            if found:
                lastChar = ''
            else:
                lastChar = char
        return output
    def fastSave(self, directory):
        # Marshal the four parsed tables to '<name>.fastmap' for quick
        # reload by fastLoad (same order there).
        f = open(os.path.join(directory, self.name + '.fastmap'), 'wb')
        marshal.dump(self._mapFileHash, f)
        marshal.dump(self._codeSpaceRanges, f)
        marshal.dump(self._notDefRanges, f)
        marshal.dump(self._cmap, f)
        f.close()
    def fastLoad(self, directory):
        # Inverse of fastSave: restore the four tables in the same order.
        started = time.clock()
        f = open(os.path.join(directory, self.name + '.fastmap'), 'rb')
        self._mapFileHash = marshal.load(f)
        self._codeSpaceRanges = marshal.load(f)
        self._notDefRanges = marshal.load(f)
        self._cmap = marshal.load(f)
        f.close()
        finished = time.clock()
        #print 'loaded %s in %0.4f seconds' % (self.name, finished - started)
class CIDTypeFace(pdfmetrics.TypeFace):
    """Multi-byte type face.
    Conceptually similar to a single byte typeface,
    but the glyphs are identified by a numeric Character
    ID (CID) and not a glyph name. """
    def __init__(self, name):
        """Initialised from one of the canned dictionaries in allowedEncodings
        Or rather, it will be shortly..."""
        pdfmetrics.TypeFace.__init__(self, name)
        self._extractDictInfo(name)
    def _extractDictInfo(self, name):
        # Pull ascent/descent, the default width and the per-CID width
        # table out of the canned CIDFontInfo entry for this face.
        try:
            fontDict = CIDFontInfo[name]
        except KeyError:
            raise KeyError, ("Unable to find information on CID typeface '%s'" % name +
                             "Only the following font names work:" + repr(allowedTypeFaces)
                             )
        descFont = fontDict['DescendantFonts'][0]
        self.ascent = descFont['FontDescriptor']['Ascent']
        self.descent = descFont['FontDescriptor']['Descent']
        self._defaultWidth = descFont['DW']
        self._explicitWidths = self._expandWidths(descFont['W'])
        # should really support self.glyphWidths, self.glyphNames
        # but not done yet.
    def _expandWidths(self, compactWidthArray):
        """Expands Adobe nested list structure to get a dictionary of widths.
        Here is an example of such a structure.
        (
        # starting at character ID 1, next n characters have the widths given.
        1,  (277,305,500,668,668,906,727,305,445,445,508,668,305,379,305,539),
        # all Characters from ID 17 to 26 are 668 em units wide
        17, 26, 668,
        27, (305, 305, 668, 668, 668, 566, 871, 727, 637, 652, 699, 574, 555,
        676, 687, 242, 492, 664, 582, 789, 707, 734, 582, 734, 605, 605,
        641, 668, 727, 945, 609, 609, 574, 445, 668, 445, 668, 668, 590,
        555, 609, 547, 602, 574, 391, 609, 582, 234, 277, 539, 234, 895,
        582, 605, 602, 602, 387, 508, 441, 582, 562, 781, 531, 570, 555,
        449, 246, 449, 668),
        # these must be half width katakana and the like.
        231, 632, 500
        )
        """
        # Two encodings are interleaved in the compact array:
        #   start, (w0, w1, ...)  -> consecutive explicit widths
        #   start, end, width     -> one width for the whole ID range
        data = compactWidthArray[:]
        widths = {}
        while data:
            start, data = data[0], data[1:]
            if type(data[0]) in (ListType, TupleType):
                items, data = data[0], data[1:]
                for offset in range(len(items)):
                    widths[start + offset] = items[offset]
            else:
                end, width, data = data[0], data[1], data[2:]
                for idx in range(start, end+1):
                    widths[idx] = width
        return widths
    def getCharWidth(self, characterId):
        # Width in 1/1000 em units; falls back to the face's DW default.
        return self._explicitWidths.get(characterId, self._defaultWidth)
class CIDFont(pdfmetrics.Font):
    "Represents a built-in multi-byte font"
    def __init__(self, face, encoding):
        # Flag read by the framework to distinguish multi-byte fonts.
        self._multiByte = 1
        assert face in allowedTypeFaces, "TypeFace '%s' not supported! Use any of these instead: %s" % (face, allowedTypeFaces)
        self.faceName = face
        #should cache in registry...
        self.face = CIDTypeFace(face)
        assert encoding in allowedEncodings, "Encoding '%s' not supported! Use any of these instead: %s" % (encoding, allowedEncodings)
        self.encodingName = encoding
        self.encoding = CIDEncoding(encoding)
        #legacy hack doing quick cut and paste.
        self.fontName = self.faceName + '-' + self.encodingName
        self.name = self.fontName
        # need to know if it is vertical or horizontal
        self.isVertical = (self.encodingName[-1] == 'V')
    def stringWidth(self, text, size):
        # Width in points of 'text' drawn at 'size'; text is first
        # translated to CIDs via the encoding.
        cidlist = self.encoding.translate(text)
        if self.isVertical:
            #this part is "not checked!" but seems to work.
            #assume each is 1000 ems high
            return len(cidlist) * size
        else:
            w = 0
            for cid in cidlist:
                w = w + self.face.getCharWidth(cid)
            # face widths are in 1/1000ths of an em
            return 0.001 * w * size
    def addObjects(self, doc):
        """The explicit code in addMinchoObjects and addGothicObjects
        will be replaced by something that pulls the data from
        _cidfontdata.py in the next few days."""
        # Assign the next internal name (F1, F2, ...) for this document.
        internalName = 'F' + repr(len(doc.fontMapping)+1)
        bigDict = CIDFontInfo[self.face.name]
        bigDict['Name'] = '/' + internalName
        bigDict['Encoding'] = '/' + self.encodingName
        #convert to PDF dictionary/array objects
        cidObj = structToPDF(bigDict)
        # link into document, and add to font map
        r = doc.Reference(cidObj, internalName)
        fontDict = doc.idToObject['BasicFonts'].dict
        fontDict[internalName] = r
        doc.fontMapping[self.name] = '/' + internalName
def precalculate(cmapdir):
# crunches through all, making 'fastmap' files
import os
files = os.listdir(cmapdir)
for file in files:
if os.path.isfile(cmapdir + os.sep + self.name + '.fastmap'):
continue
try:
enc = CIDEncoding(file)
except:
print 'cannot parse %s, skipping' % enc
continue
enc.fastSave(cmapdir)
print 'saved %s.fastmap' % file
def test():
    # only works if you have correct encodings on your box!
    # Smoke test: writes 'test_japanese.pdf' using the built-in Japanese
    # faces, then exercises CIDEncoding/CIDFont directly.
    c = Canvas('test_japanese.pdf')
    c.setFont('Helvetica', 30)
    c.drawString(100,700, 'Japanese Font Support')
    pdfmetrics.registerFont(CIDFont('HeiseiMin-W3','90ms-RKSJ-H'))
    pdfmetrics.registerFont(CIDFont('HeiseiKakuGo-W5','90ms-RKSJ-H'))
    # the two typefaces
    c.setFont('HeiseiMin-W3-90ms-RKSJ-H', 16)
    # this says "This is HeiseiMincho" in shift-JIS. Not all our readers
    # have a Japanese PC, so I escaped it. On a Japanese-capable
    # system, print the string to see Kanji
    message1 = '\202\261\202\352\202\315\225\275\220\254\226\276\222\251\202\305\202\267\201B'
    c.drawString(100, 675, message1)
    c.save()
    print 'saved test_japanese.pdf'
    ## print 'CMAP_DIR = ', CMAP_DIR
    ## tf1 = CIDTypeFace('HeiseiMin-W3')
    ## print 'ascent = ',tf1.ascent
    ## print 'descent = ',tf1.descent
    ## for cid in [1,2,3,4,5,18,19,28,231,1742]:
    ## print 'width of cid %d = %d' % (cid, tf1.getCharWidth(cid))
    encName = '90ms-RKSJ-H'
    enc = CIDEncoding(encName)
    print message1, '->', enc.translate(message1)
    f = CIDFont('HeiseiMin-W3','90ms-RKSJ-H')
    print 'width = %0.2f' % f.stringWidth(message1, 10)
    #testing all encodings
    ## import time
    ## started = time.time()
    ## import glob
    ## for encName in _cidfontdata.allowedEncodings:
    ## #encName = '90ms-RKSJ-H'
    ## enc = CIDEncoding(encName)
    ## print 'encoding %s:' % encName
    ## print ' codeSpaceRanges = %s' % enc._codeSpaceRanges
    ## print ' notDefRanges = %s' % enc._notDefRanges
    ## print ' mapping size = %d' % len(enc._cmap)
    ## finished = time.time()
    ## print 'constructed all encodings in %0.2f seconds' % (finished - started)
|
tschalch/pyTray
|
src/lib/reportlab/pdfbase/cidfonts.py
|
Python
|
bsd-3-clause
| 15,413
|
[
"Brian"
] |
d8c1bf5ad4d7a98bb62bbd3b7520163f6792e421389ab454d2646fd5029c23f1
|
#! /usr/bin/env python3
# findlinksto
#
# find symbolic links to a path matching a regular expression
import os
import sys
import re
import getopt
def main():
    """findlinksto: print symbolic links whose target matches a regex.

    Usage: findlinksto pattern directory ...

    Bug fix: the original called ``os.walk(dirname, visit, prog)``, which
    is the signature of the Python 2 ``os.path.walk()`` that was removed
    in Python 3.  ``os.walk()`` is a generator, so that call created it
    with bogus ``topdown``/``onerror`` arguments and never iterated it --
    the program silently did nothing.  We now consume the generator and
    scan each directory's entries ourselves.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], '')
        if len(args) < 2:
            raise getopt.GetoptError('not enough arguments', None)
    except getopt.GetoptError as msg:
        sys.stdout = sys.stderr
        print(msg)
        print('usage: findlinksto pattern directory ...')
        sys.exit(2)
    pat, dirs = args[0], args[1:]
    prog = re.compile(pat)
    for dirname in dirs:
        # os.walk() does not follow directory symlinks by default, which
        # preserves the old behaviour of pruning symlinked subtrees.
        for root, subdirs, files in os.walk(dirname):
            if os.path.ismount(root):
                print('descend into', root)
            for name in subdirs + files:
                path = os.path.join(root, name)
                try:
                    linkto = os.readlink(path)
                except OSError:
                    # not a symlink (or unreadable) -- skip
                    continue
                if prog.search(linkto) is not None:
                    print(path, '->', linkto)
def visit(prog, dirname, names):
    """Report symbolic links under *dirname* whose target matches *prog*.

    Called once per directory with the directory's entry list *names*.
    Prunes traversal (by clearing *names* in place) when *dirname* itself
    is a symlink, announces mount points, and prints every entry that is
    a symlink whose target matches the compiled pattern *prog*.
    """
    if os.path.islink(dirname):
        # Do not descend through symlinked directories; emptying the
        # list in place tells the caller to skip this whole subtree.
        del names[:]
        return
    if os.path.ismount(dirname):
        print('descend into', dirname)
    for entry in names:
        path = os.path.join(dirname, entry)
        try:
            target = os.readlink(path)
        except os.error:
            # Not a symlink (or unreadable) -- ignore this entry.
            continue
        if prog.search(target) is not None:
            print(path, '->', target)
if __name__ == '__main__':
main()
|
andreparrish/python-for-android
|
python3-alpha/python3-src/Tools/scripts/findlinksto.py
|
Python
|
apache-2.0
| 1,072
|
[
"VisIt"
] |
80adee14b913f652e3640dbe1680b3c1483775eac8d264d28eefa297abf89cc5
|
#!/usr/bin/python
#
# convert a blastn outfmt=6 file to BED6 format.
#
# Reads one tab-separated BLAST hit per line and writes a BED6 line
# (chrom, start, end, name, score, strand) to stdout, using the hit's
# e-value as the BED score column.  Python 2 script (print statements).
import sys
import datetime
if len(sys.argv)!=2:
    print "blast2bed.py <blast-file>"
    sys.exit(0)
filename = sys.argv[1]
f = open(filename, 'rU')
for line in f:
    chunks = line.split('\t')
    # outfmt=6 columns: 0=query id, 1=subject id, 8=s.start, 9=s.end,
    # 10=e-value.  Subject coords are given in hit orientation, so
    # start > end means the hit is on the minus strand.
    name = chunks[0]
    chromosome = chunks[1]
    if int(chunks[8])<int(chunks[9]):
        strand = '+'
        # BED is 0-based half-open, BLAST is 1-based inclusive:
        # subtract 1 from the start only.
        sstart = int(chunks[8])-1
        send = int(chunks[9])
    else:
        strand = '-'
        sstart = int(chunks[9])-1
        send = int(chunks[8])
    evalue = chunks[10]
    print chromosome+'\t'+str(sstart)+'\t'+str(send)+'\t'+name+'\t'+evalue+'\t'+strand
f.close()
|
carnegie-dpb/biotools
|
blast2bed.py
|
Python
|
gpl-2.0
| 661
|
[
"BLAST"
] |
354c5a7aa8b8ab77274bed75692d2177cbc3b943e493b9289b99691b7acf298c
|
import logging
from cl.donate.paypal import process_paypal_payment
from cl.donate.forms import DonationForm, UserForm, ProfileForm
from cl.donate.stripe_helpers import process_stripe_payment
from cl.users.utils import create_stub_account
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
logger = logging.getLogger(__name__)
def send_thank_you_email(donation):
    """Send a thank-you email for a completed donation to its donor.

    Expects ``donation`` to provide ``donor`` (a User with first_name
    and email) and ``amount``; ``settings.EIN`` supplies the tax ID
    quoted in the body.
    """
    user = donation.donor
    email_subject = 'Thanks for your donation to Free Law Project!'
    email_body = ('Hello %s,\n\nThanks for your donation of $%0.2f to Free '
                  'Law Project. We are currently using donations like yours '
                  'for a variety of important projects that would never exist '
                  'without your help.\n\n'
                  'We are a federally-recognized 501(c)(3) public charity '
                  'and a California non-profit public benefit corporation. '
                  'Our EIN is %s.\n\n'
                  'If you have any questions about your donation, please '
                  'don\'t hesitate to get in touch.\n\n'
                  'Thanks again,\n\n'
                  'Michael Lissner and Brian Carver\n'
                  'Founders of Free Law Project\n'
                  'http://freelawproject.org/contact/') % \
                 (user.first_name, donation.amount, settings.EIN, )
    send_mail(
        email_subject,
        email_body,
        'Free Law Project <donate@freelawproject.org>',
        [user.email]
    )
def route_and_process_donation(cd_donation_form, cd_user_form, stripe_token):
    """Routes the donation to the correct payment provider, then normalizes
    its response.

    Returns a dict with:
     - message: Any error messages that apply
     - status: The status of the payment for the database
     - payment_id: The ID of the payment

    Returns None when the payment provider is unrecognized.
    """
    provider = cd_donation_form['payment_provider']
    if provider == 'paypal':
        paypal_response = process_paypal_payment(cd_donation_form)
        if paypal_response['result'] == 'created':
            return {
                'message': None,
                'status': 0,  # AWAITING_PAYMENT
                'payment_id': paypal_response['payment_id'],
                'transaction_id': paypal_response['transaction_id'],
                'redirect': paypal_response['redirect'],
            }
        # PayPal refused/failed to create the payment.
        return {
            'message': 'We had an error working with PayPal. Please try '
                       'another payment method.',
            'status': 1,  # ERROR
            'payment_id': None,
            'redirect': None,
        }
    if provider == 'cc':
        # Stripe already returns the normalized shape.
        return process_stripe_payment(
            cd_donation_form,
            cd_user_form,
            stripe_token
        )
    return None
def donate(request):
    """Load the donate page or process a submitted donation.
    This page has several branches. The logic is as follows:
    if GET:
        --> Load the page
    elif POST:
        if user is anonymous:
            if email address on record as a stub account:
                --> Use it.
            elif new email address or a non-stub account:
                --> We cannot allow anonymous people to update real
                    accounts, or this is a new email address, so create a
                    new stub account.
        elif user is logged in:
            --> associate with account.
    We now have an account. Process the payment and associate it.
    """
    message = None
    if request.method == 'POST':
        donation_form = DonationForm(request.POST)
        if request.user.is_anonymous():
            # Either this is a new account, a stubbed one, or a user that's
            # simply not logged into their account
            try:
                stub_account = User.objects.filter(
                    profile__stub_account=True
                ).get(
                    email__iexact=request.POST.get('email')
                )
            except User.DoesNotExist:
                stub_account = False
            if stub_account:
                # We use the stub account and anonymous users even are allowed
                # to update it. This is OK, because we don't care too much
                # about the accuracy of this data. Later if/when this becomes
                # a real account, anonymous users won't be able to update this
                # information -- that's what matters.
                user_form = UserForm(
                    request.POST,
                    instance=stub_account
                )
                profile_form = ProfileForm(
                    request.POST,
                    instance=stub_account.profile
                )
            else:
                # Either a regular account or an email address we've never
                # seen before. Create a new user from the POST data.
                user_form = UserForm(request.POST)
                profile_form = ProfileForm(request.POST)
        else:
            user_form = UserForm(
                request.POST,
                instance=request.user
            )
            profile_form = ProfileForm(
                request.POST,
                instance=request.user.profile
            )
        if all([donation_form.is_valid(),
                user_form.is_valid(),
                profile_form.is_valid()]):
            # Process the data in form.cleaned_data
            cd_donation_form = donation_form.cleaned_data
            cd_user_form = user_form.cleaned_data
            cd_profile_form = profile_form.cleaned_data
            stripe_token = request.POST.get('stripeToken')
            # Route the payment to a payment provider
            response = route_and_process_donation(
                cd_donation_form,
                cd_user_form,
                stripe_token
            )
            logger.info("Payment routed with response: %s" % response)
            # status 0 == AWAITING_PAYMENT (see route_and_process_donation)
            if response['status'] == 0:
                if request.user.is_anonymous() and not stub_account:
                    # Create a stub account with an unusable password
                    user, profile = create_stub_account(
                        cd_user_form,
                        cd_profile_form,
                    )
                    user.save()
                    profile.save()
                else:
                    # Logged in user or an existing stub account.
                    user = user_form.save()
                    profile = profile_form.save()
                d = donation_form.save(commit=False)
                d.status = response['status']
                d.payment_id = response['payment_id']
                d.transaction_id = response.get('transaction_id')  # Will only work for Paypal.
                d.donor = user
                d.save()
                # Hand off to the provider's checkout flow.
                return HttpResponseRedirect(response['redirect'])
            else:
                logger.critical("Got back status of %s when making initial "
                                "request of API. Message was:\n%s" %
                                (response['status'], response['message']))
                message = response['message']
    else:
        # Loading the page...
        try:
            donation_form = DonationForm(
                initial={
                    'referrer': request.GET.get('referrer')
                }
            )
            user_form = UserForm(
                initial={
                    'first_name': request.user.first_name,
                    'last_name': request.user.last_name,
                    'email': request.user.email,
                }
            )
            up = request.user.profile
            profile_form = ProfileForm(
                initial={
                    'address1': up.address1,
                    'address2': up.address2,
                    'city': up.city,
                    'state': up.state,
                    'zip_code': up.zip_code,
                    'wants_newsletter': up.wants_newsletter
                }
            )
        except AttributeError:
            # for anonymous users, who lack profile info
            user_form = UserForm()
            profile_form = ProfileForm()
    return render_to_response(
        'donate.html',
        {
            'donation_form': donation_form,
            'user_form': user_form,
            'profile_form': profile_form,
            'private': False,
            'message': message,
            'stripe_public_key': settings.STRIPE_PUBLIC_KEY
        },
        RequestContext(request)
    )
def donate_complete(request):
    """Show the donation 'complete' page, translating any error info the
    payment provider passed back into a friendly message.

    Fixes two defects in the original:
     - two byte-identical render_to_response() calls (one inside the
       "has GET params" branch, one after it) are collapsed into one;
     - ``request.GET.get('error_description').lower()`` raised
       AttributeError when 'error_description' was absent; a missing
       description is now treated as no recognizable error.
    """
    error = None
    if request.GET.get('error') == 'failure':
        description = request.GET.get('error_description') or ''
        if description == 'User Cancelled':
            error = 'User Cancelled'
        elif 'insufficient funds' in description.lower():
            error = 'Insufficient Funds'
    return render_to_response(
        'donate_complete.html',
        {
            'error': error,
            'private': True,
        },
        RequestContext(request),
    )
|
brianwc/courtlistener
|
cl/donate/views.py
|
Python
|
agpl-3.0
| 9,731
|
[
"Brian"
] |
025a83be90da53bd20a5c95d8b4c0fc0bf9947ccb87b7a55111912a881aaacb4
|
""" Python version of getting started tutorial.
Please see https://github.com/snoplusuk/echidna/wiki/GettingStarted or the
jupyter notebook form of this tutorial for further details.
This script:
* Creates :class:`echidna.core.spectra.Spectra` instance
* Fills `Spectra`
* Plots `Spectra`
* Applies cuts and smears `Spectra`
* Other `Spectra` manipulations e.g. `shrink_to_roi`, `rebin` and
`scale`
This file has been generated automatically by running::
(ENV) $ jupyter nbconvert --to python --template getting_started
getting_started.ipynb
Examples:
To run (from the base directory)::
$ python echidna/scripts/tutorials/getting_started.py
"""
if __name__ == "__main__": # for running as a standalone python script too!
import matplotlib.pyplot as plt
# First set up environment with convenience imports and inline plotting:
# <!--- The following cell should be commented out in the python script
# version of this notebook --->
# In[ ]:
# %pylab inline
# pylab.rc("savefig", dpi=120) # set resolution of inline figures
# The `%pylab` magic imports `matplotlib.pyplot` as `plt` and `numpy` as
# `np`. We'll also, change the working directory to echidna's base
# directory, so that all the relative imports work.
# <!--- The following cell should be commented out in the python script
# version of this notebook --->
# In[ ]:
# %cd ../../..
# In[ ]:
# %%bash
# pwd
# The `%cd` inline-magic emmulates the bash `cd` command, allowing us to
# change directory and the `%%bash` magic lets you run any bash command in
# the cell but remaining in the notebook!
# ***
# <div class="alert alert-info">
# <strong>A quick note about the ipython notebook:</strong>
# <ul>
# <li> To see the keyboard shortcuts at any time simply press the
# `Esc` key and then the `H` key </li>
# <li> The notebook has two basic modes: **Command** and **Edit**.
# Command mode is enabled by the `Esc` key and Edit by the
# `Enter` key. </li>
# <li> The main comand you will need is `Shift`+`Enter` (make sure
# you are in command mode first by pressing `Esc`). This
# executes the current cell and then selects the cell below. Try
# pressing `Shift`+`Enter` on this cell and then again to run
# the cell below. </li>
# </ul>
# </div>
# In[ ]:
print "Hello World!"
# <div class="alert alert-info">
# <par>
# As you can see, for cells containing valid python, the code
# snippet is executed as it would be in a python terminal shell and
# the output is displayed below. Try selecting the cell above and
# editing it (`Enter` for edit mode) so that it prints out
# `Goodbye World!` when executed.
# </par>
# <par>
# These commands should get you through the tutorial, but there are
# more in-depth tutorials
# <a href="https://nbviewer.jupyter.org/github/ipython/ipython/blob/4.
# 0.x/examples/IPython%20Kernel/Index.ipynb">
# here</a> if you are interested - you can even download them and
# work through them in the Jupyter viewer.
# </par>
# </div>
# <!--- Main script starts below ------------------------------------------->
# # Tutorial 1: Getting started with echidna
# This guide tutorial aims to get you started with some basic tasks you can
# accomplish using echidna.
# ## Spectra creation
# The `Spectra` class is echidna's most fundamental class. It holds the core
# data structure and provides much of the core functionality required.
# Coincidentally, this guide will be centred around this class, how to
# create it and then some manipulations of the class.
# We'll begin with how to create an instance of the `Spectra` class. It is
# part of the `echidna.core.spectra` module, so we will import this and make
# a `Spectra` instance.
# In[ ]:
import echidna.core.spectra as spectra
# Now we need a config file to create the spectrum from. There is an example
# config file in `echidna/config`. If we look at the contents of this yaml
# file, we see it tells the `Spectra` class to create a data structure to
# hold two parameters:
# * `energy_mc`, with lower limit 0, upper limit 10 and 1000 bins
# * `radial_mc`, with lower limit 0, upper limit 15000 and 1500 bins
# This config should be fine for us. We can load it using the
# `load_from_file` method of the `SpectraConfig` class:
# In[ ]:
import echidna
from echidna.core.config import SpectraConfig
config = SpectraConfig.load_from_file(
echidna.__echidna_base__ + "/echidna/config/spectra_example.yml")
print config.get_pars()
# Note we used the `__echidna_base__` member of the `echidna` module here.
# This module has two special members for denoting the base directory (the
# outermost directory of the git repository) and the home directory (the
# `echidna` directory inside the base directory. The following lines show
# the current location of these directories:
# In[ ]:
print echidna.__echidna_base__
print echidna.__echidna_home__
# Finally before creating the spectrum, we should define the number of
# events it should represent:
# In[ ]:
num_decays = 1000
# In[ ]:
spectrum = spectra.Spectra("spectrum", num_decays, config)
print spectrum
# And there you have it, we've created a `Spectra` object.
# ## Filling the spectrum
# Ok, so we now have a spectrum, let's fill it with some events. We'll
# generate random energies from a Gaussian distribution and random positions
# from a Uniform distribution. Much of echidna is built using the `numpy`
# and `SciPy` packages and we will use them here to generate the random
# numbers. We'll also generate a third random number to simulate some form
# rudimentary detector efficiency.
# In[ ]:
# Import numpy
import numpy
# In[ ]:
# Generate random energies from a Gaussin with mean (mu) and sigma
# (sigma)
mu = 2.5 # MeV
sigma = 0.15 # MeV
# Generate random radial position from a Uniform distribution
outer_radius = 5997 # Radius of SNO+ AV
# Detector efficiency
efficiency = 0.9 # 90%
for event in range(num_decays):
energy = numpy.random.normal(mu, sigma)
radius = numpy.random.uniform(high=outer_radius)
event_detected = (numpy.random.uniform() < efficiency)
if event_detected: # Fill spectrum with values
spectrum.fill(energy_mc=energy, radial_mc=radius)
# This will have filled our `Spectra` class with the events. Make sure to
# use the exact parameter names that were printed out above, as kewyord
# arguments. To check we can now use the `sum` method. This returns the
# total number of events stored in the spectrum at a given time - the
# integral of the spectrum.
# In[ ]:
print spectrum.sum()
# The value returned by `sum`, should roughly equal:
# In[ ]:
print num_decays * efficiency
# We can also inspect the raw data structure. This is saved in the `_data`
# member of the `Spectra` class:
# In[ ]:
print spectrum._data
# <div class="alert alert-info">
# <strong>Note:</strong> you probably won't see any entries in the
# above. For large arrays, numpy only prints the first three and last
# three entries. Since our energy range is in the middle, all our events
# are in the `...` part at the moment. But we will see entries printed
# out later when we apply some cuts.
# </div>
# ## Plotting
# Another useful way to inspect the `Spectra` created is to plot it. Support
# is available within echidna to plot using either `ROOT` or `matplotlib`
# and there are some useful plotting functions available in the `plot` an
# `plot_root` modules.
# In[ ]:
import echidna.output.plot as plot
import echidna.output.plot_root as plot_root
# To plot the projection of the spectrum on the `energy_mc` axis:
# In[ ]:
fig1 = plot.plot_projection(spectrum, "energy_mc",
fig_num=1, show_plot=False)
# and to plot the projection on the `radial_mc` axis, this time using root:
# In[ ]:
plot_root.plot_projection(spectrum, "radial_mc", fig_num=2)
# We can also project onto two dimensions and plot a surface:
# In[ ]:
fig_3 = plot.plot_surface(spectrum, "energy_mc", "radial_mc",
fig_num=3, show_plot=False)
# ## Convolution and cuts
# The ability to smear the event, along a parameter axis, is built into
# echidna in the `smear` module. There are three classes in the module that
# allow us to create a smearer for different scenarios. There are two
# smearers for energy-based parameters, `EnergySmearRes` and
# `EnergySmearLY`, which allow smearing by energy resolution (e.g.
# $\frac{5\%}{\sqrt{(E[MeV])}}$ and light yield (e.g. 200 NHit/Mev)
# respectively. Then additionally the `RadialSmear` class handles smearing
# along the axis of any radial based parameter.
# We will go through an example of how to smear our spectrum by a fixed
# energy resolution of 5%. There are two main smearing algorithms: "weighted
# smear" and "random smear". The "random smear" algorithm takes each event
# in each bin and randomly assigns it a new energy from the Gaussian
# distribution for that bin - it is fast but not very accurate for low
# statistics. The "weighted smear" algorithm is slower but much more
# accurate, as re-weights each bin by taking into account all other nearby
# bins within a pre-defined range. We will use the "weighted smear" method
# in this example.
# First to speed the smearing process, we will apply some loose cuts.
# Although, fewer bins means faster smearing, you should be wary of cutting
# the spectrum too tightly before smearing as you may end up cutting bins
# that would have influenced the smearing. Cuts can be applied using the
# `shrink` method. (Confusingly there is also a `cut` method which is almost
# identical to the `shrink` method, but updates the number of events the
# spectrum represents, after the cut is applied. Unless you are sure this is
# what you want to do, it is probably better to use the `shrink` method.) To
# shrink over multiple parameters, it is best to construct a dictionary of
# `_low` and `_high` values for each parameter and then pass this to the
# shrink method.
# In[ ]:
shrink_dict = {"energy_mc_low": mu - 5.*sigma,
"energy_mc_high": mu + 5.*sigma,
"radial_mc_low": 0.0,
"radial_mc_high": 3500}
spectrum.shrink(**shrink_dict)
# Using the `sum` method, we can check to see how many events were cut.
# In[ ]:
print spectrum.sum()
# Import the smear class:
# In[ ]:
import echidna.core.smear as smear
# and create the smearer object.
# In[ ]:
smearer = smear.EnergySmearRes()
# By default the "weighted smear" method considers all bins within a $\pm
# 5\sigma$ range. For the sake of speed, we will reduce this to three here.
# Also set the energy resolution - 0.05 for 5%.
# In[ ]:
smearer.set_num_sigma(3)
smearer.set_resolution(0.05)
# To smear our original spectrum and create the new `Spectra` object
# `smeared_spectrum`:
# In[ ]:
smeared_spectrum = smearer.weighted_smear(spectrum)
# this should hopefully only take a couple of seconds.
# The following code shows how to make a simple script, using matplotlib, to
# overlay the original and smeared spectra.
# In[ ]:
def overlay_spectra(original, smeared,
dimension="energy_mc", fig_num=1):
""" Overlay original and smeared spectra.
Args:
original (echidna.core.spectra.Spectra): Original spectrum.
smeared (echidna.core.spectra.Spectra): Smeared spectrum.
dimension (string, optional): Dimension to project onto.
Default is "energy_mc".
fignum (int, optional): Figure number, if producing multiple
figures. Default is 1.
Returns:
matplotlib.figure.Figure: Figure showing overlaid spectra.
"""
par = original.get_config().get_par(dimension)
# Define array of bin boundarie
bins = par.get_bin_boundaries()
# Define array of bin centres
x = par.get_bin_centres()
# Save bin width
width = par.get_width()
# Create figure and axes
fig, ax = plt.subplots(num=fig_num)
# Overlay two spectra using projection as weight
ax.hist(x, bins, weights=original.project(dimension),
histtype="stepfilled", color="RoyalBlue",
alpha=0.5, label=original._name)
ax.hist(x, bins, weights=smeared.project(dimension),
histtype="stepfilled", color="Red",
alpha=0.5, label=smeared._name)
# Add label/style
plt.legend(loc="upper right")
plt.ylim(ymin=0.0)
plt.xlabel(dimension + " [" + par.get_unit() + "]")
plt.ylabel("Events per " + str(width) +
" " + par.get_unit() + " bin")
return fig
# In[ ]:
fig_4 = overlay_spectra(spectrum, smeared_spectrum, fig_num=4)
# ## Other spectra manipulations
# We now have a nice smeared version of our original spectrum. To prepare
# the spectrum for a final analysis there are a few final manipulations we
# may wish to do.
# ### Region of Interest (ROI)
# There is a special version of the `shrink` method called `shrink_to_roi`
# that can be used for ROI cuts. It saves some useful information about the
# ROI in the `Spectra` class instance, including the efficiency i.e.
# integral of spectrum after cut divided by integral of spectrum before cut.
# In[ ]:
# To get nice shape for rebinning
roi = (mu - 0.5*sigma, mu + 1.45*sigma)
smeared_spectrum.shrink_to_roi(roi[0], roi[1], "energy_mc")
print smeared_spectrum.get_roi("energy_mc")
# ### Rebin
# Our spectrum is still quite finely binned, perhaps we want to bin it in 50
# keV bins instead of 10 keV bins. The `rebin` method can be used to acheive
# this.
# The `rebin` method requires us to specify the new shape (tuple) of the
# data. With just two dimensions this is trivial, but with more dimensions,
# it may be better to use a construct such as:
# In[ ]:
dimension = smeared_spectrum.get_config().get_pars().index("energy_mc")
old_shape = smeared_spectrum._data.shape
reduction_factor = 5 # how many bins to combine into a single bin
new_shape = tuple([j / reduction_factor if i == dimension else j
for i, j in enumerate(old_shape)])
print old_shape
print new_shape
# In[ ]:
smeared_spectrum.rebin(new_shape)
# ### Scaling
# Finally, we "simulated" 1000 events, but we most likely want to scale this
# down for to represent the number of events expected in our analysis. The
# `Spectra` class has a `scale` method to accomplish this. Remember that the
# `scale` method should always be supplied with the number of events the
# full spectrum (i.e. before any cuts using `shrink` or `shrink_to_roi`)
# should represent. Lets assume that our spectrum should actually represent
# 104.25 events:
# In[ ]:
smeared_spectrum.scale(104.25)
print smeared_spectrum.sum()
# ## Putting it all together
# After creating, filling, convolving and various other manipulations what
# does our final spectrum look like?
# In[ ]:
print smeared_spectrum._data
# In[ ]:
fig_5 = plot.plot_projection(smeared_spectrum, "energy_mc",
fig_num=5, show_plot=False)
plt.show()
|
arushanova/echidna
|
echidna/scripts/tutorials/getting_started.py
|
Python
|
mit
| 15,553
|
[
"Gaussian"
] |
2d3d84b42ae1a109eb0414f7576c07254bb67cf735bef3f21edc079eceeada3e
|
import h5py
import numpy as np
from scipy.optimize import curve_fit
from upho.analysis.functions import FittingFunctionFactory
from upho.irreps.irreps import extract_degeneracy_from_ir_label
__author__ = 'Yuji Ikeda'
class SFFitter:
    """Fit a parametric peak function to each irrep-projected spectral
    function in an upho band HDF5 file and write results to 'sf_fit.hdf5'.

    The input file layout ('sf.hdf5') is assumed to contain 'paths',
    'frequencies', 'is_squared' and per-(path, point) groups with
    'partial_sf_s', 'num_irreps', 'ir_labels', etc.
    """

    def __init__(self, filename='sf.hdf5', name='gaussian'):
        # name: key understood by FittingFunctionFactory (e.g. 'gaussian').
        self._name = name
        with h5py.File(filename, 'r') as f:
            self._band_data = f
            # Run inside the `with` block: the h5py File must stay open
            # while _run reads its datasets.
            self._run()

    def _run(self):
        """Iterate over every (path, point) group, fit its spectral
        functions, and stream the fitted parameters into 'sf_fit.hdf5'."""
        band_data = self._band_data
        npaths, npoints = band_data['paths'].shape[:2]
        frequencies = band_data['frequencies']
        frequencies = np.array(frequencies)  # materialize frequency grid
        self._is_squared = np.array(band_data['is_squared'])
        filename_sf = 'sf_fit.hdf5'
        with h5py.File(filename_sf, 'w') as f:
            self.print_header(f)
            for ipath in range(npaths):
                for ip in range(npoints):
                    print(ipath, ip)  # progress indicator
                    group = '{}/{}/'.format(ipath, ip)
                    peak_positions, widths, norms, fiterrs, sf_fittings = (
                        self._fit_spectral_functions(
                            frequencies,
                            point_data=band_data[group],
                        )
                    )
                    self._write(f, group, peak_positions, widths, norms, fiterrs, sf_fittings)

    def _fit_spectral_functions(self, frequencies, point_data, prec=1e-6):
        """Fit each irrep's partial spectral function with a 2-parameter
        (position, width) curve at fixed norm.

        frequencies : 1-D ndarray, evenly spaced frequency grid.
        point_data : h5py group with 'partial_sf_s', 'num_irreps', 'ir_labels'.
        prec : spectra whose integral weight is below this are skipped (NaN).
        Returns (peak_positions, widths, norms, fiterrs, sf_fittings) arrays.
        """
        partial_sf_s = point_data['partial_sf_s']
        num_irreps = np.array(point_data['num_irreps'])
        dfreq = frequencies[1] - frequencies[0]  # assumes uniform grid
        fitting_function = FittingFunctionFactory(
            name=self._name,
            is_normalized=False).create()
        peak_positions = []
        widths = []
        norms = []
        fiterrs = []
        sf_fittings = []
        for i in range(num_irreps):
            sf = partial_sf_s[:, i]
            if np.sum(sf) < prec:
                # Effectively empty channel: record NaNs so array shapes
                # stay aligned across irreps.
                peak_position = np.nan
                width = np.nan
                norm = np.nan
                fiterr = np.nan
                sf_fitting = np.full(frequencies.shape, np.nan)
            else:
                peak_position = self._create_initial_peak_position(frequencies, sf)
                width = self._create_initial_width()
                if self._is_squared:
                    # Norm taken from the integrated spectral weight.
                    norm = self._create_initial_norm(frequencies, sf)
                else:
                    # Norm fixed to the degeneracy of the irrep label.
                    ir_label = str(point_data['ir_labels'][i], encoding='ascii')
                    norm = float(extract_degeneracy_from_ir_label(ir_label))

                def f(x, p, w):
                    # Closure fixing the norm; curve_fit varies (p, w) only.
                    return fitting_function(x, p, w, norm)

                p0 = [peak_position, width]
                maxfev = create_maxfev(p0)
                fit_params, pcov = curve_fit(
                    f, frequencies, sf, p0=p0, maxfev=maxfev)
                # Residual L2 norm scaled by the bin width.
                fiterr = np.sqrt(np.sum((f(frequencies, *fit_params) - sf) ** 2)) * dfreq
                peak_position = fit_params[0]
                width = fit_params[1]
                # With the two-parameter p0 above len(fit_params) == 2, so
                # norm keeps its initial value; the guard supports
                # three-parameter fits.
                norm = fit_params[2] if len(fit_params) == 3 else norm
                sf_fitting = f(frequencies, *fit_params)
            peak_positions.append(peak_position)
            widths.append(width)
            norms.append(norm)
            fiterrs.append(fiterr)
            sf_fittings.append(sf_fitting)
        peak_positions = np.array(peak_positions)
        widths = np.array(widths)
        norms = np.array(norms)
        fiterrs = np.asarray(fiterrs)
        sf_fittings = np.asarray(sf_fittings)
        return peak_positions, widths, norms, fiterrs, sf_fittings

    def _create_initial_peak_position(self, frequencies, sf, prec=1e-12):
        """Initial guess: the frequency with maximum spectral weight."""
        position = frequencies[np.argmax(sf)]
        # "curve_fit" does not work well for extremely small initial guess.
        # To avoid this problem, "position" is rounded.
        # See also "http://stackoverflow.com/questions/15624070"
        if abs(position) < prec:
            position = 0.0
        return position

    def _create_initial_width(self):
        """Initial guess for the peak width (fixed heuristic)."""
        width = 0.1
        return width

    def _create_initial_norm(self, frequencies, sf):
        """Initial guess for the norm: trapezoid-free integral sum(sf)*df."""
        dfreq = frequencies[1] - frequencies[0]
        norm = np.sum(sf) * dfreq
        return norm

    def print_header(self, file_output):
        """Write run-level metadata (fit function name, flags, q-point
        paths, frequency grid) into the output HDF5 file."""
        file_output.create_dataset('function', data=self._name)
        file_output.create_dataset('is_squared', data=self._is_squared)
        file_output.create_dataset('paths', data=self._band_data['paths'])
        file_output['frequencies'] = self._band_data['frequencies'][...]

    def _write(self, file_out, group_name, peak_positions_s, widths_s, norms_s, fiterrs, sf_fittings):
        """Copy the point's metadata from the input file and store the
        fitted parameters under `group_name` in the output file."""
        group = file_out.create_group(group_name)
        keys = [
            'natoms_primitive',
            'elements',
            'distance',
            'pointgroup_symbol',
            'num_irreps',
            'ir_labels',
        ]
        for k in keys:
            file_out.create_dataset(
                group_name + k, data=np.array(self._band_data[group_name + k])
            )
        group.create_dataset('peaks_s', data=peak_positions_s)
        group.create_dataset('widths_s', data=widths_s)
        group.create_dataset('norms_s', data=norms_s)
        group['fitting_errors'] = fiterrs
        group['partial_sf_s'] = sf_fittings
        # Total spectral function = NaN-safe sum over irrep channels.
        group['total_sf'] = np.nansum(sf_fittings, axis=0)
def create_maxfev(p0):
    """Return the maximum number of function evaluations for curve_fit,
    scaled with the number of free parameters in the initial guess p0."""
    return 20000 * (len(p0) + 1)
|
yuzie007/upho
|
upho/phonon/sf_fitter.py
|
Python
|
mit
| 5,575
|
[
"Gaussian"
] |
171ced1e08e71c7f423ccbf9c2c84719d85b3290ded3da5fca7b7a88b25af625
|
#!/usr/bin/env python
"""Run signal-to-reference alignments
"""
from __future__ import print_function
import pandas as pd
from signalAlignLib import *
from alignmentAnalysisLib import CallMethylation, get_first_sequence
from variantCallingLib import scan_for_proposals
from multiprocessing import Process, Queue, current_process, Manager
from serviceCourse.file_handlers import FolderHandler
from argparse import ArgumentParser
from random import shuffle
from shutil import copyfile
from operator import itemgetter
STEP = 6
def parse_args():
    """Build and parse the command-line interface for the BonnyDoon
    error-correction pipeline.

    Returns the parsed argparse.Namespace.
    """
    parser = ArgumentParser(description=__doc__)
    # Input reads and reference.
    parser.add_argument('--file_directory', '-d', action='store',
                        dest='files_dir', required=True, type=str, default=None,
                        help="directory with MinION fast5 reads to align")
    parser.add_argument('--ref', '-r', action='store',
                        dest='ref', required=True, type=str,
                        help="reference sequence to align to, in FASTA")
    # Optional model inputs (HMMs / HDPs).
    parser.add_argument('--in_template_hmm', '-T', action='store', dest='in_T_Hmm',
                        required=False, type=str, default=None,
                        help="input HMM for template events, if you don't want the default")
    parser.add_argument('--in_complement_hmm', '-C', action='store', dest='in_C_Hmm',
                        required=False, type=str, default=None,
                        help="input HMM for complement events, if you don't want the default")
    parser.add_argument('--templateHDP', '-tH', action='store', dest='templateHDP', default=None,
                        help="template serialized HDP file")
    parser.add_argument('--complementHDP', '-cH', action='store', dest='complementHDP', default=None,
                        help="complement serialized HDP file")
    parser.add_argument('--degenerate', '-x', action='store', dest='degenerate', default="variant",
                        help="Specify degenerate nucleotide options: "
                             "variant -> {ACGT}, twoWay -> {CE} threeWay -> {CEO}")
    parser.add_argument('--stateMachineType', '-smt', action='store', dest='stateMachineType', type=str,
                        default="threeState", help="decide which model to use, threeState by default")
    # Alignment tuning parameters.
    parser.add_argument('--threshold', '-t', action='store', dest='threshold', type=float, required=False,
                        default=None, help="posterior match probability threshold, Default: 0.01")
    parser.add_argument('--diagonalExpansion', '-e', action='store', dest='diag_expansion', type=int,
                        required=False, default=None, help="number of diagonals to expand around each anchor")
    parser.add_argument('--constraintTrim', '-m', action='store', dest='constraint_trim', type=int,
                        required=False, default=None, help='amount to remove from an anchor constraint')
    parser.add_argument('--target_regions', '-q', action='store', dest='target_regions', type=str,
                        required=False, default=None, help="tab separated table with regions to align to")
    # NOTE(review): '---un-banded' has THREE leading dashes -- looks like a
    # typo for '--un-banded', but renaming it would change the CLI that
    # existing scripts may use; the '-ub' short form works either way.
    parser.add_argument('---un-banded', '-ub', action='store_false', dest='banded',
                        default=True, help='flag, turn off banding')
    # Parallelism and throughput limits.
    parser.add_argument('--jobs', '-j', action='store', dest='nb_jobs', required=False,
                        default=4, type=int, help="number of jobs to run concurrently")
    parser.add_argument('--nb_files', '-n', action='store', dest='nb_files', required=False,
                        default=500, type=int, help="maximum number of reads to align")
    # todo help string
    parser.add_argument('--cycles', dest='cycles', default=1, required=False, type=int)
    parser.add_argument('--output_location', '-o', action='store', dest='out',
                        required=True, type=str, default=None,
                        help="directory to put the alignments")
    # todo help string
    parser.add_argument('--corrected', dest='corrected', required=False, default='corrected.fa')

    args = parser.parse_args()
    return args
def group_sites_in_window2(sites, window=6):
    """Group positions into maximal runs where consecutive sites are less
    than `window` bases apart.

    Fixes a defect in the previous implementation: a trailing site that
    formed a singleton group was silently dropped (e.g. [1, 10] -> [[1]]).

    sites : list of integer positions; sorted in place (preserved side
        effect of the original implementation).
    window : maximum gap (exclusive) between consecutive group members.
    :return: list of lists, each a maximal run of nearby sites.
    """
    sites.sort()
    groups = []
    i = 0
    n = len(sites)
    while i < n:
        run = [sites[i]]
        # Extend the run while the next site is within the window.
        while i + 1 < n and sites[i + 1] - sites[i] < window:
            run.append(sites[i + 1])
            i += 1
        groups.append(run)
        i += 1
    return groups
def make_degenerate_reference(input_fasta, start, forward_sequence_path, backward_sequence_path,
                              block_size=1, step=6):
    """Write template and complement reference sequences with degenerate
    "X" characters substituted every `step` bases.

    input_fasta: path to a FASTA file; only its first sequence is used
    start: 0-based offset of the first substituted position
    forward_sequence_path: output path for the substituted template strand
    backward_sequence_path: output path for the substituted complement strand
    block_size: not implemented (kept for interface compatibility)
    step: number of bases between degenerate characters
    :return (True, length of the substituted sequence)
    """
    input_sequence = get_first_sequence(input_fasta)
    # Complement only (reverse=False): keeps the same 5'->3' coordinates so
    # both strands are substituted at the same positions.
    complement_sequence = reverse_complement(dna=input_sequence, reverse=False, complement=True)
    t_seq = list(input_sequence)
    c_seq = list(complement_sequence)
    # range() instead of xrange(): behaves identically here and also works
    # under Python 3.
    for position in range(start, len(input_sequence), step):
        t_seq[position] = "X"
        c_seq[position] = "X"
    t_seq = ''.join(t_seq)
    c_seq = ''.join(c_seq)
    sequence_length = len(t_seq)
    with open(forward_sequence_path, 'w') as f:
        f.write("{seq}".format(seq=t_seq))
    with open(backward_sequence_path, 'w') as f:
        f.write("{seq}".format(seq=c_seq))
    return True, sequence_length
def aligner(work_queue, done_queue):
    """Worker loop: consume alignment-argument dicts from work_queue and run
    a SignalAlignment for each; report any failure on done_queue.

    Stops when the 'STOP' sentinel is received from the queue.
    """
    try:
        for f in iter(work_queue.get, 'STOP'):
            alignment = SignalAlignment(**f)
            alignment.run()
    # `except Exception as e` replaces the Python-2-only `except Exception, e`;
    # str(e) replaces the long-deprecated (removed in py3) e.message.
    except Exception as e:
        done_queue.put("%s failed with %s" % (current_process().name, str(e)))
def run_methyl_caller(work_queue, done_queue):
    """Worker loop: consume CallMethylation keyword dicts from work_queue
    and write each call's output; report any failure on done_queue.

    Stops when the 'STOP' sentinel is received from the queue.
    """
    try:
        for f in iter(work_queue.get, 'STOP'):
            c = CallMethylation(**f)
            c.write()
    # `except Exception as e` replaces the Python-2-only `except Exception, e`;
    # str(e) replaces the long-deprecated (removed in py3) e.message.
    except Exception as e:
        done_queue.put("%s failed with %s" % (current_process().name, str(e)))
def load_data(file_path):
    """Load a per-site base-probability table (TSV, no header) into a
    DataFrame with columns: site, strand, pA, pC, pG, pT, read.

    Fixes the previous dtype mapping, which listed 'pmC'/'phmC' (columns
    that do not exist in `names`), left pA/pG/pT untyped, and used the
    `np.str` alias that was removed from modern NumPy.
    """
    data = pd.read_table(file_path,
                         usecols=(0, 1, 2, 3, 4, 5, 6),
                         names=['site', 'strand', 'pA', 'pC', 'pG', 'pT', 'read'],
                         dtype={'site': np.int64,
                                'strand': str,
                                'pA': np.float64,
                                'pC': np.float64,
                                'pG': np.float64,
                                'pT': np.float64,
                                'read': str,
                                })
    return data
def symbol_to_base(symbol):
    """Map a base index (0..3) to its nucleotide character (A, C, G, T)."""
    return "ACGT"[symbol]
def rc_probs(probs):
    """Return the four base probabilities in reversed order (A<->T, C<->G),
    i.e. the probabilities seen from the reverse complement strand."""
    return [probs[idx] for idx in (3, 2, 1, 0)]
def update_reference(data, reference_sequence, min_depth=0, get_sites=False):
    """Call a consensus base at every site covered in `data` and either edit
    the reference in place or collect candidate edit sites.

    data : path to a TSV of per-read base probabilities (see load_data).
    reference_sequence : FASTA path; only the first sequence is used.
    min_depth : skip sites covered by fewer reads than this.
    get_sites : if True, return [(site, confidence_gap), ...] of proposed
        edits; if False, apply the edits and return the updated sequence.
    """
    d = load_data(data)
    ref = get_first_sequence(reference_sequence)
    ref = list(ref)  # mutable copy for in-place edits
    candidate_sites = []
    add_to_candidates = candidate_sites.append
    for g, x in d.groupby("site"):
        # Per-strand accumulators indexed by base label.
        marginal_forward_p = pd.Series(0, ['pA', 'pC', 'pG', 'pT'])
        marginal_backward_p = pd.Series(0, ['pA', 'pC', 'pG', 'pT'])
        assert(len(x['site'].unique()) == 1)
        site = x['site'].unique()[0]
        if len(x['read']) < min_depth:
            continue
        for i, read in x.iterrows():
            # Forward-reference evidence: template strand of a forward read
            # or complement strand of a backward read.
            if ((read['read'].endswith(".forward.tsv") and read['strand'] == 't') or
                    (read['read'].endswith(".backward.tsv") and read['strand'] == 'c')):
                direction = True
            else:
                direction = False
            if direction:
                marginal_forward_p += read[['pA', 'pC', 'pG', 'pT']]
            else:
                marginal_backward_p += read[['pA', 'pC', 'pG', 'pT']]
        # Backward evidence is complemented (rc_probs returns a plain list,
        # so this addition is positional, not label-aligned).
        marginal_prob = marginal_forward_p + rc_probs(marginal_backward_p)
        normed_marginal_probs = marginal_prob.map(lambda x: x / sum(marginal_prob))
        # argmax yields a label like 'pC'; [1] extracts the base character.
        called_base = normed_marginal_probs.argmax()[1]
        #called_base = marginal_prob.map(lambda x: x / sum(marginal_prob)).argmax()[1]
        if called_base != ref[site]:
            if get_sites is False:
                # Apply the edit directly.
                print("Changing {orig} to {new} at {site}".format(orig=ref[site], new=called_base, site=site))
                ref[site] = called_base
            else:
                # Only record the proposal with its confidence gap.
                print("Proposing edit at {site} from {orig} to {new}, \n{probs}"
                      "".format(orig=ref[site], new=called_base, site=site, probs=normed_marginal_probs))
                difference = normed_marginal_probs.max() - normed_marginal_probs["p" + ref[site]]
                print(difference)
                add_to_candidates((site, difference))
    if get_sites is True:
        return candidate_sites
    else:
        return ''.join(ref)
def main(args):
    """Pipeline entry point: cull reads, align them to the reference, scan
    for proposed edits and group nearby proposal sites.

    NOTE(review): the `args` parameter is ignored -- parse_args() reads
    sys.argv directly.
    """
    # parse args
    args = parse_args()
    command_line = " ".join(sys.argv[:])
    print("Command Line: {cmdLine}\n".format(cmdLine=command_line), file=sys.stderr)
    start_message = """
#   Starting BonnyDoon Error-Correction
#   Aligning files from: {fileDir}
#   Aligning to reference: {reference}
#   Aligning maximum of {nbFiles} files
#   Using model: {model}
#   Using banding: {banding}
#   Aligning to regions in: {regions}
#   Non-default template HMM: {inThmm}
#   Non-default complement HMM: {inChmm}
#   Template HDP: {tHdp}
#   Complement HDP: {cHdp}
    """.format(fileDir=args.files_dir, reference=args.ref, nbFiles=args.nb_files, banding=args.banded,
               inThmm=args.in_T_Hmm, inChmm=args.in_C_Hmm, model=args.stateMachineType, regions=args.target_regions,
               tHdp=args.templateHDP, cHdp=args.complementHDP)
    print(start_message, file=sys.stdout)
    # cull the MinION files
    fast5s = cull_fast5_files(args.files_dir, args.nb_files)
    # get the (input) reference sequence
    if not os.path.isfile(args.ref):
        print("Did not find valid reference file", file=sys.stderr)
        sys.exit(1)
    reference_sequence_path = args.ref
    # unpack the reference sequence
    reference_sequence_string = get_first_sequence(reference_sequence_path)
    # make a working folder in the specified directory
    temp_folder = FolderHandler()
    temp_dir_path = temp_folder.open_folder(args.out + "tempFiles_errorCorrection")
    # index the reference for bwa; this is a string with the path to the index
    bwa_ref_index = get_bwa_index(reference_sequence_path, temp_dir_path)
    # alignment args are the parameters to the HMM/HDP model, and don't change
    alignment_args = {
        "path_to_EC_refs": None,
        "destination": temp_dir_path,
        "stateMachineType": args.stateMachineType,
        "bwa_index": bwa_ref_index,
        "in_templateHmm": args.in_T_Hmm,
        "in_complementHmm": args.in_C_Hmm,
        "in_templateHdp": args.templateHDP,
        "in_complementHdp": args.complementHDP,
        "banded": args.banded,
        "sparse_output": True,
        "threshold": args.threshold,
        "diagonal_expansion": args.diag_expansion,
        "constraint_trim": args.constraint_trim,
        "target_regions": None,
        "degenerate": degenerate_enum(args.degenerate),
    }
    # get the sites that have proposed edits
    proposals = scan_for_proposals(temp_folder, STEP, reference_sequence_string, fast5s, alignment_args, args.nb_jobs)
    proposals = group_sites_in_window2([x[0] for x in proposals], 6)
    # NOTE(review): `proposals` is computed but never used and the function
    # returns None here -- the remainder of the correction loop (args.cycles,
    # args.corrected) appears unfinished.
    return
if __name__ == "__main__":
    # NOTE(review): main() returns None (it ignores its argument and calls
    # parse_args() itself), so the process exit status is always 0.
    sys.exit(main(sys.argv))
|
ArtRand/signalAlign
|
scripts/bonnyDoon.py
|
Python
|
mit
| 11,702
|
[
"BWA"
] |
53f4ffcfa6112d276041fba3e168b31d33c921291a1c5b368658482112fc86c2
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
ReconstructLine
QGIS tool to reconstruct linear features from points
The main idea of the processing is to construct Hamiltonian path.
To do it we use 1-d Self Organising Map. It allows:
order data points
connect nearest points
-------------------
begin : 2015-03-23
git sha : $Format:%H$
copyright : (C) 2015 by NextGIS
email : info@nextgis.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import numpy as np
from tuner import Tuner
EPSILON = 0.00001 # A small number
class SOM1d():
    '''1-d self-organizing map for 2-dimensional inputs.

    Points are stored as complex numbers (x + i*y). A 1-d chain of SOM
    units (self.w) is trained to follow the point cloud; reading the points
    off along the chain yields an ordering (an approximate Hamiltonian
    path), which connect() returns as an (M, 2) array.
    '''

    def __init__(self, data):
        # data: (N, 2) array of point coordinates.
        assert len(data.shape) == 2
        self.z = np.array([complex(p[0], p[1]) for p in data])
        # Per-axis normalization constants.
        self.x_avg = np.average(data[:, 0], axis=0)
        self.y_avg = np.average(data[:, 1], axis=0)
        self.x_std = np.std(data[:, 0], axis=0)
        self.y_std = np.std(data[:, 1], axis=0)
        ratio = 4.0/3.0  # (Number of SOM units) / (Number of points)
        # FIX: `np.complex` was a deprecated alias of the builtin and was
        # removed in NumPy 1.24; use the builtin `complex` type instead.
        self.w = np.zeros(int(data.shape[0]*ratio), dtype=complex)

    @property
    def size(self):
        """Number of SOM units."""
        return self.w.shape[0]

    def _normalize(self):
        """Standardize the input points in place (zero mean, unit std)."""
        self.z.real = (self.z.real - self.x_avg)/self.x_std
        self.z.imag = (self.z.imag - self.y_avg)/self.y_std

    def _denormalyze(self):
        """Undo _normalize on both the points and the trained units."""
        self.z.real = self.x_std*self.z.real + self.x_avg
        self.z.imag = self.y_std*self.z.imag + self.y_avg
        self.w.real = self.x_std*self.w.real + self.x_avg
        self.w.imag = self.y_std*self.w.imag + self.y_avg

    def _distances(self, point):
        '''Return array of Euclidean distances between self.w and point
        '''
        diff = self.w - point
        return abs(diff)

    def _BMU_idx(self, point):
        '''Return index of best matching unit of the point
        '''
        dists = self._distances(point)
        return np.argmin(dists)

    def _gaussian(self, c, sigma):
        """ Returns a Gaussian neighbourhood profile centered on unit c """
        d = 2*np.pi * sigma**2
        # FIX: was `range(self.size) - c`, which only worked through numpy's
        # reflected-operator broadcasting when c happened to be an np.int64;
        # build the index offsets explicitly.
        dists = np.arange(self.size) - c
        ax = np.exp(-np.power(dists, 2)/d)
        return ax

    def _update(self, sigma):
        """One epoch: present every point in random order, pulling units
        towards it weighted by the Gaussian neighbourhood of its BMU."""
        data = np.random.permutation(self.z)
        for point in data:
            bmu = self._BMU_idx(point)
            delta = (point - self.w[bmu])
            bubble = self._gaussian(bmu, sigma)
            delta = delta * bubble
            self.w += delta

    def _train(self, rlen, lrate=0.99, sigma_init=5.0):
        """Run up to rlen epochs, exponentially shrinking the neighbourhood
        radius; stop early once it decays below EPSILON."""
        sigma = sigma_init
        for t in range(rlen):
            sigma = sigma * lrate
            if sigma < EPSILON:
                break
            self._update(sigma)

    def connect(self):
        """Train the SOM and return the input points ordered along the
        resulting chain, as a one-element list of an (M, 2) ndarray."""
        # train SOM: coarse pass with a wide neighbourhood, then fine-tuning.
        self._normalize()
        self._train(self.size*100, lrate=0.99, sigma_init=self.size)
        self._train(self.size*250, lrate=0.99, sigma_init=2)
        self._denormalyze()
        # Bucket every point id by its best matching unit.
        ordered = {}
        for point_id in range(len(self.z)):
            bmu = self._BMU_idx(self.z[point_id])
            try:
                ordered[bmu].append(point_id)
            except KeyError:
                ordered[bmu] = [point_id]
        # Walk units in chain order, emitting their points.
        order = []
        for i in range(self.size):
            try:
                pnts = ordered[i]
                if len(pnts) != 1:
                    for point_id in pnts:
                        order.append(point_id)
                else:
                    order.append(pnts[0])
            except KeyError:
                # It's Ok, if self.size > len(self.z)
                pass
        # Some shapes of points (eg., angles) are difficult for SOM.
        # Use a little of postprocessing for tuning
        tuner = Tuner(self.z)
        order = tuner.reorder(order)
        result = np.take(self.z, order)
        result = np.array([[z.real, z.imag] for z in result])
        return [result]
if __name__ == "__main__":
    # Demo / manual test. NOTE(review): uses Python 2 print statements, so
    # this block only runs under Python 2.
    # Small synthetic example: an L-shaped cloud of points.
    data = np.array([
        [-0.1, 0],
        [0, 1.1],
        [0, 2.03],
        [0, 3.2],
        [0, 4.1],
        [0, 5.2],
        [0, 6.02],
        [1, 5.1],
        [1, 4.03],
        [3, 1.5],
        [2, 2.01],
        [1, 2],
        [1, 3]
    ])
    # Larger real-world sample (presumably lon/lat point pairs -- verify).
    data1 = np.array([
        [34.773262991,52.656898974],
        [34.77316903,52.656709962],
        [34.772321032,52.656871984],
        [34.771443028,52.657045992],
        [34.770453963,52.657227963],
        [34.76949499,52.657386968],
        [34.768617991,52.657537004],
        [34.767743004,52.657693997],
        [34.766845973,52.65785099],
        [34.765947014,52.65802701],
        [34.765082002,52.658178974],
        [34.764174996,52.65833999],
        [34.763295986,52.658496983],
        [34.762430973,52.658654982],
        [34.761546012,52.658806024],
        [34.760566,52.658975003],
        [34.759678021,52.659143982],
        [34.75881502,52.659289995],
        [34.757920001,52.659457969],
        [34.757035039,52.659610016],
        [34.756114036,52.659775978],
        [34.755199989,52.659943029],
        [34.75427404,52.66009097],
        [34.753378015,52.660273025],
        [34.75253203,52.660425995],
        [34.751642961,52.660590028],
        [34.750703014,52.660750961],
        [34.74987899,52.660898985],
        [34.748970978,52.661063019],
        [34.748048969,52.661219006],
        [34.747141041,52.661388991],
        [34.746245015,52.661553025],
        [34.745294005,52.661717981],
        [34.744335031,52.661794005],
        [34.743413022,52.661860976],
        [34.742446002,52.661921997],
        [34.741450986,52.662007995],
        [34.740509028,52.662063986],
        [34.73963798,52.662112014],
        [34.738820996,52.662183009],
        [34.772797041,52.657160992],
        [34.771933034,52.657704977],
        [34.770236034,52.658766964],
        [34.769355012,52.659326037],
        [34.76850098,52.659875974],
        [34.767625993,52.660431024],
        [34.766791994,52.660960006],
        [34.765842995,52.661571968],
        [34.764949987,52.662127018],
        [34.764039963,52.662702017],
        [34.763292968,52.663179031],
        [34.762440026,52.663719999],
        [34.761606026,52.664241018],
        [34.760725005,52.664806964],
        [34.75988497,52.665335024],
        [34.75904502,52.66585202],
        [34.758163998,52.666408997],
        [34.757231008,52.667002017],
        [34.756441014,52.667503003],
        [34.755566027,52.668050006],
        [34.754646029,52.668644032],
        [34.753865004,52.669138983],
        [34.771071039,52.658235971]
    ])
    # A third, shorter sample; this is the one actually used below.
    data2 = np.array([
        [34.746993016,52.706206022],
        [34.747126959,52.706670966],
        [34.747186974,52.707015965],
        [34.747258974,52.70734001],
        [34.747356037,52.707667993],
        [34.747418985,52.706163023],
        [34.747858029,52.706148019],
        [34.747888036,52.706127986],
        [34.748256002,52.706080042],
        [34.74869404,52.70605104],
        [34.748547021,52.704327973],
    ])
    som = SOM1d(data2)
    result = som.connect()
    for l in result:
        print 'l', l
    result = result[0]
    # Optional visual check of the reconstructed line:
    #~ import matplotlib.pyplot as plt
    #~ plt.plot(som.z.real, som.z.imag, 'o',
    #~ #         som.w.real, som.w.imag, 'r-o',
    #~          result[:, 0], result[:, 1], '-g')
    #~ plt.show()
|
nextgis/ReconstructLine
|
src/connector.py
|
Python
|
gpl-2.0
| 8,060
|
[
"Gaussian"
] |
3e57e8a68a9182010cb974c990dc6ac9b66a391716e7e41c226178ea7cb4af32
|
""" The mind is a service the distributes "task" to executors
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pprint
import six
from DIRAC import gLogger
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR, isReturnStructure
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities.ExecutorDispatcher import ExecutorDispatcher, ExecutorDispatcherCallbacks
class ExecutorMindHandler(RequestHandler):
    """DISET service-side "mind": queues tasks, serializes them and hands
    them to connected executor agents through an ExecutorDispatcher, using
    the message types declared in MSG_DEFINITIONS.

    Concrete services subclass this handler and implement the exec_*
    hooks (dispatch, serialization, error handling).
    """

    # Message schemas: message name -> {attribute name: accepted types}.
    # Task stubs travel as serialized strings; ids are integers
    # ('TaskFreeze' additionally accepts string ids).
    MSG_DEFINITIONS = {'ProcessTask': {'taskId': six.integer_types,
                                       'taskStub': six.string_types + (str, ),
                                       'eType': six.string_types + (str, )},
                       'TaskDone': {'taskId': six.integer_types,
                                    'taskStub': six.string_types + (str, )},
                       'TaskFreeze': {'taskId': six.integer_types + (str, ),
                                      'taskStub': six.string_types + (str, ),
                                      'freezeTime': six.integer_types},
                       'TaskError': {'taskId': six.integer_types,
                                     'errorMsg': six.string_types + (str, ),
                                     'taskStub': six.string_types + (str, ),
                                     'eType': six.string_types + (str, )},
                       'ExecutorError': {'taskId': six.integer_types,
                                         'errorMsg': six.string_types + (str, ),
                                         'eType': six.string_types + (str, )}}

    class MindCallbacks(ExecutorDispatcherCallbacks):
        """Adapter forwarding ExecutorDispatcher callbacks to the callables
        supplied by the enclosing handler at construction time."""

        def __init__(self, sendTaskCB, dispatchCB, disconnectCB, taskProcCB, taskFreezeCB, taskErrCB):
            self.__sendTaskCB = sendTaskCB
            self.__dispatchCB = dispatchCB
            self.__disconnectCB = disconnectCB
            # NOTE(review): "DB" suffix looks like a typo for "CB"; it is
            # used consistently within this class, so behavior is fine.
            self.__taskProcDB = taskProcCB
            self.__taskFreezeCB = taskFreezeCB
            self.__taskErrCB = taskErrCB
            self.__allowedClients = []

        def cbSendTask(self, taskId, taskObj, eId, eType):
            return self.__sendTaskCB(taskId, taskObj, eId, eType)

        def cbDispatch(self, taskId, taskObj, pathExecuted):
            return self.__dispatchCB(taskId, taskObj, pathExecuted)

        def cbDisconectExecutor(self, eId):
            # "Disconect" (sic) -- the name must match what the dispatcher
            # invokes, so it is left unchanged.
            return self.__disconnectCB(eId)

        def cbTaskError(self, taskId, taskObj, errorMsg):
            return self.__taskErrCB(taskId, taskObj, errorMsg)

        def cbTaskProcessed(self, taskId, taskObj, eType):
            return self.__taskProcDB(taskId, taskObj, eType)

        def cbTaskFreeze(self, taskId, taskObj, eType):
            return self.__taskFreezeCB(taskId, taskObj, eType)

    ###
    # End of callbacks
    ###

    @classmethod
    def initializeHandler(cls, serviceInfoDict):
        """Create the dispatcher and wire its callbacks; called once when
        the service starts."""
        gLogger.notice("Initializing Executor dispatcher")
        cls.__eDispatch = ExecutorDispatcher(cls.srv_getMonitor())
        cls.__callbacks = ExecutorMindHandler.MindCallbacks(cls.__sendTask,
                                                            cls.exec_dispatch,
                                                            cls.__execDisconnected,
                                                            cls.exec_taskProcessed,
                                                            cls.exec_taskFreeze,
                                                            cls.exec_taskError)
        cls.__eDispatch.setCallbacks(cls.__callbacks)
        cls.__allowedClients = []
        if cls.log.shown("VERBOSE"):
            gThreadScheduler.setMinValidPeriod(1)
            # Periodically dump the dispatcher internals for debugging.
            gThreadScheduler.addPeriodicTask(
                10, lambda: cls.log.verbose(
                    "== Internal state ==\n%s\n===========" %
                    pprint.pformat(
                        cls.__eDispatch._internals())))
        return S_OK()

    @classmethod
    def setAllowedClients(cls, aClients):
        """Declare extra (non-executor) client kinds allowed to connect."""
        if not isinstance(aClients, (list, tuple)):
            aClients = (aClients, )
        cls.__allowedClients = aClients

    @classmethod
    def __sendTask(self, taskId, taskObj, eId, eType):
        """Serialize taskObj and send it to executor eId as a 'ProcessTask'
        message; any step may veto by returning a non-OK structure."""
        # NOTE(review): declared @classmethod but the first parameter is
        # named `self` (it receives the class). Works, but confusing.
        try:
            result = self.exec_prepareToSend(taskId, taskObj, eId)
            if not result['OK']:
                return result
        except Exception as excp:
            gLogger.exception("Exception while executing prepareToSend: %s" % str(excp), lException=excp)
            return S_ERROR("Cannot presend task")
        try:
            result = self.exec_serializeTask(taskObj)
        except Exception as excp:
            gLogger.exception("Exception while serializing task %s" % taskId, lException=excp)
            return S_ERROR("Cannot serialize task %s: %s" % (taskId, str(excp)))
        if not isReturnStructure(result):
            raise Exception("exec_serializeTask does not return a return structure")
        if not result['OK']:
            return result
        taskStub = result['Value']
        result = self.srv_msgCreate("ProcessTask")
        if not result['OK']:
            return result
        msgObj = result['Value']
        msgObj.taskId = taskId
        msgObj.taskStub = taskStub
        msgObj.eType = eType
        return self.srv_msgSend(eId, msgObj)

    @classmethod
    def __execDisconnected(cls, trid):
        """Dispatcher callback: drop the transport, then notify the
        exec_executorDisconnected hook."""
        result = cls.srv_disconnectClient(trid)
        if not result['OK']:
            return result
        return cls.exec_executorDisconnected(trid)

    auth_conn_new = ['all']

    def conn_new(self, trid, identity, kwargs):
        """Accept a new connection only from executors or from explicitly
        allowed client kinds (see setAllowedClients)."""
        if 'executorTypes' in kwargs and kwargs['executorTypes']:
            return S_OK()
        for aClient in self.__allowedClients:
            if aClient in kwargs and kwargs[aClient]:
                return S_OK()
        return S_ERROR("Only executors are allowed to connect")

    auth_conn_connected = ['all']

    def conn_connected(self, trid, identity, kwargs):
        """Register a fully connected executor with the dispatcher (allowed
        non-executor clients are simply acknowledged)."""
        for aClient in self.__allowedClients:
            if aClient in kwargs and kwargs[aClient]:
                return S_OK()
        try:
            numTasks = max(1, int(kwargs['maxTasks']))
        except Exception:
            numTasks = 1
        # NOTE(review): numTasks is computed but never used; presumably it
        # was meant to be forwarded to addExecutor -- confirm against
        # ExecutorDispatcher.addExecutor's signature.
        self.__eDispatch.addExecutor(trid, kwargs['executorTypes'])
        return self.exec_executorConnected(trid, kwargs['executorTypes'])

    auth_conn_drop = ['all']

    def conn_drop(self, trid):
        """Deregister an executor whose connection dropped."""
        self.__eDispatch.removeExecutor(trid)
        return S_OK()

    auth_msg_TaskDone = ['all']

    def msg_TaskDone(self, msgObj):
        """Handle a 'TaskDone' message: deserialize the task and tell the
        dispatcher it was processed."""
        taskId = msgObj.taskId
        try:
            result = self.exec_deserializeTask(msgObj.taskStub)
        except Exception as excp:
            gLogger.exception("Exception while deserializing task %s" % taskId, lException=excp)
            return S_ERROR("Cannot deserialize task %s: %s" % (taskId, str(excp)))
        if not isReturnStructure(result):
            raise Exception("exec_deserializeTask does not return a return structure")
        if not result['OK']:
            return result
        taskObj = result['Value']
        result = self.__eDispatch.taskProcessed(self.srv_getTransportID(), msgObj.taskId, taskObj)
        if not result['OK']:
            gLogger.error("There was a problem processing task", "%s: %s" % (taskId, result['Message']))
        return S_OK()

    auth_msg_TaskFreeze = ['all']

    def msg_TaskFreeze(self, msgObj):
        """Handle a 'TaskFreeze' message: put the task on hold for
        msgObj.freezeTime seconds."""
        taskId = msgObj.taskId
        try:
            result = self.exec_deserializeTask(msgObj.taskStub)
        except Exception as excp:
            gLogger.exception("Exception while deserializing task %s" % taskId, lException=excp)
            return S_ERROR("Cannot deserialize task %s: %s" % (taskId, str(excp)))
        if not isReturnStructure(result):
            raise Exception("exec_deserializeTask does not return a return structure")
        if not result['OK']:
            return result
        taskObj = result['Value']
        result = self.__eDispatch.freezeTask(self.srv_getTransportID(), msgObj.taskId,
                                             msgObj.freezeTime, taskObj)
        if not result['OK']:
            gLogger.error("There was a problem freezing task", "%s: %s" % (taskId, result['Message']))
        return S_OK()

    auth_msg_TaskError = ['all']

    def msg_TaskError(self, msgObj):
        """Handle a 'TaskError' message: drop the task and forward the
        error to the exec_taskError hook."""
        taskId = msgObj.taskId
        try:
            result = self.exec_deserializeTask(msgObj.taskStub)
        except Exception as excp:
            gLogger.exception("Exception while deserializing task %s" % taskId, lException=excp)
            return S_ERROR("Cannot deserialize task %s: %s" % (taskId, str(excp)))
        if not isReturnStructure(result):
            raise Exception("exec_deserializeTask does not return a return structure")
        if not result['OK']:
            return result
        taskObj = result['Value']
        # TODO: Check the executor has privileges over the task
        self.__eDispatch.removeTask(msgObj.taskId)
        try:
            self.exec_taskError(msgObj.taskId, taskObj, msgObj.errorMsg)
        except Exception as excp:
            gLogger.exception("Exception when processing task %s" % msgObj.taskId, lException=excp)
        return S_OK()

    auth_msg_ExecutorError = ['all']

    def msg_ExecutorError(self, msgObj):
        """Handle an 'ExecutorError' message: log it and drop the
        executor's connection."""
        gLogger.info("Disconnecting executor by error: %s" % msgObj.errorMsg)
        self.__eDispatch.removeExecutor(self.srv_getTransportID())
        return self.srv_disconnect()

    #######
    # Utilities functions
    #######

    @classmethod
    def getTaskIds(cls):
        """Return the ids of all tasks known to the dispatcher."""
        return cls.__eDispatch.getTaskIds()

    @classmethod
    def getExecutorsConnected(cls):
        """Return the currently connected executors."""
        return cls.__eDispatch.getExecutorsConnected()

    @classmethod
    def setFailedOnTooFrozen(cls, value):
        # If a task is frozen too many times, send error or forget task?
        cls.__eDispatch.setFailedOnTooFrozen(value)

    @classmethod
    def setFreezeOnFailedDispatch(cls, value):
        # If a task fails to properly dispatch, freeze it?
        cls.__eDispatch.setFreezeOnFailedDispatch(value)

    @classmethod
    def setFreezeOnUnknownExecutor(cls, value):
        # If a task needs to go to an executor that has not connected. Forget the task?
        cls.__eDispatch.setFreezeOnUnknownExecutor(value)

    #######
    # Methods that can be overwritten
    #######

    @classmethod
    def exec_executorDisconnected(cls, trid):
        """Hook: called after an executor disconnects; default no-op."""
        return S_OK()

    @classmethod
    def exec_executorConnected(cls, execName, trid):
        """Hook: called after an executor connects; default no-op.

        NOTE(review): the only caller passes (trid, executorTypes); the
        parameter names here appear swapped -- verify before relying on
        them in an override.
        """
        return S_OK()

    @classmethod
    def exec_prepareToSend(cls, taskId, taskObj, eId):
        """Hook: called just before a task is serialized and sent."""
        return S_OK()

    ########
    # Methods to be used by the real services
    ########

    @classmethod
    def executeTask(cls, taskId, taskObj):
        """Queue a task for dispatching to an executor."""
        return cls.__eDispatch.addTask(taskId, taskObj)

    @classmethod
    def forgetTask(cls, taskId):
        """Remove a task from the dispatcher."""
        return cls.__eDispatch.removeTask(taskId)

    ########
    # Methods that need to be overwritten
    ########

    @classmethod
    def exec_dispatch(cls, taskId, taskObj, pathExecuted):
        raise Exception("No exec_dispatch defined or it is not a classmethod!!")

    @classmethod
    def exec_serializeTask(cls, taskObj):
        raise Exception("No exec_serializeTask defined or it is not a classmethod!!")

    @classmethod
    def exec_deserializeTask(cls, taskStub):
        raise Exception("No exec_deserializeTask defined or it is not a classmethod!!")

    @classmethod
    def exec_taskError(cls, taskId, taskObj, errorMsg):
        raise Exception("No exec_taskError defined or it is not a classmethod!!")

    @classmethod
    def exec_taskProcessed(cls, taskId, taskObj, eType):
        raise Exception("No exec_taskProcessed defined or it is not a classmethod!!")

    @classmethod
    def exec_taskFreeze(cls, taskId, taskObj, eType):
        """Hook: called when a task is frozen; default accepts silently."""
        return S_OK()
|
yujikato/DIRAC
|
src/DIRAC/Core/Base/ExecutorMindHandler.py
|
Python
|
gpl-3.0
| 11,068
|
[
"DIRAC"
] |
d0c682ad1aac9f2a0cc5d9fbcb3776f16cb760cef735418cd74041f57be738b3
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace for the operators not belonging to the official numpy package
used in Gluon dispatched by F=ndarray module."""
import numpy as _np
from .._internal import NDArrayBase
from . import _api_internal
from ...util import set_module
__all__ = ['softmax', 'log_softmax', 'masked_softmax', 'masked_log_softmax',
'activation', 'batch_norm', 'fully_connected', 'pick', 'convolution',
'deconvolution', 'pooling', 'dropout', 'one_hot', 'rnn', 'embedding',
'topk', 'layer_norm', 'leaky_relu', 'batch_dot', 'broadcast_like',
'arange_like', 'group_norm']
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def softmax(data, axis=-1, length=None, temperature=None, use_length=False, dtype=None):
    r"""Apply the softmax function along the given axis.

    Output elements lie in (0, 1) and sum to 1 along *axis*:

    .. math::
       softmax(\mathbf{z/t})_j = \frac{e^{z_j/t}}{\sum_{k=1}^K e^{z_k/t}}

    for :math:`j = 1, ..., K`, where *t* is the temperature parameter
    (1.0 by default).

    Parameters
    ----------
    data : NDArray
        The input array.
    axis : int, optional, default='-1'
        The axis along which to compute softmax.
    length : NDArray
        The length array; consulted only when ``use_length`` is True.
    temperature : double or None, optional, default=None
        Temperature parameter in softmax.
    dtype : {None, 'float16', 'float32', 'float64'}, optional, default='None'
        DType of the output in case this can't be inferred. Defaults to
        the same as input's dtype if not defined (dtype=None).
    use_length : boolean or None, optional, default=0
        Whether to use the length input as a mask over the data input.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.

    Example
    -------
    >>> data = np.ones((2, 3))
    >>> npx.softmax(data, axis=0)
    array([[0.5, 0.5, 0.5],
           [0.5, 0.5, 0.5]])
    >>> npx.softmax(data, axis=1)
    array([[0.33333334, 0.33333334, 0.33333334],
           [0.33333334, 0.33333334, 0.33333334]])
    """
    # Normalize a numpy dtype object/class to its canonical string name.
    if dtype and not isinstance(dtype, str):
        dtype = _np.dtype(dtype).name
    if not use_length:
        assert length is None, "Length input is not used"
        return _api_internal.softmax(data, axis, temperature, False, dtype)
    assert length is not None, "Missing length input"
    return _api_internal.softmax(data, length, axis, temperature, True, dtype)
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def log_softmax(data, axis=-1, length=None, temperature=None, use_length=False, dtype=None):
    r"""Compute the log softmax of the input.

    Equivalent to applying softmax followed by log.

    Parameters
    ----------
    data : NDArray
        The input array.
    axis : int, optional, default=-1
        The axis along which to compute softmax.
    length : NDArray, optional
        Per-example valid lengths; only consulted when ``use_length`` is True.
    temperature : double or None, optional, default=None
        Temperature parameter in softmax.
    use_length : boolean or None, optional, default=False
        Whether to use the length input as a mask over the data input.
    dtype : {None, 'float16', 'float32', 'float64'}, optional, default=None
        DType of the output in case this can't be inferred. Defaults to
        the same as input's dtype if not defined (dtype=None).

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.

    Examples
    --------
    >>> data = np.array([1, 2, .1])
    >>> npx.log_softmax(data)
    array([-1.4170278, -0.4170278, -2.3170278])
    >>> data = np.array([[1, 2, .1],[.1, 2, 1]])
    >>> npx.log_softmax(data, axis=0)
    array([[-0.34115386, -0.6931472 , -1.2411538 ],
           [-1.2411538 , -0.6931472 , -0.34115386]])
    """
    # Normalize a numpy dtype object to its canonical string name.
    if dtype and not isinstance(dtype, str):
        dtype = _np.dtype(dtype).name
    if not use_length:
        # ``length`` is only meaningful together with ``use_length``.
        assert length is None, "Length input is not used"
        return _api_internal.log_softmax(data, axis, temperature, False, dtype)
    assert length is not None, "Missing length input"
    return _api_internal.log_softmax(data, length, axis, temperature, True, dtype)
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def masked_softmax(data, mask, axis=-1, temperature=1.0, normalize=True):
    r"""Applies the softmax function masking elements according to the mask provided

    Parameters
    ----------
    data : NDArray
        The input array.
    mask : NDArray
        Mask to apply; positions with a zero mask receive zero output
        (see the examples below).
    axis : int, optional, default=-1
        The axis along which to compute softmax.
    temperature : double, optional, default=1.0
        Temperature parameter in softmax
    normalize : boolean, optional, default=True
        Whether to normalize input data x: x = x - max(x)

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.

    Examples
    --------
    >>> data = np.arange(5)
    >>> mask = np.array([1, 0, 1, 0, 1])
    >>> npx.masked_softmax(data, mask)
    array([0.01587624, 0.        , 0.11731042, 0.        , 0.8668133 ])
    >>> data = np.arange(10).reshape((2, 5))
    >>> npx.masked_softmax(data, mask, axis=0)
    array([[0.00669285, 0.        , 0.00669285, 0.        , 0.00669285],
           [0.9933072 , 0.        , 0.9933072 , 0.        , 0.9933072 ]])
    """
    # Unlike plain softmax, both operands are mandatory here.
    assert data is not None and mask is not None, "Missing input data and mask"
    return _api_internal.masked_softmax(data, mask, axis, temperature, normalize)
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def masked_log_softmax(data, mask, axis=-1, temperature=1.0, normalize=True):
    r"""Computes the masked log softmax of the input.
    This is equivalent to computing masked softmax followed by log.

    Parameters
    ----------
    data : NDArray
        The input array.
    mask : NDArray
        Mask to apply; positions with a zero mask receive ``-inf``
        (see the examples below).
    axis : int, optional, default=-1
        The axis along which to compute softmax.
    temperature : double, optional, default=1.0
        Temperature parameter in softmax
    normalize : boolean, optional, default=True
        Whether to normalize input data x: x = x - max(x)

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.

    Examples
    --------
    >>> data = np.arange(5)
    >>> mask = np.array([1, 0, 1, 0, 1])
    >>> npx.masked_log_softmax(data, mask)
    array([-4.1429286 ,        -inf, -2.1429286 ,        -inf, -0.14292854])
    >>> data = np.arange(10).reshape((2, 5))
    >>> npx.masked_log_softmax(data, mask, axis=0)
    array([[-5.0067153 ,        -inf, -5.0067153 ,        -inf, -5.0067153 ],
           [-0.00671535,        -inf, -0.00671535,        -inf, -0.00671535]])
    """
    # Unlike plain log_softmax, both operands are mandatory here.
    assert data is not None and mask is not None, "Missing input data and mask"
    return _api_internal.masked_log_softmax(data, mask, axis, temperature, normalize)
# pylint: disable=too-many-arguments, unused-argument
@set_module('mxnet.ndarray.numpy_extension')
def activation(data, act_type='relu', **kwargs):
    r"""Apply an activation function element-wise to the input.

    Supported activation functions:

    - `log_sigmoid`: :math:`y = log(\frac{1}{1 + exp(-x)})`
    - `mish`: :math:`y = x * tanh(log(1 + exp(x)))`
    - `relu`: Rectified Linear Unit, :math:`y = max(x, 0)`
    - `sigmoid`: :math:`y = \frac{1}{1 + exp(-x)}`
    - `tanh`: Hyperbolic tangent, :math:`y = \frac{exp(x) - exp(-x)}{exp(x) + exp(-x)}`
    - `softrelu`: Soft ReLU, or SoftPlus, :math:`y = log(1 + exp(x))`
    - `softsign`: :math:`y = \frac{x}{1 + abs(x)}`

    Parameters
    ----------
    data : NDArray
        The input array.
    act_type : {'log_sigmoid', 'mish', 'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required
        Activation function to be applied.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.
    """
    # Extra keyword arguments are accepted for API compatibility but unused.
    out = _api_internal.activation(data, act_type)
    return out
# pylint: disable=too-many-arguments, unused-argument
@set_module('mxnet.ndarray.numpy_extension')
def batch_norm(x, gamma, beta, running_mean, running_var, eps=1e-3, momentum=0.9,
               fix_gamma=True, use_global_stats=False, output_mean_var=False, axis=1,
               cudnn_off=False, min_calib_range=None, max_calib_range=None, **kwargs):
    r"""Batch normalization.
    Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as
    well as offset ``beta``.
    Assume the input has more than one dimension and we normalize along axis 1.
    We first compute the mean and variance along this axis:
    .. math::
      data\_mean[i] = mean(data[:,i,:,...]) \\
      data\_var[i] = var(data[:,i,:,...])
    Then compute the normalized output, which has the same shape as input, as following:
    .. math::
      out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i]
    Both *mean* and *var* returns a scalar by treating the input as a vector.
    Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta``
    have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and
    the inverse of ``data_var``, which are needed for the backward pass. Note that gradient of these
    two outputs are blocked.
    Besides the inputs and the outputs, this operator accepts two auxiliary
    states, ``moving_mean`` and ``moving_var``, which are *k*-length
    vectors. They are global statistics for the whole dataset, which are updated
    by::
      moving_mean = moving_mean * momentum + data_mean * (1 - momentum)
      moving_var = moving_var * momentum + data_var * (1 - momentum)
    If ``use_global_stats`` is set to be true, then ``moving_mean`` and
    ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute
    the output. It is often used during inference.
    The parameter ``axis`` specifies which axis of the input shape denotes
    the 'channel' (separately normalized groups). The default is 1. Specifying -1 sets the channel
    axis to be the last item in the input shape.
    Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true,
    then set ``gamma`` to 1 and its gradient to 0.
    .. Note::
      When ``fix_gamma`` is set to True, no sparse support is provided. If ``fix_gamma is`` set to False,
      the sparse tensors will fallback.

    Parameters
    ----------
    x : NDArray
        Input data to batch normalization
    gamma : NDArray
        gamma array
    beta : NDArray
        beta array
    running_mean : NDArray
        running mean of input (the ``moving_mean`` auxiliary state above)
    running_var : NDArray
        running variance of input (the ``moving_var`` auxiliary state above)
    eps : double, optional, default=1e-3
        Epsilon to prevent div 0. Must be no less than CUDNN_BN_MIN_EPSILON
        defined in cudnn.h when using cudnn (usually 1e-5)
    momentum : float, optional, default=0.9
        Momentum for moving average
    fix_gamma : boolean, optional, default=True
        Fix gamma while training
    use_global_stats : boolean, optional, default=False
        Whether use global moving statistics instead of local batch-norm.
        This will force change batch-norm into a scale shift operator.
    output_mean_var : boolean, optional, default=False
        Output the mean and inverse std
    axis : int, optional, default=1
        Specify which shape axis the channel is specified
    cudnn_off : boolean, optional, default=False
        Do not select CUDNN operator, if available
    min_calib_range : float or None, optional, default=None
        The minimum scalar value in the form of float32 obtained through calibration.
        If present, it will be used to by quantized batch norm op to calculate primitive scale.
        Note: this calib_range is to calib bn output.
    max_calib_range : float or None, optional, default=None
        The maximum scalar value in the form of float32 obtained through calibration.
        If present, it will be used to by quantized batch norm op to calculate primitive scale.
        Note: this calib_range is to calib bn output.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.
    """
    out = _api_internal.batch_norm(x, gamma, beta, running_mean, running_var, eps, momentum,
                                   fix_gamma, use_global_stats, output_mean_var, axis,
                                   cudnn_off, min_calib_range, max_calib_range)
    # The backend returns a single NDArray normally, or a tuple of outputs
    # (e.g. when output_mean_var is set); surface the latter as a list.
    if isinstance(out, NDArrayBase):
        return out
    return list(out)
# pylint: disable=too-many-arguments, unused-argument
@set_module('mxnet.ndarray.numpy_extension')
def fully_connected(x, weight, bias=None, num_hidden=None,
                    no_bias=True, flatten=True, **kwargs):
    r"""Apply a linear transformation: :math:`Y = XW^T + b`.

    If ``flatten`` is set to be true, then the shapes are:

    - **data**: `(batch_size, x1, x2, ..., xn)`
    - **weight**: `(num_hidden, x1 * x2 * ... * xn)`
    - **bias**: `(num_hidden,)`
    - **out**: `(batch_size, num_hidden)`

    If ``flatten`` is set to be false, then the shapes are:

    - **data**: `(x1, x2, ..., xn, input_dim)`
    - **weight**: `(num_hidden, input_dim)`
    - **bias**: `(num_hidden,)`
    - **out**: `(x1, x2, ..., xn, num_hidden)`

    The learnable parameters include both ``weight`` and ``bias``.
    If ``no_bias`` is set to be true, then the ``bias`` term is ignored.

    .. Note::
        The sparse support for FullyConnected is limited to forward evaluation with `row_sparse`
        weight and bias, where the length of `weight.indices` and `bias.indices` must be equal
        to `num_hidden`. This could be useful for model inference with `row_sparse` weights
        trained with importance sampling or noise contrastive estimation.
        To compute linear transformation with 'csr' sparse data, sparse.dot is recommended instead
        of sparse.FullyConnected.

    Parameters
    ----------
    x : NDArray
        Input data.
    weight : NDArray
        Weight matrix.
    bias : NDArray, optional
        Bias parameter; required when ``no_bias`` is False.
    num_hidden : int, required
        Number of hidden nodes of the output.
    no_bias : boolean, optional, default=True
        Whether to disable bias parameter.
    flatten : boolean, optional, default=True
        Whether to collapse all but the first axis of the input data tensor.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.
    """
    assert num_hidden is not None, "Please provide number of hidden nodes"
    if not no_bias:
        # With a bias term, the backend call takes an extra operand.
        assert bias is not None, "Missing bias parameter"
        return _api_internal.fully_connected(x, weight, bias, num_hidden,
                                             no_bias, flatten)
    return _api_internal.fully_connected(x, weight, num_hidden, no_bias, flatten)
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def pick(data, index, axis=-1, mode='clip', keepdims=False):
    r"""Pick elements from an input array according to the input indices along the given axis.

    Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be
    an output array of shape ``(i0,)`` with::

      output[i] = input[i, indices[i]]

    By default, if any index mentioned is too large, it is replaced by the index that addresses
    the last element along an axis (the `clip` mode).
    This function supports n-dimensional input and (n-1)-dimensional indices arrays.

    Parameters
    ----------
    data : NDArray
        The input array
    index : NDArray
        The index array
    axis : int or None, optional, default=-1
        int or None. The axis to picking the elements.
        Negative values means indexing from right to left.
        If is `None`, the elements in the index w.r.t the flattened input will be picked.
    mode : {'clip', 'wrap'}, optional, default='clip'
        Specify how out-of-bound indices behave. Default is "clip".
        "clip" means clip to the range. So, if all indices mentioned are too large,
        they are replaced by the index that addresses the last element along an axis.
        "wrap" means to wrap around.
    keepdims : boolean, optional, default=False
        If true, the axis where we pick the elements is
        left in the result as dimension with size one.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.

    Example
    -------
    >>> x = np.array([[1., 2.],[3., 4.],[5., 6.]])
    picks elements with specified indices along axis 0
    >>> npx.pick(x, np.array([0, 1]), 0)
    array([1., 4.])
    picks elements with specified indices along axis 1
    >>> npx.pick(x, np.array([0, 1, 0]), 1)
    array([1., 4., 5.])
    picks elements with specified indices along axis 1 using 'wrap' mode
    to place indicies that would normally be out of bounds
    >>> npx.pick(x, np.array([2, -1, -2]), 1, mode='wrap')
    array([1., 4., 5.])
    picks elements with specified indices along axis 1 and dims are maintained
    >>> npx.pick(x, np.array([[1.], [0.], [2.]]), 1, keepdims=True)
    array([[2.],
           [3.],
           [6.]])
    """
    picked = _api_internal.pick(data, index, axis, mode, keepdims)
    return picked
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def convolution(data=None, weight=None, bias=None, kernel=None, stride=None, dilate=None,
                pad=None, num_filter=1, num_group=1, workspace=1024, no_bias=False,
                cudnn_tune=None, cudnn_off=False, layout=None):
    r"""Compute *N*-D convolution on *(N+2)*-D input.
    In the 2-D convolution, given input data with shape *(batch_size,
    channel, height, width)*, the output is computed by
    .. math::
       out[n,i,:,:] = bias[i] + \sum_{j=0}^{channel} data[n,j,:,:] \star
       weight[i,j,:,:]
    where :math:`\star` is the 2-D cross-correlation operator.
    For general 2-D convolution, the shapes are
    - **data**: *(batch_size, channel, height, width)*
    - **weight**: *(num_filter, channel, kernel[0], kernel[1])*
    - **bias**: *(num_filter,)*
    - **out**: *(batch_size, num_filter, out_height, out_width)*.
    Define::
      f(x,k,p,s,d) = floor((x+2*p-d*(k-1)-1)/s)+1
    then we have::
      out_height=f(height, kernel[0], pad[0], stride[0], dilate[0])
      out_width=f(width, kernel[1], pad[1], stride[1], dilate[1])
    If ``no_bias`` is set to be true, then the ``bias`` term is ignored.
    The default data ``layout`` is *NCHW*, namely *(batch_size, channel, height,
    width)*. We can choose other layouts such as *NWC*.
    If ``num_group`` is larger than 1, denoted by *g*, then split the input ``data``
    evenly into *g* parts along the channel axis, and also evenly split ``weight``
    along the first dimension. Next compute the convolution on the *i*-th part of
    the data with the *i*-th weight part. The output is obtained by concatenating all
    the *g* results.
    1-D convolution does not have *height* dimension but only *width* in space.
    - **data**: *(batch_size, channel, width)*
    - **weight**: *(num_filter, channel, kernel[0])*
    - **bias**: *(num_filter,)*
    - **out**: *(batch_size, num_filter, out_width)*.
    3-D convolution adds an additional *depth* dimension besides *height* and
    *width*. The shapes are
    - **data**: *(batch_size, channel, depth, height, width)*
    - **weight**: *(num_filter, channel, kernel[0], kernel[1], kernel[2])*
    - **bias**: *(num_filter,)*
    - **out**: *(batch_size, num_filter, out_depth, out_height, out_width)*.
    Both ``weight`` and ``bias`` are learnable parameters.
    There are other options to tune the performance.
    - **cudnn_tune**: enable this option leads to higher startup time but may give
      faster speed. Options are
      - **off**: no tuning
      - **limited_workspace**:run test and pick the fastest algorithm that doesn't
        exceed workspace limit.
      - **fastest**: pick the fastest algorithm and ignore workspace limit.
      - **None** (default): the behavior is determined by environment variable
        ``MXNET_CUDNN_AUTOTUNE_DEFAULT``. 0 for off, 1 for limited workspace
        (default), 2 for fastest.
    - **workspace**: A large number leads to more (GPU) memory usage but may improve
      the performance.
    Parameters
    ----------
    data : NDArray
        Input data to the ConvolutionOp.
    weight : NDArray
        Weight matrix.
    bias : NDArray
        Bias parameter.
    kernel : Shape(tuple), required
        Convolution kernel size: (w,), (h, w) or (d, h, w)
    stride : Shape(tuple), optional, default=[]
        Convolution stride: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension.
    dilate : Shape(tuple), optional, default=[]
        Convolution dilate: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension.
    pad : Shape(tuple), optional, default=[]
        Zero pad for convolution: (w,), (h, w) or (d, h, w). Defaults to no padding.
    num_filter : int (non-negative), required
        Convolution filter(channel) number
    num_group : int (non-negative), optional, default=1
        Number of group partitions.
    workspace : long (non-negative), optional, default=1024
        Maximum temporary workspace allowed (MB) in convolution.This parameter has two usages.
        When CUDNN is not used, it determines the effective batch size of the convolution kernel.
        When CUDNN is used, it controls the maximum temporary storage used for tuning the best
        CUDNN kernel when `limited_workspace` strategy is used.
    no_bias : boolean, optional, default=0
        Whether to disable bias parameter.
    cudnn_tune : {None, 'fastest', 'limited_workspace', 'off'},optional, default='None'
        Whether to pick convolution algo by running performance test.
    cudnn_off : boolean, optional, default=0
        Turn off cudnn for this layer.
    layout : {None, 'NCDHW', 'NCHW', 'NCW', 'NDHWC', 'NHWC'},optional, default='None'
        Set layout for input, output and weight. Empty for
        default layout: NCW for 1d, NCHW for 2d and NCDHW for 3d.
        NHWC and NDHWC are only supported on GPU.
    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.
    """
    # The three geometry-defining inputs are mandatory despite the
    # keyword-style signature.
    assert data is not None and weight is not None and kernel is not None, \
        "Missing input data, weight or kernel"
    assert num_filter >= 1, "Number of output filters should be greater equal to 1."
    assert workspace >= 0, "Maximum temporary workspace should be greater equal to 0."
    # The backend call has a different arity depending on whether a bias
    # operand is present, so the two cases cannot share one invocation.
    if no_bias:
        assert bias is None, "Using no bias"
        return _api_internal.convolution(data, weight, kernel, stride, dilate, pad,
                                         num_filter, num_group, workspace, no_bias,
                                         cudnn_tune, cudnn_off, layout)
    else:
        assert bias is not None, "Using bias"
        return _api_internal.convolution(data, weight, bias, kernel, stride, dilate, pad,
                                         num_filter, num_group, workspace, no_bias,
                                         cudnn_tune, cudnn_off, layout)
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def deconvolution(data=None, weight=None, bias=None, kernel=None, stride=None, dilate=None,
                  pad=None, adj=None, target_shape=None, num_filter=1, num_group=1,
                  workspace=1024, no_bias=False, cudnn_tune=None,
                  cudnn_off=False, layout=None):
    r"""Computes 1D, 2D or 3D transposed convolution (aka fractionally strided convolution) of
    the input tensor. This operation can be seen as the gradient of Convolution operation
    with respect to its input. Convolution usually reduces the size of the input.
    Transposed convolution works the other way, going from a smaller input
    to a larger output while preserving the connectivity pattern.

    Parameters
    ----------
    data : NDArray
        Input tensor to the deconvolution operation.
    weight : NDArray
        Weights representing the kernel.
    bias : NDArray
        Bias added to the result after the deconvolution operation.
    kernel : Shape(tuple), required
        Deconvolution kernel size: (w,), (h, w) or (d, h, w).
        This is same as the kernel size used for the corresponding convolution
    stride : Shape(tuple), optional, default=[]
        The stride used for the corresponding convolution: (w,), (h, w) or (d, h, w).
        Defaults to 1 for each dimension.
    dilate : Shape(tuple), optional, default=[]
        Dilation factor for each dimension of the input: (w,), (h, w) or (d, h, w).
        Defaults to 1 for each dimension.
    pad : Shape(tuple), optional, default=[]
        The amount of implicit zero padding added during convolution for each dimension of
        the input: (w,), (h, w) or (d, h, w). ``(kernel-1)/2`` is usually a good choice.
        If `target_shape` is set, `pad` will be ignored and a padding that will generate
        the target shape will be used. Defaults to no padding.
    adj : Shape(tuple), optional, default=[]
        Adjustment for output shape: (w,), (h, w) or (d, h, w).
        If `target_shape` is set, `adj` will be ignored and computed accordingly.
    target_shape : Shape(tuple), optional, default=[]
        Shape of the output tensor: (w,), (h, w) or (d, h, w).
    num_filter : int (non-negative), required
        Number of output filters.
    num_group : int (non-negative), optional, default=1
        Number of groups partition.
    workspace : long (non-negative), optional, default=1024
        Maximum temporary workspace allowed (MB) in deconvolution. This parameter has two usages.
        When CUDNN is not used, it determines the effective batch size of the deconvolution kernel.
        When CUDNN is used, it controls the maximum temporary storage used for tuning
        the best CUDNN kernel when `limited_workspace` strategy is used.
    no_bias : boolean, optional, default=False
        Whether to disable bias parameter.
    cudnn_tune : {None, 'fastest', 'limited_workspace', 'off'},optional, default='None'
        Whether to pick convolution algorithm by running performance test.
    cudnn_off : boolean, optional, default=0
        Turn off cudnn for this layer.
    layout : {None, 'NCDHW', 'NCHW', 'NCW', 'NDHWC', 'NHWC'},optional, default='None'
        Set layout for input, output and weight. Empty for
        default layout, NCW for 1d, NCHW for 2d and NCDHW for 3d.
        NHWC and NDHWC are only supported on GPU.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.
    """
    # The three geometry-defining inputs are mandatory despite the
    # keyword-style signature.
    assert data is not None and weight is not None and kernel is not None, \
        "Missing input data, weight or kernel"
    assert num_filter >= 1, "Number of output filters should be greater equal to 1."
    assert workspace >= 0, "Maximum temporary workspace should be greater equal to 0."
    # The backend call has a different arity depending on whether a bias
    # operand is present, so the two cases cannot share one invocation.
    if no_bias:
        assert bias is None, "Using no bias"
        return _api_internal.deconvolution(data, weight, kernel, stride, dilate, pad,
                                           adj, target_shape, num_filter, num_group,
                                           workspace, no_bias, cudnn_tune, cudnn_off, layout)
    else:
        assert bias is not None, "Using bias"
        return _api_internal.deconvolution(data, weight, bias, kernel, stride, dilate, pad,
                                           adj, target_shape, num_filter, num_group,
                                           workspace, no_bias, cudnn_tune, cudnn_off, layout)
# pylint: disable=too-many-arguments, unused-argument
@set_module('mxnet.ndarray.numpy_extension')
def pooling(data=None, kernel=None, stride=None, pad=None, pool_type="max",
            pooling_convention="valid", global_pool=False, cudnn_off=False,
            p_value=None, count_include_pad=None, layout=None, **kwargs):
    r"""Performs pooling on the input.
    The shapes for 1-D pooling are
    - **data** and **out**: *(batch_size, channel, width)* (NCW layout) or
      *(batch_size, width, channel)* (NWC layout),
    The shapes for 2-D pooling are
    - **data** and **out**: *(batch_size, channel, height, width)* (NCHW layout) or
      *(batch_size, height, width, channel)* (NHWC layout),
        out_height = f(height, kernel[0], pad[0], stride[0])
        out_width = f(width, kernel[1], pad[1], stride[1])
    The definition of *f* depends on ``pooling_convention``, which has two options:
    - **valid** (default)::
        f(x, k, p, s) = floor((x+2*p-k)/s)+1
    - **full**, which is compatible with Caffe::
        f(x, k, p, s) = ceil((x+2*p-k)/s)+1
    When ``global_pool`` is set to be true, then global pooling is performed. It will reset
    ``kernel=(height, width)`` and set the appropiate padding to 0.
    Three pooling options are supported by ``pool_type``:
    - **avg**: average pooling
    - **max**: max pooling
    - **sum**: sum pooling
    - **lp**: Lp pooling
    For 3-D pooling, an additional *depth* dimension is added before
    *height*. Namely the input data and output will have shape *(batch_size, channel, depth,
    height, width)* (NCDHW layout) or *(batch_size, depth, height, width, channel)* (NDHWC layout).
    Notes on Lp pooling:
    Lp pooling was first introduced by this paper: https://arxiv.org/pdf/1204.3968.pdf.
    L-1 pooling is simply sum pooling, while L-inf pooling is simply max pooling.
    We can see that Lp pooling stands between those two, in practice the most common value for p is 2.
    For each window ``X``, the mathematical expression for Lp pooling is:
    :math:`f(X) = \sqrt[p]{\sum_{x}^{X} x^p}`

    Parameters
    ----------
    data : NDArray
        Input data to the pooling operator.
    kernel : Shape(tuple), optional, default=[]
        Pooling kernel size: (y, x) or (d, y, x)
    pool_type : {'avg', 'lp', 'max', 'sum'},optional, default='max'
        Pooling type to be applied.
    global_pool : boolean, optional, default=0
        Ignore kernel size, do global pooling based on current input feature map.
    cudnn_off : boolean, optional, default=0
        Turn off cudnn pooling and use MXNet pooling operator.
    pooling_convention : {'full', 'same', 'valid'},optional, default='valid'
        Pooling convention to be applied.
    stride : Shape(tuple), optional, default=[]
        Stride: for pooling (y, x) or (d, y, x). Defaults to 1 for each dimension.
    pad : Shape(tuple), optional, default=[]
        Pad for pooling: (y, x) or (d, y, x). Defaults to no padding.
    p_value : int or None, optional, default='None'
        Value of p for Lp pooling, can be 1 or 2, required for Lp Pooling.
    count_include_pad : boolean or None, optional, default=None
        Only used for AvgPool, specify whether to count padding elements for averagecalculation.
        For example, with a 5*5 kernel on a 3*3 corner of a image,the sum of the 9 valid elements will
        be divided by 25 if this is set to true,or it will be divided by 9 if this is set to false.
        Defaults to true.
    layout : {None, 'NCDHW', 'NCHW', 'NCW', 'NDHWC', 'NHWC', 'NWC'},optional, default='None'
        Set layout for input and output. Empty for
        default layout: NCW for 1d, NCHW for 2d and NCDHW for 3d.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.
    """
    assert data is not None and kernel is not None, "Missing input data or kernel"
    out = _api_internal.pooling(data, kernel, stride, pad, pool_type, pooling_convention,
                                global_pool, cudnn_off, p_value, count_include_pad, layout)
    # The backend returns a single NDArray normally, or a tuple of
    # outputs; surface the latter as a list.
    if isinstance(out, NDArrayBase):
        return out
    else:
        return list(out)
# pylint: disable=too-many-arguments, unused-argument
@set_module('mxnet.ndarray.numpy_extension')
def dropout(data, p=0.5, mode="training", axes=None, cudnn_off=False, **kwargs):
    r"""Apply the dropout operation to the input array.

    - During training, each element of the input is set to zero with probability p.
      The whole array is rescaled by :math:`1/(1-p)` to keep the expected
      sum of the input unchanged.
    - During testing, this operator does not change the input if mode is 'training'.
      If mode is 'always', the same computaion as during training will be applied.

    Parameters
    ----------
    data : NDArray
        Input array to which dropout will be applied.
    p : float, optional, default=0.5
        Fraction of the input that gets dropped out during training time.
    mode : {'always', 'training'}, optional, default='training'
        Whether to only turn on dropout during training or to also turn on for inference.
    axes : Shape(tuple), optional, default=[]
        Axes for variational dropout kernel.
    cudnn_off : boolean or None, optional, default=False
        Whether to turn off cudnn in dropout operator. This option is ignored if axes is specified.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.
    """
    # Extra keyword arguments are accepted for API compatibility but unused.
    out = _api_internal.dropout(data, p, mode, axes, cudnn_off)
    return out
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def one_hot(data, depth=None, on_value=1.0, off_value=0.0, dtype="float32"):
    r"""Returns a one-hot array.
    The locations represented by `data` take value `on_value`, while all
    other locations take value `off_value`.
    `one_hot` operation with `data` (indices) of shape ``(i0, i1)`` and `depth` of ``d``
    would result in an output array of shape ``(i0, i1, d)`` with::

      output[i,j,:] = off_value
      output[i,j,data[i,j]] = on_value

    Parameters
    ----------
    data : NDArray
        array of locations where to set on_value
    depth : long, required
        Depth of the one hot dimension.
    on_value : double, optional, default=1
        The value assigned to the locations represented by indices.
    off_value : double, optional, default=0
        The value assigned to the locations not represented by indices.
    dtype : {'bfloat16', 'float16', 'float32', 'float64', 'int32', 'int64', 'int8', 'uint8'},
            optional, default='float32'
        DType of the output

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.

    Example
    -------
    >>> data = np.array([1,0,2,0])
    >>> npx.one_hot(data, 3)
    array([[0., 1., 0.],
           [1., 0., 0.],
           [0., 0., 1.],
           [1., 0., 0.]], dtype=float64)
    >>> npx.one_hot(data, 3, on_value=8, off_value=1, dtype='int32')
    array([[1, 8, 1],
           [8, 1, 1],
           [1, 1, 8],
           [8, 1, 1]], dtype=int32)
    >>> data = np.array([[1,0],[1,0],[2,0]])
    >>> npx.one_hot(data, 3)
    array([[[0., 1., 0.],
            [1., 0., 0.]],
           [[0., 1., 0.],
            [1., 0., 0.]],
           [[0., 0., 1.],
            [1., 0., 0.]]], dtype=float64)
    """
    assert depth is not None, "Please provide the depth of one hot dimension."
    # Normalize a numpy dtype object to its canonical string name.
    if not isinstance(dtype, str):
        dtype = _np.dtype(dtype).name
    return _api_internal.one_hot(data, depth, on_value, off_value, dtype)
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def rnn(data=None, parameters=None, state=None, state_cell=None, sequence_length=None,
        mode=None, state_size=None, num_layers=None, bidirectional=False,
        state_outputs=False, p=0.0, use_sequence_length=False, projection_size=None,
        lstm_state_clip_min=None, lstm_state_clip_max=None, lstm_state_clip_nan=None):
    r"""Applies recurrent layers to input data. Currently, vanilla RNN, LSTM and GRU are
    implemented, with both multi-layer and bidirectional support.

    When the input data is of type float32 and the environment variables
    MXNET_CUDA_ALLOW_TENSOR_CORE and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1,
    this operator will try to use pseudo-float16 precision (float32 math with float16 I/O)
    precision in order to use Tensor Cores on suitable NVIDIA GPUs.
    This can sometimes give significant speedups.

    **Vanilla RNN**

    Applies a single-gate recurrent layer to input X. Two kinds of activation function are
    supported: ReLU and Tanh.

    With ReLU activation function:

    .. math::
        h_t = relu(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} + b_{hh})

    With Tanh activtion function:

    .. math::
        h_t = \tanh(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} + b_{hh})

    Reference paper: Finding structure in time - Elman, 1988.
    https://crl.ucsd.edu/~elman/Papers/fsit.pdf

    **LSTM**

    Long Short-Term Memory - Hochreiter, 1997.
    http://www.bioinf.jku.at/publications/older/2604.pdf

    .. math::
      \begin{array}{ll}
                i_t = \mathrm{sigmoid}(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\
                f_t = \mathrm{sigmoid}(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\
                g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hc} h_{(t-1)} + b_{hg}) \\
                o_t = \mathrm{sigmoid}(W_{io} x_t + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\
                c_t = f_t * c_{(t-1)} + i_t * g_t \\
                h_t = o_t * \tanh(c_t)
                \end{array}

    With the projection size being set, LSTM could use the projection feature to reduce the
    parameters size and give some speedups without significant damage to the accuracy.

    Long Short-Term Memory Based Recurrent Neural Network Architectures for Large Vocabulary
    Speech Recognition - Sak et al. 2014. https://arxiv.org/abs/1402.1128

    .. math::
      \begin{array}{ll}
                i_t = \mathrm{sigmoid}(W_{ii} x_t + b_{ii} + W_{ri} r_{(t-1)} + b_{ri}) \\
                f_t = \mathrm{sigmoid}(W_{if} x_t + b_{if} + W_{rf} r_{(t-1)} + b_{rf}) \\
                g_t = \tanh(W_{ig} x_t + b_{ig} + W_{rc} r_{(t-1)} + b_{rg}) \\
                o_t = \mathrm{sigmoid}(W_{io} x_t + b_{o} + W_{ro} r_{(t-1)} + b_{ro}) \\
                c_t = f_t * c_{(t-1)} + i_t * g_t \\
                h_t = o_t * \tanh(c_t)
                r_t = W_{hr} h_t
                \end{array}

    **GRU**

    Gated Recurrent Unit - Cho et al. 2014. http://arxiv.org/abs/1406.1078

    The definition of GRU here is slightly different from paper but compatible with CUDNN.

    .. math::
      \begin{array}{ll}
                r_t = \mathrm{sigmoid}(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
                z_t = \mathrm{sigmoid}(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
                n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\
                h_t = (1 - z_t) * n_t + z_t * h_{(t-1)} \\
                \end{array}

    Parameters
    ----------
    data : NDArray
        Input data to RNN
    parameters : NDArray
        Vector of all RNN trainable parameters concatenated
    state : NDArray
        initial hidden state of the RNN
    state_cell : NDArray
        initial cell state for LSTM networks (only for LSTM)
    sequence_length : NDArray
        Vector of valid sequence lengths for each element in batch.
        (Only used if use_sequence_length kwarg is True)
    mode : {'gru', 'lstm', 'rnn_relu', 'rnn_tanh'}, required
        the type of RNN to compute
    state_size : int (non-negative), required
        size of the state for each layer
    num_layers : int (non-negative), required
        number of stacked layers
    bidirectional : boolean, optional, default=0
        whether to use bidirectional recurrent layers
    state_outputs : boolean, optional, default=0
        Whether to have the states as symbol outputs.
    p : float, optional, default=0
        drop rate of the dropout on the outputs of each RNN layer, except the last layer.
    use_sequence_length : boolean, optional, default=0
        If set to true, this layer takes in an extra input parameter `sequence_length`
        to specify variable length sequence
    projection_size : int or None, optional, default='None'
        size of project size
    lstm_state_clip_min : double or None, optional, default=None
        Minimum clip value of LSTM states. This option must be used together with
        lstm_state_clip_max.
    lstm_state_clip_max : double or None, optional, default=None
        Maximum clip value of LSTM states. This option must be used together with
        lstm_state_clip_min.
    lstm_state_clip_nan : boolean, optional, default=0
        Whether to stop NaN from propagating in state by clipping it to min/max.
        If clipping range is not specified, this option is ignored.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.
    """
    assert mode is not None, "Please provide rnn type to compute. e.g. rnn_relu, rnn_tanh, lstm, gru"
    assert data is not None and parameters is not None and state is not None, \
        "Missing input data/parameters/state."
    assert state_size is not None, "Please provide state_size"
    assert num_layers is not None, "Please provide num_layers"
    if use_sequence_length:
        assert sequence_length is not None, \
            "use_sequence_length is set True, but no sequence_length provided."
    if mode == "lstm":
        assert state_cell is not None, \
            "RNN computing mode is lstm, but no state_cell is provided"
    # Assemble the leading tensor arguments incrementally instead of spelling
    # out the four call variants: state_cell is forwarded only for LSTM and
    # sequence_length only when variable-length sequences are enabled.  The
    # relative ordering (state_cell before sequence_length) matches the
    # backend's expected positional layout.
    leading = [data, parameters, state]
    if mode == "lstm":
        leading.append(state_cell)
    if use_sequence_length:
        leading.append(sequence_length)
    return _api_internal.rnn(*leading, state_size, num_layers, bidirectional,
                             state_outputs, mode, p, use_sequence_length,
                             projection_size, lstm_state_clip_min,
                             lstm_state_clip_max, lstm_state_clip_nan)
# pylint: disable=too-many-arguments, unused-argument
@set_module('mxnet.ndarray.numpy_extension')
def embedding(data, weight, input_dim=None, output_dim=None, dtype="float32", sparse_grad=False,
              **kwargs):
    r"""Maps integer indices to vector representations (embeddings).

    This operator maps words to real-valued vectors in a high-dimensional space,
    called word embeddings. These embeddings can capture semantic and syntactic
    properties of the words. For example, it has been noted that in the learned
    embedding spaces, similar words tend to be close to each other and dissimilar
    words far apart.

    For an input array of shape (d1, ..., dK),
    the shape of an output array is (d1, ..., dK, output_dim).
    All the input values should be integers in the range [0, input_dim).

    If the input_dim is ip0 and output_dim is op0, then shape of the embedding
    weight matrix must be (ip0, op0).

    When "sparse_grad" is False, if any index mentioned is too large, it is
    replaced by the index that addresses the last vector in an embedding matrix.
    When "sparse_grad" is True, an error will be raised if invalid indices are found.

    The storage type of weight can be either row_sparse or default.

    .. Note::

        If "sparse_grad" is set to True, the storage type of gradient w.r.t weights will be
        "row_sparse". Only a subset of optimizers support sparse gradients, including SGD,
        AdaGrad and Adam. Note that by default lazy updates is turned on, which may perform
        differently from standard updates. For more details, please check the Optimization API at:
        https://mxnet.apache.org/versions/master/api/python/docs/api/optimizer/index.html

    Parameters
    ----------
    data : NDArray
        The input array to the embedding operator.
    weight : NDArray
        The embedding weight matrix.
    input_dim : long, required
        Vocabulary size of the input indices.
    output_dim : long, required
        Dimension of the embedding vectors.
    dtype : {'bfloat16', 'float16', 'float32', 'float64', 'int32', 'int64', 'int8', 'uint8'},
            optional, default='float32'
        Data type of weight.
    sparse_grad : boolean, optional, default=0
        Compute row sparse gradient in the backward calculation.
        If set to True, the grad's storage type is row_sparse.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.

    Example
    -------
    >>> input_dim = 4
    >>> output_dim = 5

    Each row in weight matrix y represents a word. So, y = (w0,w1,w2,w3)

    >>> y = np.arange(input_dim * output_dim).reshape(input_dim, output_dim)
    >>> y
    array([[ 0.,  1.,  2.,  3.,  4.],
           [ 5.,  6.,  7.,  8.,  9.],
           [10., 11., 12., 13., 14.],
           [15., 16., 17., 18., 19.]])

    Input array x represents n-grams(2-gram). So, x = [(w1,w3), (w0,w2)]

    >>> x = np.array([[1., 3.], [0., 2.]])
    >>> x
    array([[1., 3.],
           [0., 2.]])

    Mapped input x to its vector representation y.

    >>> npx.embedding(x, y, input_dim, output_dim)
    array([[[ 5.,  6.,  7.,  8.,  9.],
            [15., 16., 17., 18., 19.]],
           [[ 0.,  1.,  2.,  3.,  4.],
            [10., 11., 12., 13., 14.]]])
    """
    # Check presence explicitly: input_dim/output_dim default to None, and the
    # original bare `input_dim > 0` comparison raised a confusing TypeError
    # ("'>' not supported between instances of 'NoneType' and 'int'") when the
    # required argument was omitted, instead of the intended assertion message.
    assert input_dim is not None and input_dim > 0, \
        "Vocabulary size of the input indices should be greater than 0."
    assert output_dim is not None and output_dim > 0, \
        "Dimension of the embedding vectors should greater than 0."
    assert not sparse_grad, "Currently row sparse gradient is not supported in npx.embedding"
    return _api_internal.embedding(data, weight, input_dim, output_dim, dtype, sparse_grad)
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def topk(data, axis=-1, k=1, ret_typ="indices", is_ascend=False, dtype="float32"):
    r"""Returns the indices of the top *k* elements in an input array along the given
    axis (by default).

    If ret_type is set to 'value' returns the value of top *k* elements (instead of indices).
    In case of ret_type = 'both', both value and index would be returned.
    The returned elements will be sorted.

    Parameters
    ----------
    data : NDArray
        The input array
    axis : int or None, optional, default='-1'
        Axis along which to choose the top k indices.
        If not given, the flattened array is used. Default is -1.
    k : int, optional, default='1'
        Number of top elements to select, should be always smaller than or equal to
        the element number in the given axis. A global sort is performed if set k < 1.
    ret_typ : {'both', 'indices', 'mask', 'value'},optional, default='indices'
        The return type.
        "value" means to return the top k values,
        "indices" means to return the indices of the top k values,
        "mask" means to return a mask array containing 0 and 1. 1 means the top k values.
        "both" means to return a list of both values and indices of top k elements.
    is_ascend : boolean, optional, default=0
        Whether to choose k largest or k smallest elements.
        Top K largest elements will be chosen if set to false.
    dtype : {'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'},
            optional, default='float32'
        DType of the output indices when ret_typ is "indices" or "both".
        An error will be raised if the selected data type cannot precisely
        represent the indices.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.

    Example
    -------
    >>> x = np.array([[0.3, 0.2, 0.4], [0.1, 0.3, 0.2]])

    returns an index of the largest element on last axis

    >>> npx.topk(x)
    array([[2.],
           [1.]])

    returns the value of top-2 largest elements on last axis

    >>> npx.topk(x, ret_typ='value', k=2)
    array([[0.4, 0.3],
           [0.3, 0.2]])

    returns the value of top-2 smallest elements on last axis

    >>> npx.topk(x, ret_typ='value', k=2, is_ascend=1)
    array([[0.2, 0.3],
           [0.1, 0.2]])

    returns the value of top-2 largest elements on axis 0

    >>> npx.topk(x, axis=0, ret_typ='value', k=2)
    array([[0.3, 0.3, 0.4],
           [0.1, 0.2, 0.2]])

    flattens and then returns list of both values and indices

    >>> npx.topk(x, ret_typ='both', k=2)
    [array([[0.4, 0.3], [0.3, 0.2]]),
     array([[2., 0.], [1., 2.]])]
    """
    result = _api_internal.topk(data, axis, k, ret_typ, is_ascend, dtype)
    # A multi-output result (e.g. ret_typ='both') comes back as a tuple-like
    # object; expose it to callers as a plain list.
    return result if isinstance(result, NDArrayBase) else list(result)
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def layer_norm(data=None, gamma=None, beta=None, axis=None, eps=None, output_mean_var=None):
    r"""Layer normalization.

    Normalizes the channels of the input tensor by mean and variance, and applies
    a scale ``gamma`` as well as offset ``beta``.

    Assume the input has more than one dimension and we normalize along axis 1.
    We first compute the mean and variance along this axis and then
    compute the normalized output, which has the same shape as input, as following:

    .. math::

      out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta

    Both ``gamma`` and ``beta`` are learnable parameters.

    Unlike BatchNorm and InstanceNorm, the *mean* and *var* are computed along
    the channel dimension.

    Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta``
    have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both
    ``data_mean`` and ``data_std``. Note that no gradient will be passed through
    these two outputs.

    The parameter ``axis`` specifies which axis of the input shape denotes
    the 'channel' (separately normalized groups). The default is -1, which sets
    the channel axis to be the last item in the input shape.

    Parameters
    ----------
    data : NDArray
        Input data to layer normalization
    gamma : NDArray
        gamma array
    beta : NDArray
        beta array
    axis : int, optional, default='-1'
        The axis to perform layer normalization.
        Usually, this should be be axis of the channel dimension.
        Negative values means indexing from right to left.
    eps : float, optional, default=9.99999975e-06
        An `epsilon` parameter to prevent division by 0.
    output_mean_var : boolean, optional, default=0
        Output the mean and std calculated along the given axis.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.
    """
    result = _api_internal.layer_norm(data, gamma, beta, axis, eps, output_mean_var)
    # With output_mean_var the backend returns several arrays; hand those back
    # to the caller as a plain list.
    return result if isinstance(result, NDArrayBase) else list(result)
# pylint: disable=too-many-arguments, unused-argument
@set_module('mxnet.ndarray.numpy_extension')
def leaky_relu(data=None, gamma=None, act_type="leaky", slope=0.25, lower_bound=0.125,
               upper_bound=0.334, **kwargs):
    r"""Applies Leaky rectified linear unit activation element-wise to the input.

    Leaky ReLUs attempt to fix the "dying ReLU" problem by allowing a small `slope`
    when the input is negative and has a slope of one when input is positive.

    The following modified ReLU Activation functions are supported:

    - *elu*: Exponential Linear Unit. `y = x > 0 ? x : slope * (exp(x)-1)`
    - *gelu*: Gaussian Error Linear Unit. `y = 0.5 * x * (1 + erf(x / sqrt(2)))`
    - *selu*: Scaled Exponential Linear Unit. `y = lambda * (x > 0 ? x : alpha * (exp(x) - 1))`
      where *lambda = 1.0507009873554804934193349852946* and
      *alpha = 1.6732632423543772848170429916717*.
    - *leaky*: Leaky ReLU. `y = x > 0 ? x : slope * x`
    - *prelu*: Parametric ReLU. This is same as *leaky* except that `slope` is learnt
      during training.
    - *rrelu*: Randomized ReLU. same as *leaky* but the `slope` is uniformly and randomly
      chosen from *[lower_bound, upper_bound)* for training, while fixed to be
      *(lower_bound+upper_bound)/2* for inference.

    Parameters
    ----------
    data : NDArray
        Input data to activation function.
    gamma : NDArray
        Input data to activation function.
    act_type : {'elu', 'gelu', 'leaky', 'prelu', 'rrelu', 'selu'},optional, default='leaky'
        Activation function to be applied.
    slope : float, optional, default=0.25
        Init slope for the activation. (For leaky and elu only)
    lower_bound : float, optional, default=0.125
        Lower bound of random slope. (For rrelu only)
    upper_bound : float, optional, default=0.333999991
        Upper bound of random slope. (For rrelu only)

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.
    """
    # Only prelu takes the learnable gamma input; every other variant calls the
    # backend without it.
    if act_type != "prelu":
        return _api_internal.leaky_relu(data, act_type, slope, lower_bound, upper_bound)
    assert gamma is not None, "If activation function is prelu, please provide input gamma"
    result = _api_internal.leaky_relu(data, gamma, act_type, slope, lower_bound, upper_bound)
    return result if isinstance(result, NDArrayBase) else list(result)
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def batch_dot(a, b, transpose_a=False, transpose_b=False, forward_stype="default"):
    r"""Batchwise dot product.

    ``batch_dot`` computes the dot product of ``a`` and ``b`` when both hold
    batched data, i.e. N-D (N >= 3) arrays of shape `(B0, ..., B_i, :, :)`.

    For example, given ``a`` with shape `(B_0, ..., B_i, N, M)` and ``b`` with
    shape `(B_0, ..., B_i, M, K)`, the result array will have shape
    `(B_0, ..., B_i, N, K)`, which is computed by::

        batch_dot(a,b)[b_0, ..., b_i, :, :] = dot(a[b_0, ..., b_i, :, :], b[b_0, ..., b_i, :, :])

    Parameters
    ----------
    a : NDArray
        The first input
    b : NDArray
        The second input
    transpose_a : boolean, optional, default=0
        If true then transpose the first input before dot.
    transpose_b : boolean, optional, default=0
        If true then transpose the second input before dot.
    forward_stype : {None, 'csr', 'default', 'row_sparse'},optional, default='None'
        The desired storage type of the forward output given by user.
        If the combination of input storage types and this hint does not match
        any implemented ones, the dot operator will perform a fallback operation
        and still produce an output of the desired storage type.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.
    """
    # Thin forwarding wrapper around the backend batched-dot kernel.
    call_args = (a, b, transpose_a, transpose_b, forward_stype)
    return _api_internal.batch_dot(*call_args)
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def broadcast_like(lhs, rhs, lhs_axes=None, rhs_axes=None):
    r"""Broadcasts lhs to have the same shape as rhs.

    Broadcasting is a mechanism that allows NDArrays to perform arithmetic
    operations with arrays of different shapes efficiently without creating
    multiple copies of arrays.  Also see
    `Broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_
    for more explanation.

    Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to
    `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes.

    Parameters
    ----------
    lhs : NDArray
        First input.
    rhs : NDArray
        Second input.
    lhs_axes : Shape or None, optional, default=None
        Axes to perform broadcast on in the first input array
    rhs_axes : Shape or None, optional, default=None
        Axes to copy from the second input array

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.

    example
    -------
    >>> a = np.array([[1,2,3]])
    >>> b = np.array([[5,6,7],[7,8,9]])
    >>> npx.broadcast_like(a, b)
    array([[1., 2., 3.],
           [1., 2., 3.]])
    >>> a = np.array([9])
    >>> b = np.array([1,2,3,4,5])
    >>> npx.broadcast_like(a, b, lhs_axes=(0,), rhs_axes=(-1,))
    array([9., 9., 9., 9., 9.])
    """
    # Forward directly to the backend; axis selection is optional on both sides.
    return _api_internal.broadcast_like(lhs, rhs, lhs_axes, rhs_axes)
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def arange_like(data, start=0.0, step=1.0, repeat=1, ctx=None, axis=None):
    r"""Return an array with evenly spaced values.

    If axis is not given, the output will have the same shape as the input
    array.  Otherwise, the output will be a 1-D array with size of the
    specified axis in input shape.

    Parameters
    ----------
    data : NDArray
        The input
    start : double, optional, default=0
        Start of interval. The interval includes this value. The default start value is 0.
    step : double, optional, default=1
        Spacing between values.
    repeat : int, optional, default='1'
        The repeating time of all elements.
        E.g repeat=3, the element a will be repeated three times --> a, a, a.
    ctx : string, optional, default=''
        Context of output, in format [cpu|gpu|cpu_pinned](n).
        Only used for imperative calls.
    axis : int or None, optional, default='None'
        Arange elements according to the size of a certain axis of input array.
        The negative numbers are interpreted counting from the backward.
        If not provided, will arange elements according to the input shape.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.

    Example
    -------
    >>> x = np.random.uniform(0, 1, size=(3,4))
    >>> x
    array([[0.5488135 , 0.5928446 , 0.71518934, 0.84426576],
           [0.60276335, 0.8579456 , 0.5448832 , 0.8472517 ],
           [0.4236548 , 0.6235637 , 0.6458941 , 0.3843817 ]])
    >>> npx.arange_like(x, start=0)
    array([[ 0.,  1.,  2.,  3.],
           [ 4.,  5.,  6.,  7.],
           [ 8.,  9., 10., 11.]])
    >>> npx.arange_like(x, start=0, axis=-1)
    array([0., 1., 2., 3.])
    """
    # Shape comes from `data`; the numeric ramp is controlled by start/step/repeat.
    return _api_internal.arange_like(data, start, step, repeat, ctx, axis)
# pylint: disable=too-many-arguments
@set_module('mxnet.ndarray.numpy_extension')
def group_norm(data, gamma, beta, num_groups=1, eps=1e-3, output_mean_var=False):
    r"""Group normalization.

    The input channels are separated into ``num_groups`` groups,
    each containing ``num_channels / num_groups`` channels.
    The mean and standard-deviation are calculated separately over the each group.

    .. math::

      data = data.reshape((N, num_groups, C // num_groups, ...))
      out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta

    Both ``gamma`` and ``beta`` are learnable parameters.

    Defined in ../src/operator/nn/group_norm.cc:L78

    Parameters
    ----------
    data : NDArray
        Input data
    gamma : NDArray
        gamma array
    beta : NDArray
        beta array
    num_groups : int, optional, default='1'
        Total number of groups.
    eps : float, optional, default=0.001
        An `epsilon` parameter to prevent division by 0.
    output_mean_var : boolean, optional, default=0
        Output the mean and std calculated along the given axis.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.
    """
    result = _api_internal.group_norm(data, gamma, beta, num_groups, eps, output_mean_var)
    # With output_mean_var the backend returns several arrays; hand those back
    # to the caller as a plain list.
    return result if isinstance(result, NDArrayBase) else list(result)
|
szha/mxnet
|
python/mxnet/ndarray/numpy_extension/_op.py
|
Python
|
apache-2.0
| 61,890
|
[
"Gaussian"
] |
712b35362408b41f1b832756802093d255c8020e5e5e15914e9607afbccea202
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import re
from jinja2.compiler import generate
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import text_type
from ansible.module_utils._text import to_native
from ansible.playbook.attribute import FieldAttribute
from ansible.utils.display import Display
# Module-level display used for warning/deprecation/debug output.
display = Display()

# Matches "<var> is defined" / "<var> is not defined" style tests, capturing the
# variable (bare name or hostvars[...] lookup), the logic words, and the state.
DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')
# Detects use of the lookup() plugin inside a conditional expression.
LOOKUP_REGEX = re.compile(r'lookup\s*\(')
# A valid bare variable name: identifier-like, no dots, brackets or operators.
VALID_VAR_REGEX = re.compile("^[_A-Za-z][_a-zA-Z0-9]*$")
class Conditional:
    '''
    This is a mix-in class, to be used with Base to allow the object
    to be run conditionally when a condition is met or skipped.
    '''

    # The list of conditional expressions attached to the object via `when:`.
    _when = FieldAttribute(isa='list', default=list, extend=True, prepend=True)

    def __init__(self, loader=None):
        # when used directly, this class needs a loader, but we want to
        # make sure we don't trample on the existing one if this class
        # is used as a mix-in with a playbook base class
        if not hasattr(self, '_loader'):
            if loader is None:
                raise AnsibleError("a loader must be specified when using Conditional() directly")
            else:
                self._loader = loader
        super(Conditional, self).__init__()

    def _validate_when(self, attr, name, value):
        # Normalize a scalar `when:` value into a single-element list so the
        # evaluation loop can always iterate over it.
        if not isinstance(value, list):
            setattr(self, name, [value])

    def extract_defined_undefined(self, conditional):
        '''
        Return all (variable, logic, state) triples found in the conditional
        string, e.g. ('foo', 'is not', 'defined'), scanning left to right.
        '''
        results = []

        cond = conditional
        m = DEFINED_REGEX.search(cond)
        while m:
            results.append(m.groups())
            # continue searching in the remainder of the string after this match
            cond = cond[m.end():]
            m = DEFINED_REGEX.search(cond)

        return results

    def evaluate_conditional(self, templar, all_vars):
        '''
        Loops through the conditionals set on this object, returning
        False if any of them evaluate as such.
        '''

        # since this is a mix-in, it may not have an underlying datastructure
        # associated with it, so we pull it out now in case we need it for
        # error reporting below
        ds = None
        if hasattr(self, '_ds'):
            ds = getattr(self, '_ds')

        result = True
        try:
            for conditional in self.when:

                # do evaluation
                if conditional is None or conditional == '':
                    # empty conditionals are treated as an implicit True
                    res = True
                elif isinstance(conditional, bool):
                    res = conditional
                else:
                    res = self._check_conditional(conditional, templar, all_vars)

                # only update if still true, preserve false
                if result:
                    result = res

                display.debug("Evaluated conditional (%s): %s " % (conditional, res))
                # short-circuit: one False conditional makes the whole set False
                if not result:
                    break

        except Exception as e:
            # NOTE(review): if accessing self.when itself raised, `conditional`
            # would be unbound here and this line would fail with a NameError --
            # confirm self.when cannot raise before the loop body runs.
            raise AnsibleError("The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)), obj=ds)

        return result

    def _check_conditional(self, conditional, templar, all_vars):
        '''
        This method does the low-level evaluation of each conditional
        set on this object, using jinja2 to wrap the conditionals for
        evaluation.
        '''

        # keep the untouched expression for error messages and deprecations
        original = conditional

        # conditionals are implicitly templated, so embedded {{ }}/{% %}
        # delimiters are redundant and can cause double templating
        if templar.is_template(conditional):
            display.warning('conditional statements should not include jinja2 '
                            'templating delimiters such as {{ }} or {%% %%}. '
                            'Found: %s' % conditional)

        bare_vars_warning = False
        if C.CONDITIONAL_BARE_VARS:
            # legacy behaviour: a conditional that is exactly a known variable
            # name is replaced by that variable's value before evaluation
            if conditional in all_vars and VALID_VAR_REGEX.match(conditional):
                conditional = all_vars[conditional]
                bare_vars_warning = True

        # make sure the templar is using the variables specified with this method
        templar.available_variables = all_vars

        try:
            # if the conditional is "unsafe", disable lookups
            disable_lookups = hasattr(conditional, '__UNSAFE__')
            conditional = templar.template(conditional, disable_lookups=disable_lookups)

            if bare_vars_warning and not isinstance(conditional, bool):
                display.deprecated('evaluating %r as a bare variable, this behaviour will go away and you might need to add " | bool"'
                                   ' (if you would like to evaluate input string from prompt) or " is truthy"'
                                   ' (if you would like to apply Python\'s evaluation method) to the expression in the future. '
                                   'Also see CONDITIONAL_BARE_VARS configuration toggle' % original,
                                   version="2.12", collection_name='ansible.builtin')

            # templating may already have produced a final (non-string) result,
            # e.g. a bool; return it as-is
            if not isinstance(conditional, text_type) or conditional == "":
                return conditional

            # update the lookups flag, as the string returned above may now be unsafe
            # and we don't want future templating calls to do unsafe things
            disable_lookups |= hasattr(conditional, '__UNSAFE__')

            # First, we do some low-level jinja2 parsing involving the AST format of the
            # statement to ensure we don't do anything unsafe (using the disable_lookup flag above)
            class CleansingNodeVisitor(ast.NodeVisitor):
                # Walks the AST of the jinja2-compiled conditional, rejecting
                # dunder access in calls and recursively vetting yield results.
                def generic_visit(self, node, inside_call=False, inside_yield=False):
                    if isinstance(node, ast.Call):
                        inside_call = True
                    elif isinstance(node, ast.Yield):
                        inside_yield = True
                    elif isinstance(node, ast.Str):
                        if disable_lookups:
                            if inside_call and node.s.startswith("__"):
                                # calling things with a dunder is generally bad at this point...
                                raise AnsibleError(
                                    "Invalid access found in the conditional: '%s'" % conditional
                                )
                            elif inside_yield:
                                # we're inside a yield, so recursively parse and traverse the AST
                                # of the result to catch forbidden syntax from executing
                                parsed = ast.parse(node.s, mode='exec')
                                cnv = CleansingNodeVisitor()
                                cnv.visit(parsed)
                    # iterate over all child nodes
                    for child_node in ast.iter_child_nodes(node):
                        self.generic_visit(
                            child_node,
                            inside_call=inside_call,
                            inside_yield=inside_yield
                        )
            try:
                # compile the conditional with the templar's own filters/tests,
                # then inspect the generated python source via the AST visitor
                e = templar.environment.overlay()
                e.filters.update(templar.environment.filters)
                e.tests.update(templar.environment.tests)
                res = e._parse(conditional, None, None)
                res = generate(res, e, None, None)
                parsed = ast.parse(res, mode='exec')
                cnv = CleansingNodeVisitor()
                cnv.visit(parsed)
            except Exception as e:
                raise AnsibleError("Invalid conditional detected: %s" % to_native(e))

            # and finally we generate and template the presented string and look at the resulting string
            presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
            val = templar.template(presented, disable_lookups=disable_lookups).strip()
            if val == "True":
                return True
            elif val == "False":
                return False
            else:
                raise AnsibleError("unable to evaluate conditional: %s" % original)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            # the templating failed, meaning most likely a variable was undefined. If we happened
            # to be looking for an undefined variable, return True, otherwise fail
            try:
                # first we extract the variable name from the error message
                var_name = re.compile(r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(str(e)).groups()[0]
                # next we extract all defined/undefined tests from the conditional string
                def_undef = self.extract_defined_undefined(conditional)
                # then we loop through these, comparing the error variable name against
                # each def/undef test we found above. If there is a match, we determine
                # whether the logic/state mean the variable should exist or not and return
                # the corresponding True/False
                for (du_var, logic, state) in def_undef:
                    # when we compare the var names, normalize quotes because something
                    # like hostvars['foo'] may be tested against hostvars["foo"]
                    if var_name.replace("'", '"') == du_var.replace("'", '"'):
                        # the should exist is a xor test between a negation in the logic portion
                        # against the state (defined or undefined)
                        should_exist = ('not' in logic) != (state == 'defined')
                        if should_exist:
                            return False
                        else:
                            return True
                # as nothing above matched the failed var name, re-raise here to
                # trigger the AnsibleUndefinedVariable exception again below
                raise
            except Exception:
                raise AnsibleUndefinedVariable("error while evaluating conditional (%s): %s" % (original, e))
|
ganeshnalawade/ansible
|
lib/ansible/playbook/conditional.py
|
Python
|
gpl-3.0
| 10,815
|
[
"VisIt"
] |
b50e5bfd512b577dc0f84f781bb1d51e5894f2d3c04b5dc53699c5487d30afe2
|
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2009-2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Tools for identifying and working with files and streams for any supported program"""
from __future__ import print_function
from . import logfileparser
from . import adfparser
from . import gamessparser
from . import gamessukparser
from . import gaussianparser
from . import jaguarparser
from . import molproparser
from . import nwchemparser
from . import orcaparser
from . import psiparser
from . import qchemparser
def ccopen(source, *args, **kargs):
    """Guess the identity of a particular log file and return an instance of it.
    Inputs:
      source - a single logfile, a list of logfiles, or an input stream
    Returns:
      one of ADF, GAMESS, GAMESS UK, Gaussian, Jaguar, Molpro, NWChem, ORCA,
      Psi, QChem, or None (if it cannot figure it out or the file does not
      exist).
    Raises:
      ValueError - if source is neither a filename/list of filenames nor a
      file-like object with a read() method.
    """
    filetype = None
    # Try to open the logfile(s), using openlogfile.
    if isinstance(source, str) or \
       isinstance(source, list) and all(isinstance(s, str) for s in source):
        try:
            inputfile = logfileparser.openlogfile(source)
        except IOError as error:
            (errno, strerror) = error.args
            print("I/O error %s (%s): %s" % (errno, source, strerror))
            return None
        isstream = False
    elif hasattr(source, "read"):
        inputfile = source
        isstream = True
    else:
        # Was a bare ``raise ValueError`` -- include a message so callers can
        # tell what was wrong with the argument.
        raise ValueError("source must be a filename, a list of filenames, "
                         "or a file-like object with a read() method")
    # Read through the logfile(s) and search for a clue.
    for line in inputfile:
        if line.find("Amsterdam Density Functional") >= 0:
            filetype = adfparser.ADF
            break
        # Don't break in this case as it may be a GAMESS-UK file.
        elif line.find("GAMESS") >= 0:
            filetype = gamessparser.GAMESS
        # This can break, since it is non-GAMESS-UK specific.
        elif line.find("GAMESS VERSION") >= 0:
            filetype = gamessparser.GAMESS
            break
        elif line.find("G A M E S S - U K") >= 0:
            filetype = gamessukparser.GAMESSUK
            break
        elif line.find("Gaussian, Inc.") >= 0:
            filetype = gaussianparser.Gaussian
            break
        elif line.find("Jaguar") >= 0:
            filetype = jaguarparser.Jaguar
            break
        elif line.find("PROGRAM SYSTEM MOLPRO") >= 0:
            filetype = molproparser.Molpro
            break
        # Molpro log files don't have the line above. Set this only if
        # nothing else is detected, and notice it can be overwritten,
        # since it does not break the loop.
        elif line[0:8] == "1PROGRAM" and not filetype:
            filetype = molproparser.Molpro
        elif line.find("Northwest Computational Chemistry Package") >= 0:
            filetype = nwchemparser.NWChem
            break
        elif line.find("O R C A") >= 0:
            filetype = orcaparser.ORCA
            break
        elif line.find("PSI") >= 0 and line.find("Ab Initio Electronic Structure") >= 0:
            filetype = psiparser.Psi
            break
        elif line.find("A Quantum Leap Into The Future Of Chemistry") >= 0:
            filetype = qchemparser.QChem
            break
    # Need to close file before creating an instance.
    if not isstream:
        inputfile.close()
    # Check for None explicitly instead of catching the TypeError raised by
    # calling None -- the old ``except TypeError`` also swallowed genuine
    # TypeErrors raised inside a parser's constructor.
    if filetype is None:
        print("Log file type not identified.")
        return None
    # Return an instance of the chosen class.
    return filetype(source, *args, **kargs)
|
Clyde-fare/cclib
|
src/cclib/parser/ccopen.py
|
Python
|
lgpl-2.1
| 3,930
|
[
"ADF",
"GAMESS",
"Gaussian",
"Jaguar",
"Molpro",
"NWChem",
"ORCA",
"cclib"
] |
276f21c7ea5deff793edc9b545a83ed1ab0e86b568f33fbd2a3d4e18e6956efc
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from functools import partial
import torch
from torchvision import transforms
from nupic.research.frameworks.pytorch.model_utils import evaluate_model
class NoiseRobustnessTest:
    """
    Mixin that replaces the standard evaluate-model function with a loop over
    the same function which conducts a noise robustness test on each
    iteration. In the test, validation accuracy is reported for samples with
    varying percentages of additive Gaussian noise. The output is a results
    dictionary with all of the standard metrics for the default evaluation
    loop, plus the value of each metric at each specified noise level.
    """
    def setup_experiment(self, config):
        """
        :param config:
            - noise_levels: Optional, a list of floats in (0, 1] which specify
              how many units of the input image will receive Gaussian noise.
              For example, if the noise level is 0.1, then about 10% of the
              incoming inputs per sample will receive additive Gaussian noise.
              Note that the zero-noise case is included by default, and leaving
              noise_levels out of the config will result in 0.1 to 0.9 in
              increments of 0.1.
            - noise_mean: The mean of the noise which will be added to the
              incoming data, defaults to 0
            - noise_std: The standard deviation of the noise which will be
              added to the incoming data, defaults to 1
        Example config:
            config = dict(
                noise_levels=[0.1, 0.25, 0.5, 0.9],
                noise_mean=0.2,
                noise_std=0.5,
            )
        """
        super().setup_experiment(config)
        noise_levels = config.get(
            "noise_levels", [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        )
        assert all(
            0 < noise <= 1 for noise in noise_levels
        ), "Noise levels must be between (0, 1]"
        noise_mean = config.get("noise_mean", 0)
        noise_std = config.get("noise_std", 1.0)
        default_evaluate_model_func = config.get("evaluate_model_func", evaluate_model)
        # Swap in the noise-sweep wrapper so every evaluation reports metrics
        # at every configured noise level in addition to the zero-noise run.
        self.evaluate_model = partial(
            evaluate_model_with_noise,
            evaluate_model_func=default_evaluate_model_func,
            noise_levels=noise_levels,
            noise_mean=noise_mean,
            noise_std=noise_std,
        )
    @classmethod
    def get_execution_order(cls):
        # Record this mixin's contribution in the experiment's execution log.
        eo = super().get_execution_order()
        eo["setup_experiment"].append("NoiseRobustnessTest: Initialize")
        return eo
def evaluate_model_with_noise(
    model,
    loader,
    device,
    noise_levels=None,
    noise_mean=0,
    noise_std=1,
    evaluate_model_func=None,
    **kwargs,
):
    """
    Executes the function evaluate_model_func with varying levels of noise.

    The model is evaluated once with the loader's original transform (the
    zero-noise baseline), then once per entry in ``noise_levels`` with the
    transform temporarily composed with :class:`AddGaussianNoise`. Noisy
    metrics are returned under keys suffixed ``_<level>_noise``; baseline
    metrics keep their original keys.

    :param model: model passed through to ``evaluate_model_func``
    :param loader: data loader whose ``dataset.transform`` is wrapped per run
    :param device: device passed through to ``evaluate_model_func``
    :param noise_levels: iterable of floats in (0, 1]; None means no noisy runs
    :param noise_mean: mean of the additive Gaussian noise
    :param noise_std: standard deviation of the additive Gaussian noise
    :param evaluate_model_func: callable ``(model, loader, device, **kwargs)``
        returning a dict of metrics
    :return: dict of baseline metrics plus one suffixed entry per metric per
        noise level
    """
    if noise_levels is None:
        noise_levels = []
    noise_results = {}
    dataset_transform = loader.dataset.transform
    zero_noise_results = evaluate_model_func(model, loader, device, **kwargs)
    try:
        for noise_level in noise_levels:
            loader.dataset.transform = transforms.Compose(
                [dataset_transform, AddGaussianNoise(noise_level, noise_mean, noise_std)]
            )
            noise_results[noise_level] = evaluate_model_func(
                model, loader, device, **kwargs
            )
    finally:
        # Restore the original transform even if an evaluation raises, so a
        # failed run does not leave the dataset permanently noisy (the
        # original code only reset it after a fully successful loop).
        loader.dataset.transform = dataset_transform
    all_results = {
        key + "_" + str(noise_level) + "_noise": results[key]
        for noise_level, results in noise_results.items()
        for key in results
    }
    all_results.update(zero_noise_results)
    return all_results
class AddGaussianNoise:
    """Transform that adds Gaussian noise to a random subset of tensor entries.

    Each entry of the input is independently selected with probability
    ``noise_level``; selected entries receive additive noise drawn from
    N(mean, std**2) while unselected entries pass through unchanged.
    """
    def __init__(self, noise_level, mean, std):
        self.noise_level = noise_level
        self.mean = mean
        self.std = std
    def __call__(self, tensor):
        # Bernoulli mask: 1 where noise is applied, 0 elsewhere.
        mask = torch.bernoulli(torch.ones_like(tensor) * self.noise_level)
        perturbation = torch.normal(self.mean, self.std, tensor.shape) * mask
        return tensor + perturbation
    def __repr__(self):
        return type(self).__name__ + f"(mean={self.mean}, std={self.std})"
|
mrcslws/nupic.research
|
src/nupic/research/frameworks/vernon/mixins/noise_robustness_test.py
|
Python
|
agpl-3.0
| 5,346
|
[
"Gaussian"
] |
8543e7d2935788b7093691e5ea8d720a7109feaecf9a5eb387c415db51e2d51d
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from tangent import quoting
from tangent import transformers
def test_insert():
    """prepend() inserts a statement immediately before the visited node."""
    def f(x):
        y = x
        return y
    node = quoting.parse_function(f)
    class Prepend(transformers.TreeTransformer):
        def visit_Assign(self, node):
            # If the target is y, then prepend this statement
            # NOTE Without this test, we'd have an infinite loop
            if node.targets[0].id == 'y':
                statement = quoting.quote("x = 2 * x")
                self.prepend(statement)
            return node
    Prepend().visit(node)
    # Line 1 of the unparsed source (after the def line) is the new statement.
    assert quoting.unquote(node).split('\n')[1].strip() == "x = 2 * x"
def test_insert_block():
    """prepend_block() inserts a statement at the top of the enclosing block."""
    def f(x):
        while True:
            y = x
            z = y
        return y
    node = quoting.parse_function(f)
    class PrependBlock(transformers.TreeTransformer):
        def visit_Assign(self, node):
            # If the target is z, then prepend this statement
            # NOTE Without this test, we'd have an infinite loop
            if node.targets[0].id == 'z':
                statement = quoting.quote("x = 2 * x")
                self.prepend_block(statement)
            return node
    PrependBlock().visit(node)
    # Line 2 is the first statement inside the while body.
    assert quoting.unquote(node).split('\n')[2].strip() == "x = 2 * x"
def test_insert_top():
    """insert_top() inserts a statement at the top of the function body."""
    def f(x):
        while True:
            y = x
            z = y
        return y
    node = quoting.parse_function(f)
    class InsertTop(transformers.TreeTransformer):
        def visit_Assign(self, node):
            # If the target is z, then insert this statement at function top
            # NOTE Without this test, we'd have an infinite loop
            if node.targets[0].id == 'z':
                statement = quoting.quote("x = 2 * x")
                self.insert_top(statement)
            return node
    InsertTop().visit(node)
    # Line 1 (right after the def line) is the newly inserted statement.
    assert quoting.unquote(node).split('\n')[1].strip() == "x = 2 * x"
def test_remove():
    """remove() deletes the visited node from the tree."""
    def f(x):
        while True:
            y = x
            z = y
        return y
    node = quoting.parse_function(f)
    class Remove(transformers.TreeTransformer):
        # Renamed from the copy-pasted ``InsertTop``: this transformer removes
        # the matching assignment rather than inserting anything.
        def visit_Assign(self, node):
            # Only remove the assignment to z, leaving y = x in place.
            if node.targets[0].id == 'z':
                self.remove(node)
            return node
    Remove().visit(node)
    # With ``z = y`` gone, line 3 of the unparsed source is the return.
    assert quoting.unquote(node).split('\n')[3].strip() == "return y"
if __name__ == '__main__':
    # Run this module's tests directly; pytest.main returns a nonzero exit
    # code on failure, which trips the assert.
    assert not pytest.main([__file__])
|
google/tangent
|
tests/test_transformers.py
|
Python
|
apache-2.0
| 2,880
|
[
"VisIt"
] |
aff7cd6ac5edfbc8a9fe2b926f81b1ed4e57c6b9302271f5a7374e7c23cc4d07
|
"""
Acceptance tests for the certificate web view feature.
"""
from ..helpers import UniqueCourseTest, EventsTestMixin
from nose.plugins.attrib import attr
from ...fixtures.course import CourseFixture
from ...fixtures.certificates import CertificateConfigFixture
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.certificate_page import CertificatePage
@attr('shard_5')
class CertificateWebViewTest(EventsTestMixin, UniqueCourseTest):
    """
    Tests for verifying certificate web view features
    """
    def setUp(self):
        super(CertificateWebViewTest, self).setUp()
        # set same course number as we have in fixture json
        self.course_info['number'] = "335535897951379478207964576572017930000"
        # Active certificate configuration installed into the course settings.
        test_certificate_config = {
            'id': 1,
            'name': 'Certificate name',
            'description': 'Certificate description',
            'course_title': 'Course title override',
            'signatories': [],
            'version': 1,
            'is_active': True
        }
        course_settings = {'certificates': test_certificate_config}
        self.course_fixture = CourseFixture(
            self.course_info["org"],
            self.course_info["number"],
            self.course_info["run"],
            self.course_info["display_name"],
            settings=course_settings
        )
        self.course_fixture.install()
        self.user_id = "99"  # we have created a user with this id in fixture
        self.cert_fixture = CertificateConfigFixture(self.course_id, test_certificate_config)
        # Load certificate web view page for use by the tests
        self.certificate_page = CertificatePage(self.browser, self.user_id, self.course_id)
    def log_in_as_unique_user(self):
        """
        Log in as a valid lms user.
        """
        AutoAuthPage(
            self.browser,
            username="testcert",
            email="cert@example.com",
            password="testuser",
            course_id=self.course_id
        ).visit()
    def test_page_has_accomplishments_banner(self):
        """
        Scenario: User accomplishment banner should be present if logged in user is the one who is awarded
        the certificate
        Given there is a course with certificate configuration
        And I have passed the course and certificate is generated
        When I view the certificate web view page
        Then I should see the accomplishment banner
        And When I click on `Add to Profile` button `edx.certificate.shared` event should be emitted
        """
        self.cert_fixture.install()
        self.log_in_as_unique_user()
        self.certificate_page.visit()
        self.assertTrue(self.certificate_page.accomplishment_banner.visible)
        self.assertTrue(self.certificate_page.add_to_linkedin_profile_button.visible)
        self.certificate_page.add_to_linkedin_profile_button.click()
        actual_events = self.wait_for_events(
            event_filter={'event_type': 'edx.certificate.shared'},
            number_of_matches=1
        )
        # NOTE(review): user_id here is the string "99"; confirm the emitted
        # event payload carries user_id as a string too, or the match fails.
        expected_events = [
            {
                'event': {
                    'user_id': self.user_id,
                    'course_id': self.course_id
                }
            }
        ]
        self.assert_events_match(expected_events, actual_events)
|
kamalx/edx-platform
|
common/test/acceptance/tests/lms/test_certificate_web_view.py
|
Python
|
agpl-3.0
| 3,336
|
[
"VisIt"
] |
dc3d6a405197104c5d6d937bc5b2e62c118218bd72b07c79e1bc1b796c7811f0
|
# -*- coding: utf-8-*-
import re
import facebook
WORDS = ["FACEBOOK", "NOTIFICATION"]
def handle(text, mic, profile):
    """
    Responds to user-input, typically speech text, with a summary of
    the user's Facebook notifications, including a count and details
    related to each individual notification.
    Arguments:
    text -- user-input, typically transcribed speech
    mic -- used to interact with the user (for both input and output)
    profile -- contains information related to the user (e.g., phone
               number); must provide profile['keys']['FB_TOKEN']
    """
    oauth_access_token = profile['keys']['FB_TOKEN']
    graph = facebook.GraphAPI(oauth_access_token)
    try:
        results = graph.request("me/notifications")
    except facebook.GraphAPIError:
        mic.say("I have not been authorized to query your Facebook. If you " +
                "would like to check your notifications in the future, " +
                "please visit the Jasper dashboard.")
        return
    except Exception:
        # Narrowed from a bare ``except`` and, crucially, return here: the
        # original fell through with ``results`` unbound and crashed with a
        # NameError on the next line.
        mic.say(
            "I apologize, there's a problem with that service at the moment.")
        return
    if not results['data']:
        mic.say("You have no Facebook notifications. ")
        return
    # One spoken line per notification title.
    updates = [notification['title'] for notification in results['data']]
    count = len(results['data'])
    mic.say("You have " + str(count) +
            " Facebook notifications. " + " ".join(updates) + ". ")
    return
def is_valid(text):
    """
    Returns True if the input is related to Facebook notifications.
    Arguments:
    text -- user-input, typically transcribed speech
    """
    # Case-insensitive scan for either keyword anywhere in the utterance.
    pattern = re.compile(r'\bnotification|Facebook\b', re.IGNORECASE)
    return pattern.search(text) is not None
|
benhoff/plugins
|
listenerplugins/Notifications.py
|
Python
|
gpl-3.0
| 1,744
|
[
"VisIt"
] |
ae9ac9aca773144533435c25422eb8b82fcf285f9a16e46f4f96765f08549ab7
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
class FileFormatError(Exception):
    """Raised when a file cannot be parsed.

    Root of the skbio.io format-error hierarchy; catch this to handle any
    parse failure regardless of format.
    """
    pass
class RecordError(FileFormatError):
    """Raised when a record within an otherwise parseable file is bad."""
    pass
class FieldError(RecordError):
    """Raised when a single field within a record is bad."""
    pass
class UnrecognizedFormatError(FileFormatError):
    """Raised when a file's format is unknown, ambiguous, or unidentifiable."""
    pass
class ClustalFormatError(FileFormatError):
    """Raised when a ``clustal`` formatted file cannot be parsed."""
    pass
class FASTAFormatError(FileFormatError):
    """Raised when a ``fasta`` formatted file cannot be parsed."""
    pass
class LSMatFormatError(FileFormatError):
    """Raised when an ``lsmat`` formatted file cannot be parsed."""
    pass
class OrdinationFormatError(FileFormatError):
    """Raised when an ``ordination`` formatted file cannot be parsed."""
    pass
class NewickFormatError(FileFormatError):
    """Raised when a ``newick`` formatted file cannot be parsed."""
    pass
class FASTQFormatError(FileFormatError):
    """Raised when a ``fastq`` formatted file cannot be parsed."""
    pass
class PhylipFormatError(FileFormatError):
    """Raised when a ``phylip`` formatted file cannot be parsed.

    May also be raised when an object (e.g., ``Alignment``) cannot be written
    in ``phylip`` format.
    """
    pass
class QSeqFormatError(FileFormatError):
    """Raised when a ``qseq`` formatted file cannot be parsed."""
    pass
class InvalidRegistrationError(Exception):
    """Raised if a function doesn't meet the expected API of its registration."""
    pass
class DuplicateRegistrationError(Exception):
    """Raised when a function is already registered in skbio.io.

    Parameters
    ----------
    name : str, optional
        The kind of function being registered (e.g. 'reader', 'writer').
    fmt : str, optional
        The format name the function is registered for.
    cls : type, optional
        The class the function is bound to; anything without a ``__name__``
        (including None) is reported as 'generator'.
    msg : str, optional
        A pre-built message that, when truthy, overrides the generated one.
    """
    def __init__(self, name=None, fmt=None, cls=None, msg=None):
        # Pass the final message to Exception.__init__ so ``args`` is set the
        # idiomatic way, instead of mutating ``self.args`` after the fact.
        if msg:
            super(DuplicateRegistrationError, self).__init__(msg)
        else:
            if hasattr(cls, '__name__'):
                classname = cls.__name__
            else:
                classname = 'generator'
            super(DuplicateRegistrationError, self).__init__(
                "'%s' already has a %s for %s." % (fmt, name, classname))
|
jensreeder/scikit-bio
|
skbio/io/_exception.py
|
Python
|
bsd-3-clause
| 2,595
|
[
"scikit-bio"
] |
68dfc3958a70b5d38d4f91f57e82bb24e6e8aa9a3f9ca52101ee92847c348fd0
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import unittest as ut
import espressomd
import numpy as np
import itertools
import collections
class RandomPairTest(ut.TestCase):
    """This test creates a system of random particles.
    The interaction pairs for a certain cutoff are then
    calculated by brute force in python (pairs_n2),
    and compared to the pairs returned by the cell
    systems, which should be identical. This check is
    repeated for all valid combinations of periodicities.
    """
    system = espressomd.System(box_l=3 * [10.])
    def setUp(self):
        s = self.system
        s.time_step = .1
        s.cell_system.skin = 0.0
        s.min_global_cut = 1.5
        n_part = 500
        # Fixed seed keeps the random particle configuration reproducible.
        np.random.seed(2)
        s.part.add(pos=s.box_l * np.random.random((n_part, 3)))
        # Reference list: all particle-id pairs within the 1.5 cutoff under
        # the current (full) periodicity, found by brute force.
        self.all_pairs = []
        dist_func = self.system.distance
        for pair in self.system.part.pairs():
            if dist_func(pair[0], pair[1]) < 1.5:
                self.all_pairs.append((pair[0].id, pair[1].id))
        self.all_pairs = set(self.all_pairs)
        self.assertTrue(len(self.all_pairs))
    def tearDown(self):
        self.system.part.clear()
    def pairs_n2(self, dist):
        # Go through the list of all possible pairs for full periodicity
        # and skip those that are not within the desired distance
        # for the current periodicity
        pairs = []
        parts = self.system.part
        for p in self.all_pairs:
            if self.system.distance(parts[p[0]], parts[p[1]]) <= dist:
                pairs.append(p)
        return set(pairs)
    def check_duplicates(self, l):
        # Each pair must be reported exactly once by the cell system.
        for e in collections.Counter(l).values():
            self.assertEqual(e, 1)
    def check_pairs(self, n2_pairs):
        cs_pairs = self.system.cell_system.get_pairs_(1.5)
        self.check_duplicates(cs_pairs)
        self.assertTrue(len(cs_pairs))
        # Empty symmetric difference: same pairs, no extras, no misses.
        self.assertEqual(n2_pairs ^ set(cs_pairs), set())
    def check_dd(self, n2_pairs):
        self.system.cell_system.set_domain_decomposition()
        self.check_pairs(n2_pairs)
    def check_layered(self, n2_pairs):
        self.system.cell_system.set_layered()
        self.check_pairs(n2_pairs)
    def check_n_squared(self, n2_pairs):
        self.system.cell_system.set_n_square()
        self.check_pairs(n2_pairs)
    def test(self):
        # Non-periodic directions are only possible with PARTIAL_PERIODIC.
        if espressomd.has_features("PARTIAL_PERIODIC"):
            periods = [0, 1]
        else:
            periods = [1]
        for periodicity in itertools.product(periods, periods, periods):
            self.system.periodicity = periodicity
            n2_pairs = self.pairs_n2(1.5)
            self.check_dd(n2_pairs)
            self.check_layered(n2_pairs)
            self.check_n_squared(n2_pairs)
if __name__ == '__main__':
    # Print the compiled-in feature list first to aid debugging of skipped
    # or failing runs.
    print("Features: ", espressomd.features())
    ut.main()
|
hmenke/espresso
|
testsuite/python/random_pairs.py
|
Python
|
gpl-3.0
| 3,581
|
[
"ESPResSo"
] |
0536b1e286306b172f1e14f5eacdb276f92a25e887b4a00117dfacb5291a7874
|
from __future__ import print_function, absolute_import
from pymatgen.core.structure import Molecule
from monty.json import MSONable
import re
import os
from monty.itertools import chunks
from monty.io import reverse_readline
__author__ = 'Xin Chen, chenxin13@mails.tsinghua.edu.cn'
def is_numeric(s):
    """
    Return True if the string ``s`` is a numeric string.
    Parameters
    ----------
    s : str
        A string.
    Returns
    -------
    res : bool
        True when ``s`` can be converted to an int or a float, False
        otherwise.
    """
    # EAFP: let float() decide instead of pattern-matching the string.
    try:
        float(s)
        return True
    except ValueError:
        return False
def iterlines(s):
    """
    A generator form of s.split('\n') for reducing memory overhead.
    Parameters
    ----------
    s : str
        A multi-line string.
    Yields
    ------
    line : str
        One line at a time, without the trailing newline. A trailing
        newline in ``s`` produces a final empty string, matching
        ``s.split('\n')``.
    """
    start = 0
    while True:
        idx = s.find('\n', start)
        if idx == -1:
            # No more newlines: emit the remainder (possibly empty) and stop.
            yield s[start:]
            return
        yield s[start:idx]
        start = idx + 1
class AdfInputError(Exception):
    """
    The default error class for errors raised while building ADF input.
    """
    pass
class AdfOutputError(Exception):
    """
    The default error class for errors raised by ``AdfOutput``.
    """
    pass
class AdfKey(MSONable):
    """
    The basic input unit for ADF. A key is a string of characters that does not
    contain a delimiter (blank, comma or equal sign). A key may have multiple
    subkeys and a set of options.
    """
    # Keys that introduce a block in the ADF input file.
    block_keys = {"SCF", "GEOMETRY", "XC", "UNITS", "ATOMS", "CHARGE", "BASIS",
                  "SYMMETRY", "RELATIVISTIC", "OCCUPATIONS", "SAVE", "A1FIT",
                  "INTEGRATION", "UNRESTRICTED", "ZLMFIT", "TITLE",
                  "EXACTDENSITY", "TOTALENERGY", "ANALYTICALFREQ"}
    sub_keys = {"AtomDepQuality"}
    # Full blocks are blocks that must have an 'END'.
    _full_blocks = {"GEOMETRY", "SCF", "UNITS", "BASIS", "ANALYTICALFREQ"}
    def __init__(self, name, options=None, subkeys=None):
        """
        Initialization method.
        Parameters
        ----------
        name : str
            The name of this key.
        options : Sized
            The options for this key. Each element can be a primitive object or
            a tuple/list with two elements: the first is the name and the second
            is a primitive object.
        subkeys : Sized
            The subkeys for this key.
        Raises
        ------
        ValueError
            If elements in ``subkeys`` are not ``AdfKey`` objects.
        """
        self.name = name
        self.options = options if options is not None else []
        self.subkeys = subkeys if subkeys is not None else []
        if len(self.subkeys) > 0:
            for k in subkeys:
                if not isinstance(k, AdfKey):
                    raise ValueError("Not all subkeys are ``AdfKey`` objects!")
        self._sized_op = None
        # _sized_op is True when each option is a (name, value) pair rather
        # than a bare value; inferred from the first option and assumed to
        # hold for all of them.
        if len(self.options) > 0:
            self._sized_op = isinstance(self.options[0], (list, tuple))
    def _options_string(self):
        """
        Return the option string.
        """
        if len(self.options) > 0:
            s = ""
            for op in self.options:
                if self._sized_op:
                    # (name, value) pairs render as name=value.
                    s += "{:s}={:s} ".format(*map(str, op))
                else:
                    s += "{:s} ".format(str(op))
            return s.strip()
        else:
            return ""
    def is_block_key(self):
        """
        Return True if this key is a block key.
        """
        return bool(self.name.upper() in self.block_keys)
    @property
    def key(self):
        """
        Return the name of this key. If this is a block key, the name will be
        converted to upper cases.
        """
        if self.is_block_key():
            return self.name.upper()
        else:
            return self.name
    def __str__(self):
        """
        Return the string representation of this ``AdfKey``.
        Notes
        -----
        If this key is 'Atoms' and the coordinates are in Cartesian form, a
        different string format will be used.
        """
        s = "{:s}".format(self.key)
        if len(self.options) > 0:
            s += " {:s}".format(self._options_string())
        s += "\n"
        if len(self.subkeys) > 0:
            if self.key.lower() == 'atoms':
                # Atom subkeys are an element symbol plus xyz coordinates.
                for subkey in self.subkeys:
                    s += "{:2s} {: 14.8f} {: 14.8f} {: 14.8f}\n".format(
                        subkey.name, *subkey.options)
            else:
                for subkey in self.subkeys:
                    s += str(subkey)
            if self.is_block_key():
                s += "END\n"
            else:
                s += "subend\n"
        elif self.key.upper() in self._full_blocks:
            # Full blocks need a terminating END even without subkeys.
            s += "END\n"
        return s
    def __eq__(self, other):
        # Equality is defined by identical rendered text.
        # NOTE(review): defining __eq__ without __hash__ makes AdfKey
        # unhashable under Python 3 -- confirm instances are never used as
        # dict keys or set members.
        if not isinstance(other, AdfKey):
            return False
        else:
            return str(self) == str(other)
    def has_subkey(self, subkey):
        """
        Return True if this AdfKey contains the given subkey.
        Parameters
        ----------
        subkey : str or AdfKey
            A key name or an AdfKey object.
        Returns
        -------
        has : bool
            True if this key contains the given key. Otherwise False.
        """
        if isinstance(subkey, str):
            key = subkey
        elif isinstance(subkey, AdfKey):
            key = subkey.key
        else:
            raise ValueError("The subkey should be an AdfKey or a string!")
        if len(self.subkeys) > 0:
            if key in map(lambda k: k.key, self.subkeys):
                return True
        return False
    def add_subkey(self, subkey):
        """
        Add a new subkey to this key.
        Parameters
        ----------
        subkey : AdfKey
            A new subkey.
        Notes
        -----
        Duplicate check will not be performed if this is an 'Atoms' block.
        """
        if self.key.lower() == 'atoms' or not self.has_subkey(subkey):
            self.subkeys.append(subkey)
    def remove_subkey(self, subkey):
        """
        Remove the given subkey, if existed, from this AdfKey.
        Parameters
        ----------
        subkey : str or AdfKey
            The subkey to remove.
        """
        if len(self.subkeys) > 0:
            key = subkey if isinstance(subkey, str) else subkey.key
            # Only the first match is removed.
            for i in range(len(self.subkeys)):
                if self.subkeys[i].key == key:
                    self.subkeys.pop(i)
                    break
    def add_option(self, option):
        """
        Add a new option to this key.
        Parameters
        ----------
        option : Sized or str or int or float
            A new option to add. This must have the same format with exsiting
            options.
        Raises
        ------
        TypeError
            If the format of the given ``option`` is different.
        """
        if len(self.options) == 0:
            self.options.append(option)
        else:
            # Mixing (name, value) pairs with bare values is not allowed.
            sized_op = isinstance(option, (list, tuple))
            if self._sized_op != sized_op:
                raise TypeError("Option type is mismatched!")
            self.options.append(option)
    def remove_option(self, option):
        """
        Remove an option.
        Parameters
        ----------
        option : str or int
            The name (str) or index (int) of the option to remove.
        Raises
        ------
        TypeError
            If the option has a wrong type.
        """
        if len(self.options) > 0:
            if self._sized_op:
                # Paired options are addressed by name.
                if not isinstance(option, str):
                    raise TypeError("``option`` should be a name string!")
                for i in range(len(self.options)):
                    if self.options[i][0] == option:
                        self.options.pop(i)
                        break
            else:
                # Bare options are addressed by index.
                if not isinstance(option, int):
                    raise TypeError("``option`` should be an integer index!")
                self.options.pop(option)
    def has_option(self, option):
        """
        Return True if the option is included in this key.
        Parameters
        ----------
        option : str
            The option.
        Returns
        -------
        has : bool
            True if the option can be found. Otherwise False will be returned.
        """
        if len(self.options) == 0:
            return False
        for op in self.options:
            if (self._sized_op and op[0] == option) or (op == option):
                return True
        return False
    def as_dict(self):
        """
        A JSON serializable dict representation of self.
        """
        d = {"@module": self.__class__.__module__,
             "@class": self.__class__.__name__,
             "name": self.name, "options": self.options}
        if len(self.subkeys) > 0:
            subkeys = []
            for subkey in self.subkeys:
                subkeys.append(subkey.as_dict())
            d.update({"subkeys": subkeys})
        return d
    def to_json(self):
        """
        Return a json string representation of the MSONable AdfKey object.
        """
        return super(AdfKey, self).to_json()
    @classmethod
    def from_dict(cls, d):
        """
        Construct a MSONable AdfKey object from the JSON dict.
        Parameters
        ----------
        d : dict
            A dict of saved attributes.
        Returns
        -------
        adfkey : AdfKey
            An AdfKey object recovered from the JSON dict ``d``.
        """
        key = d.get("name")
        options = d.get("options", None)
        subkey_list = d.get("subkeys", [])
        if len(subkey_list) > 0:
            subkeys = list(map(lambda k: AdfKey.from_dict(k), subkey_list))
        else:
            subkeys = None
        return cls(key, options, subkeys)
    @staticmethod
    def from_string(string):
        """
        Construct an AdfKey object from the string.
        Parameters
        ----------
        string : str
            A string.
        Returns
        -------
        adfkey : AdfKey
            An AdfKey object recovered from the string.
        Raises
        ------
        ValueError
            Currently nested subkeys are not supported. If ``subend`` was found
            a ValueError would be raised.
        Notes
        -----
        Only the first block key will be returned.
        """
        def is_float(s):
            # Heuristic: a numeric token with '.', 'E' or 'e' is a float.
            if '.' in s or 'E' in s or 'e' in s:
                return True
            else:
                return False
        # Single-line string: parse the key name plus any inline options.
        if string.find("\n") == -1:
            el = string.split()
            if len(el) > 1:
                if string.find("=") != -1:
                    options = list(map(lambda s: s.split("="), el[1:]))
                else:
                    options = el[1:]
                # Convert numeric option values to int/float in place.
                for i, op in enumerate(options):
                    if isinstance(op, list) and is_numeric(op[1]):
                        op[1] = float(op[1]) if is_float(op[1]) else int(op[1])
                    elif is_numeric(op):
                        options[i] = float(op) if is_float(op) else int(op)
            else:
                options = None
            return AdfKey(el[0], options)
        if string.find('subend') != -1:
            raise ValueError("Nested subkeys are not supported!")
        key = None
        for line in iterlines(string):
            if line == "":
                continue
            el = line.strip().split()
            if len(el) == 0:
                continue
            if el[0].upper() in AdfKey.block_keys:
                if key is None:
                    key = AdfKey.from_string(line)
                else:
                    # A second block key ends the first; only the first block
                    # is returned.
                    return key
            elif el[0].upper() == 'END':
                return key
            elif key is not None:
                key.add_subkey(AdfKey.from_string(line))
        else:
            # for/else: the loop ran out of lines without an 'END'.
            raise Exception("IncompleteKey: 'END' is missing!")
class AdfTask(MSONable):
"""
Basic task for ADF. All settings in this class are independent of molecules.
Notes
-----
Unlike other quantum chemistry packages (NWChem, Gaussian, ...), ADF does
not support calculating force/gradient.
"""
operations = {"energy": "Evaluate the single point energy.",
"optimize": "Minimize the energy by varying the molecular "
"structure.",
"frequencies": "Compute second derivatives and print out an "
"analysis of molecular vibrations.",
"freq": "Same as frequencies.",
"numerical_frequencies": "Compute molecular frequencies using"
" numerical method."}
def __init__(self, operation="energy", basis_set=None, xc=None,
title="ADF_RUN", units=None, geo_subkeys=None, scf=None,
other_directives=None):
"""
Initialization method.
Parameters
----------
operation : str
The target operation.
basis_set : AdfKey
The basis set definitions for this task. Defaults to 'DZ/Large'.
xc : AdfKey
The exchange-correlation functionals. Defaults to PBE.
title : str
The title of this ADF task.
units : AdfKey
The units. Defaults to Angstroms/Degree.
geo_subkeys : Sized
The subkeys for the block key 'GEOMETRY'.
scf : AdfKey
The scf options.
other_directives : Sized
User-defined directives.
"""
if operation not in self.operations.keys():
raise AdfInputError("Invalid ADF task {:s}".format(operation))
self.operation = operation
self.title = title
self.basis_set = basis_set if basis_set is not None else \
self.get_default_basis_set()
self.xc = xc if xc is not None else self.get_default_xc()
self.units = units if units is not None else self.get_default_units()
self.scf = scf if scf is not None else self.get_default_scf()
self.other_directives = other_directives \
if other_directives is not None else []
self._setup_task(geo_subkeys)
@staticmethod
def get_default_basis_set():
return AdfKey.from_string("Basis\ntype DZ\ncore small\nEND")
@staticmethod
def get_default_scf():
return AdfKey.from_string("SCF\niterations 300\nEND")
@staticmethod
def get_default_geo():
return AdfKey.from_string("GEOMETRY SinglePoint\nEND")
@staticmethod
def get_default_xc():
return AdfKey.from_string("XC\nGGA PBE\nEND")
@staticmethod
def get_default_units():
return AdfKey.from_string("Units\nlength angstrom\nangle degree\nEnd")
def _setup_task(self, geo_subkeys):
"""
Setup the block 'Geometry' given subkeys and the task.
Parameters
----------
geo_subkeys : Sized
User-defined subkeys for the block 'Geometry'.
Notes
-----
Most of the run types of ADF are specified in the Geometry block except
the 'AnalyticFreq'.
"""
self.geo = AdfKey("Geometry", subkeys=geo_subkeys)
if self.operation.lower() == "energy":
self.geo.add_option("SinglePoint")
if self.geo.has_subkey("Frequencies"):
self.geo.remove_subkey("Frequencies")
elif self.operation.lower() == "optimize":
self.geo.add_option("GeometryOptimization")
if self.geo.has_subkey("Frequencies"):
self.geo.remove_subkey("Frequencies")
elif self.operation.lower() == "numerical_frequencies":
self.geo.add_subkey(AdfKey("Frequencies"))
else:
self.other_directives.append(AdfKey("AnalyticalFreq"))
if self.geo.has_subkey("Frequencies"):
self.geo.remove_subkey("Frequencies")
def __str__(self):
s = """TITLE {title}\n
{units}
{xc}
{basis_set}
{scf}
{geo}""".format(
title=self.title, units=str(self.units), xc=str(self.xc),
basis_set=str(self.basis_set), scf=str(self.scf), geo=str(self.geo)
)
s += "\n"
for block_key in self.other_directives:
if not isinstance(block_key, AdfKey):
raise ValueError("{} is not an AdfKey!".format(str(block_key)))
s += str(block_key) + "\n"
return s
def as_dict(self):
"""
A JSON serializable dict representation of self.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"operation": self.operation, "title": self.title,
"xc": self.xc.as_dict(), "basis_set": self.basis_set.as_dict(),
"units": self.units.as_dict(), "scf": self.scf.as_dict(),
"geo": self.geo.as_dict(),
"others": [k.as_dict() for k in self.other_directives]}
    def to_json(self):
        """
        Return a json string representation of the MSONable AdfTask object.

        Delegates entirely to the inherited MSONable ``to_json``.
        """
        return super(AdfTask, self).to_json()
@classmethod
def from_dict(cls, d):
"""
Construct a MSONable AdfTask object from the JSON dict.
Parameters
----------
d : dict
A dict of saved attributes.
Returns
-------
task : AdfTask
An AdfTask object recovered from the JSON dict ``d``.
"""
def _from_dict(_d):
return AdfKey.from_dict(_d) if _d is not None else None
operation = d.get("operation")
title = d.get("title")
basis_set = _from_dict(d.get("basis_set"))
xc = _from_dict(d.get("xc"))
units = _from_dict(d.get("units"))
scf = _from_dict(d.get("scf"))
others = [AdfKey.from_dict(o) for o in d.get("others", [])]
geo = _from_dict(d.get("geo"))
return cls(operation, basis_set, xc, title, units, geo.subkeys, scf,
others)
class AdfInput(object):
    """
    A basic ADF input file writer.
    """

    def __init__(self, task):
        """
        Initialization method.

        Parameters
        ----------
        task : AdfTask
            An ADF task.
        """
        self.task = task

    def write_file(self, molecule, inpfile):
        """
        Write an ADF input file.

        Parameters
        ----------
        molecule : Molecule
            The molecule for this task.
        inpfile : str
            The name where the input file will be saved.
        """
        blocks = []
        atom_block = AdfKey("Atoms", options=["cartesian"])
        for site in molecule:
            atom_block.add_subkey(AdfKey(str(site.specie), list(site.coords)))
        blocks.append(atom_block)
        net_charge = molecule.charge
        if net_charge != 0:
            # ADF's Charge key takes the net charge and the number of
            # unpaired electrons (spin multiplicity - 1).
            n_unpaired = molecule.spin_multiplicity - 1
            blocks.append(AdfKey("Charge", [net_charge, n_unpaired]))
            if n_unpaired != 0:
                blocks.append(AdfKey("Unrestricted"))
        # NOTE(review): mode "w+" (read/write) is kept from the original;
        # plain "w" would presumably suffice — confirm before changing.
        with open(inpfile, "w+") as f:
            f.writelines(str(block) + "\n" for block in blocks)
            f.write(str(self.task) + "\n")
            f.write("END INPUT")
class AdfOutput(object):
    """
    A basic ADF output file parser.

    Attributes
    ----------
    is_failed : bool
        True if the ADF job is terminated without success. Otherwise False.
    is_internal_crash : bool
        True if the job is terminated with internal crash. Please read 'TAPE13'
        of the ADF manual for more detail.
    error : str
        The error description.
    run_type : str
        The RunType of this ADF job. Possible options are: 'SinglePoint',
        'GeometryOptimization', 'AnalyticalFreq' and 'NumericalFreq'.
    final_energy : float
        The final molecule energy (a.u).
    final_structure : GMolecule
        The final structure of the molecule.
    energies : Sized
        The energy of each cycle.
    structures : Sized
        The structure of each cycle, if geometry optimization is performed.
    frequencies : array_like
        The frequencies of the molecule.
    normal_modes : array_like
        The normal modes of the molecule.
    freq_type : str
        Either 'Analytical' or 'Numerical'.
    """

    def __init__(self, filename):
        """
        Initialization method.

        Parameters
        ----------
        filename : str
            The ADF output file to parse.
        """
        self.filename = filename
        self._parse()

    def _parse(self):
        """
        Parse the ADF outputs. There are two files: one is 'logfile', the other
        is the ADF output file. The final energy and structures are parsed from
        the 'logfile'. Frequencies and normal modes are parsed from the ADF
        output file.
        """
        workdir = os.path.dirname(self.filename)
        logfile = os.path.join(workdir, "logfile")
        if not os.path.isfile(logfile):
            raise IOError("The ADF logfile can not be accessed!")
        # Reset all result attributes before parsing.
        self.is_failed = False
        self.error = None
        self.final_energy = None
        self.final_structure = None
        self.energies = []
        self.structures = []
        self.frequencies = []
        self.normal_modes = None
        self.freq_type = None
        self.run_type = None
        self.is_internal_crash = False
        self._parse_logfile(logfile)
        # Frequencies/modes only appear in the full output file, and only
        # for non-single-point runs.
        if not self.is_failed and self.run_type != "SinglePoint":
            self._parse_adf_output()

    @staticmethod
    def _sites_to_mol(sites):
        """
        Return a ``Molecule`` object given a list of sites.

        Parameters
        ----------
        sites : list
            A list of sites, each a ``[species_string, [x, y, z]]`` pair.

        Returns
        -------
        mol : Molecule
            A ``Molecule`` object.
        """
        return Molecule([site[0] for site in sites],
                        [site[1] for site in sites])

    def _parse_logfile(self, logfile):
        """
        Parse the formatted logfile: run type, per-cycle energies and
        structures, the final energy, and any reported error.
        """
        cycle_patt = re.compile(r"Coordinates\sin\sGeometry\sCycle\s(\d+)")
        # Groups: 1 = atom index, 2 = element symbol, 3-5 = x, y, z.
        coord_patt = re.compile(r"\s+([0-9]+)\.([A-Za-z]+)"+3*r"\s+([-\.0-9]+)")
        energy_patt = re.compile(r"<.*>\s<.*>\s+current\senergy\s+([-\.0-9]+)\s"
                                 "Hartree")
        final_energy_patt = re.compile(
            r"<.*>\s<.*>\s+Bond\sEnergy\s+([-\.0-9]+)\sa\.u\.")
        error_patt = re.compile(r"<.*>\s<.*>\s+ERROR\sDETECTED:\s(.*)")
        runtype_patt = re.compile(r"<.*>\s<.*>\s+RunType\s+:\s(.*)")
        end_patt = re.compile(r"<.*>\s<.*>\s+END")
        parse_cycle = False
        sites = []
        last_cycle = -1
        parse_final = False
        # Stop parsing the logfile is this job is not terminated successfully.
        # The last non-empty line of the logfile must match the end pattern.
        # Otherwise the job has some internal failure. The TAPE13 part of the
        # ADF manual has a detailed explanantion.
        with open(logfile, "r") as f:
            for line in reverse_readline(f):
                if line == "":
                    continue
                if end_patt.search(line) is None:
                    self.is_internal_crash = True
                    self.error = "Internal crash. TAPE13 is generated!"
                    self.is_failed = True
                    return
                else:
                    break
        with open(logfile, "r") as f:
            for line in f:
                m = error_patt.search(line)
                if m:
                    self.is_failed = True
                    self.error = m.group(1)
                    break
                if self.run_type is None:
                    m = runtype_patt.search(line)
                    if m:
                        if m.group(1) == 'FREQUENCIES':
                            self.freq_type = "Numerical"
                            self.run_type = "NumericalFreq"
                        elif m.group(1) == 'GEOMETRY OPTIMIZATION':
                            self.run_type = "GeometryOptimization"
                        elif m.group(1) == 'CREATE':
                            self.run_type = None
                        elif m.group(1) == 'SINGLE POINT':
                            self.run_type = 'SinglePoint'
                        else:
                            raise AdfOutputError("Undefined Runtype!")
                elif self.run_type == 'SinglePoint':
                    m = coord_patt.search(line)
                    if m:
                        # NOTE(review): groups()[0] is the atom *index* field;
                        # the GeometryOptimization branch below uses
                        # groups()[1] (the element symbol). Confirm this is
                        # intended and not an index/element mix-up.
                        sites.append([m.groups()[0],
                                      list(map(float, m.groups()[2:]))])
                    else:
                        m = final_energy_patt.search(line)
                        if m:
                            self.final_energy = float(m.group(1))
                            self.final_structure = self._sites_to_mol(sites)
                elif self.run_type == 'GeometryOptimization':
                    m = cycle_patt.search(line)
                    if m:
                        cycle = int(m.group(1))
                        if cycle <= 0:
                            raise AdfOutputError("Wrong cycle {}".format(cycle))
                        if cycle > last_cycle:
                            parse_cycle = True
                            last_cycle = cycle
                        else:
                            # Cycle numbers restarted: remaining lines hold
                            # the converged (final) result.
                            parse_final = True
                    elif parse_cycle:
                        m = coord_patt.search(line)
                        if m:
                            sites.append([m.groups()[1],
                                          list(map(float, m.groups()[2:]))])
                        else:
                            m = energy_patt.search(line)
                            if m:
                                # End of this cycle: record its energy and
                                # structure, then reset the site buffer.
                                self.energies.append(float(m.group(1)))
                                mol = self._sites_to_mol(sites)
                                self.structures.append(mol)
                                parse_cycle = False
                                sites = []
                    elif parse_final:
                        m = final_energy_patt.search(line)
                        if m:
                            self.final_energy = float(m.group(1))
                elif self.run_type == "NumericalFreq":
                    # Frequencies are parsed from the full output file instead.
                    break
        if not self.is_failed:
            if self.run_type == "GeometryOptimization":
                if len(self.structures) > 0:
                    self.final_structure = self.structures[-1]
                if self.final_energy is None:
                    raise AdfOutputError("The final energy can not be read!")
            elif self.run_type == "SinglePoint":
                if self.final_structure is None:
                    raise AdfOutputError("The final structure is missing!")
                if self.final_energy is None:
                    raise AdfOutputError("The final energy can not be read!")

    def _parse_adf_output(self):
        """
        Parse the standard ADF output file for vibrational frequencies and
        normal modes (and, for numerical-frequency runs, the structure).
        """
        numerical_freq_patt = re.compile(
            r"\s+\*\s+F\sR\sE\sQ\sU\sE\sN\sC\sI\sE\sS\s+\*")
        analytic_freq_patt = re.compile(
            r"\s+\*\s+F\sR\sE\sQ\sU\sE\sN\sC\sY\s+A\sN\sA\sL\sY\sS\sI\sS\s+\*")
        freq_on_patt = re.compile(r"Vibrations\sand\sNormal\sModes\s+\*+.*\*+")
        freq_off_patt = re.compile(r"List\sof\sAll\sFrequencies:")
        mode_patt = re.compile(r"\s+(\d+)\.([A-Za-z]+)\s+(.*)")
        coord_patt = re.compile(r"\s+(\d+)\s+([A-Za-z]+)" + 6 * r"\s+([0-9\.-]+)")
        coord_on_patt = re.compile(r"\s+\*\s+R\sU\sN\s+T\sY\sP\sE\s:\sFREQUENCIES\s+\*")
        parse_freq = False
        parse_mode = False
        nnext = 0        # number of frequency columns in the current section
        nstrike = 0      # number of coordinate lines matched so far
        sites = []
        self.frequencies = []
        self.normal_modes = []
        # A numerical-frequency run may not have provided a structure via the
        # logfile; in that case recover it from the output file.
        if self.final_structure is None:
            find_structure = True
            parse_coord = False
            natoms = 0
        else:
            find_structure = False
            parse_coord = False
            natoms = self.final_structure.num_sites
        with open(self.filename, "r") as f:
            for line in f:
                if self.run_type == "NumericalFreq" and find_structure:
                    if not parse_coord:
                        m = coord_on_patt.search(line)
                        if m:
                            parse_coord = True
                    else:
                        m = coord_patt.search(line)
                        if m:
                            sites.append(
                                [m.group(2), list(map(float, m.groups()[2:5]))])
                            nstrike += 1
                        elif nstrike > 0:
                            # First non-coordinate line after at least one
                            # match ends the coordinate table.
                            find_structure = False
                            self.final_structure = self._sites_to_mol(sites)
                            natoms = self.final_structure.num_sites
                elif self.freq_type is None:
                    if numerical_freq_patt.search(line):
                        self.freq_type = "Numerical"
                    elif analytic_freq_patt.search(line):
                        self.freq_type = "Analytical"
                        self.run_type = "AnalyticalFreq"
                elif freq_on_patt.search(line):
                    parse_freq = True
                elif parse_freq:
                    if freq_off_patt.search(line):
                        break
                    el = line.strip().split()
                    # A header row of 1-3 floating-point numbers starts a new
                    # group of frequency columns.
                    if 1 <= len(el) <= 3 and line.find(".") != -1:
                        nnext = len(el)
                        parse_mode = True
                        parse_freq = False
                        self.frequencies.extend(map(float, el))
                        for i in range(nnext):
                            self.normal_modes.append([])
                elif parse_mode:
                    m = mode_patt.search(line)
                    if m:
                        v = list(chunks(map(float, m.group(3).split()), 3))
                        if len(v) != nnext:
                            raise AdfOutputError("Odd Error!")
                        # Append each displacement triple to the matching
                        # (most recently added) mode column.
                        for i, k in enumerate(range(-nnext, 0, 1)):
                            self.normal_modes[k].extend(v[i])
                        if int(m.group(1)) == natoms:
                            parse_freq = True
                            parse_mode = False
        if isinstance(self.final_structure, list):
            self.final_structure = self._sites_to_mol(self.final_structure)
        if self.freq_type is not None:
            if len(self.frequencies) != len(self.normal_modes):
                raise AdfOutputError("The number of normal modes is wrong!")
            if len(self.normal_modes[0]) != natoms * 3:
                raise AdfOutputError("The dimensions of the modes are wrong!")
|
matk86/pymatgen
|
pymatgen/io/adf.py
|
Python
|
mit
| 31,523
|
[
"ADF",
"Gaussian",
"NWChem",
"pymatgen"
] |
81cd0be3eab6bfd6a9ff3fd09ae3015dd9e861000aeb9c02c66ec378c38ca273
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import re
from collections import defaultdict
from skbio._base import SkbioObject
from skbio.sequence import Protein, InvalidCodonError, GeneticCodeInitError
# py3k compatibility
try:
from string import maketrans
except ImportError:
maketrans = str.maketrans
_dna_trans = maketrans('TCAG', 'AGTC')
def _simple_rc(seq):
"""simple reverse-complement: works only on unambiguous uppercase DNA"""
return seq.translate(_dna_trans)[::-1]
class GeneticCode(SkbioObject):
    """Class to hold codon to amino acid mapping, and vice versa.

    Parameters
    ----------
    code_sequence : str
        64-character string containing NCBI representation.
    id : str, optional
        identifier for the object.
    name : str, optional
        name for the object.
    start_codon_sequence : str, optional
        starting point for the codon sequence.

    Returns
    -------
    GeneticCode
        initialized ``GeneticCode`` object.

    Raises
    ------
    GeneticCodeInitError
        If the length of `code_sequence` is different to `64`.

    Examples
    --------
    >>> from skbio.sequence import GeneticCode
    >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSR'
    ...                   'RVVVVAAAADDEEGGGG')

    .. note:: `*` is used to denote termination as per the NCBI standard.
       Although the genetic code objects convert DNA to RNA and vice versa,
       lists of codons that they produce will be provided in DNA format.
    """
    # class data: need the bases, the list of codons in UUU -> GGG order, and
    # a mapping from positions in the list back to codons. These should be the
    # same for all GeneticCode instances, and are immutable (therefore
    # private).
    _codons = [a + b + c for a in "TCAG" for b in "TCAG" for c in "TCAG"]

    def __init__(self, code_sequence, id=None, name=None,
                 start_codon_sequence=None):
        if len(code_sequence) != 64:
            raise GeneticCodeInitError("code_sequence: %s has length %d, but "
                                       "expected 64" % (code_sequence,
                                                        len(code_sequence)))
        self.code_sequence = code_sequence
        self.id = id
        self.name = name
        self.start_codon_sequence = start_codon_sequence
        # codon -> amino acid for start codons only; '-' in the NCBI start
        # string marks "not a start codon".
        start_codons = {}
        if start_codon_sequence is not None:
            for codon, aa in zip(self._codons, start_codon_sequence):
                if aa != '-':
                    start_codons[codon] = aa
        self.start_codons = start_codons
        # codon -> amino acid, in UUU -> GGG order (DNA alphabet).
        codon_lookup = {key: value for (key, value) in zip(self._codons,
                                                           code_sequence)}
        self.codons = codon_lookup
        # create synonyms for each aa (amino acid -> list of codons)
        aa_lookup = defaultdict(list)
        for codon in self._codons:
            aa = codon_lookup[codon]
            aa_lookup[aa].append(codon)
        self.synonyms = dict(aa_lookup)
        sense_codons = codon_lookup.copy()
        # create sense codons: every codon except the stop codons
        stop_codons = self['*']
        for c in stop_codons:
            del sense_codons[c]
        self.sense_codons = sense_codons
        # create anticodons (reverse complement of each synonym codon)
        ac = {}
        for aa, codons in self.synonyms.items():
            ac[aa] = [_simple_rc(element) for element in codons]
        self.anticodons = ac

    def _analyze_quartet(self, codons, aa):
        """Analyzes a quartet of codons and amino acids: returns list of lists.

        Each list contains one block, splitting at purine/pyrimidine boundary
        if necessary.

        codons should be a list of 4 codons.
        aa should be a list of 4 amino acid symbols.

        Possible states:
        - All amino acids are the same: returns list of one quartet.
        - Two groups of 2 aa: returns list of two doublets.
        - One group of 2 and 2 groups of 1: list of one doublet, 2 singles.
        - 4 groups of 1: four singles.

        Note: codon blocks like Ile in the standard code (AUU, AUC, AUA) will
        be split when they cross the R/Y boundary, so [[AUU, AUC], [AUA]]. This
        would also apply to a block like AUC AUA AUG -> [[AUC],[AUA,AUG]],
        although this latter pattern is not observed in the standard code.
        """
        # positions 0/1 are the pyrimidine (T, C) third bases; 2/3 are the
        # purine (A, G) third bases
        if aa[0] == aa[1]:
            first_doublet = True
        else:
            first_doublet = False
        if aa[2] == aa[3]:
            second_doublet = True
        else:
            second_doublet = False
        if first_doublet and second_doublet and aa[1] == aa[2]:
            return [codons]
        else:
            blocks = []
            if first_doublet:
                blocks.append(codons[:2])
            else:
                blocks.extend([[codons[0]], [codons[1]]])
            if second_doublet:
                blocks.append(codons[2:])
            else:
                blocks.extend([[codons[2]], [codons[3]]])
            return blocks

    def _get_blocks(self):
        """Returns list of lists of codon blocks in the genetic code.

        A codon block can be:
        - a quartet, if all 4 XYn codons have the same amino acid.
        - a doublet, if XYt and XYc or XYa and XYg have the same aa.
        - a singlet, otherwise.

        Returns
        -------
        list
            Returns a list of the quartets, doublets, and singlets in the order
            UUU -> GGG.

        Notes
        -----
        A doublet cannot span the purine/pyrimidine boundary, and a quartet
        cannot span the boundary between two codon blocks whose first two bases
        differ.
        """
        # result is computed lazily and cached in self._blocks
        if hasattr(self, '_blocks'):
            return self._blocks
        else:
            blocks = []
            curr_codons = []
            curr_aa = []
            for index, codon, aa in zip(range(64), self._codons,
                                        self.code_sequence):
                # we're in a new block if it's a new quartet or a different aa
                new_quartet = not index % 4
                if new_quartet and curr_codons:
                    blocks.extend(self._analyze_quartet(curr_codons, curr_aa))
                    curr_codons = []
                    curr_aa = []
                curr_codons.append(codon)
                curr_aa.append(aa)
            # don't forget to append last block
            if curr_codons:
                blocks.extend(self._analyze_quartet(curr_codons, curr_aa))
            self._blocks = blocks
            return self._blocks

    blocks = property(_get_blocks)

    def __str__(self):
        """Returns code_sequence that constructs the GeneticCode
        """
        return self.code_sequence

    def __repr__(self):
        """Returns reconstructable representation of the GeneticCode
        """
        return 'GeneticCode(%s)' % str(self)

    def __eq__(self, other):
        """ Allows two GeneticCode objects to be compared to each other.

        Two GeneticCode objects are equal if they have equal code_sequences.
        """
        if not isinstance(other, GeneticCode):
            return False
        return self.code_sequence == other.code_sequence

    def __ne__(self, other):
        """Required in Py2."""
        return not self == other

    def __getitem__(self, item):
        """Returns amino acid corresponding to codon, or codons for an aa.

        Returns [] for empty list of codons, 'X' for unknown amino acid.
        """
        item = str(item)
        if len(item) == 1:  # amino acid
            return self.synonyms.get(item, [])
        elif len(item) == 3:  # codon
            key = item.upper()
            # accept RNA codons by mapping U -> T
            key = key.replace('U', 'T')
            return self.codons.get(key, 'X')
        else:
            raise InvalidCodonError("Codon or aa %s has wrong length" % item)

    def translate(self, nucleotide_sequence, start=0):
        """Translate nucleotide to protein sequence

        Parameters
        ----------
        nucleotide_sequence : NucleotideSequence
            sequence to be translated
        start : int, optional
            position to begin translation

        Returns
        -------
        ProteinSequence
            translation of nucleotide_sequence

        Notes
        -----
        ``translate`` returns the translation of the entire sequence, (i.e., of
        ``nucleotide_sequence[start:]``). It is the user's responsibility to
        trim to an open reading frame, either from the input or using the
        output, if that is desired.

        See Also
        --------
        translate_six_frames

        Examples
        --------
        >>> from skbio.sequence import GeneticCode
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> sgc.translate('AUGCAUGACUUUUGA', 1)
        Protein
        -----------------------------
        Stats:
            length: 4
            has gaps: False
            has degenerates: False
            has non-degenerates: True
        -----------------------------
        0 CMTF
        """
        if len(nucleotide_sequence) == 0:
            return Protein('')
        if start + 1 > len(nucleotide_sequence):
            # NOTE(review): the two adjacent literals concatenate to
            # "end ofNucleotideSequence" (missing space) — confirm and fix
            # in a behavior-changing pass.
            raise ValueError("Translation starts after end of"
                             "NucleotideSequence")
        translation = []
        # step through complete codons only; a trailing partial codon is
        # silently dropped
        for i in range(start, len(nucleotide_sequence) - 2, 3):
            translation.append(self[nucleotide_sequence[i:i + 3]])
        translation = Protein(''.join(translation))
        return translation

    def get_stop_indices(self, nucleotide_sequence, start=0):
        """returns indexes for stop codons in the specified frame

        Parameters
        ----------
        nucleotide_sequence : str, NucleotideSequence
            sequence to be scanned for stop codons
        start : int, optional
            position where the search begins.

        Returns
        -------
        list
            indices of the stop codons.

        Examples
        --------
        >>> from skbio.sequence import GeneticCode, DNA
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> seq = DNA('ATGCTAACATAAA')
        >>> sgc.get_stop_indices(seq, 0)
        [9]
        """
        stops = self['*']
        stop_pattern = '(%s)' % '|'.join(stops)
        stop_pattern = re.compile(stop_pattern)
        seq = str(nucleotide_sequence)
        found = [hit.start() for hit in stop_pattern.finditer(seq)]
        # keep only the hits that are in frame with `start`
        found = [index for index in found if index % 3 == start]
        return found

    def translate_six_frames(self, nucleotide_sequence):
        """Translate nucleotide to protein sequences for all six reading frames

        Parameters
        ----------
        nucleotide_sequence : NucleotideSequence
            sequence to be translated

        Returns
        -------
        list
            the six translated ProteinSequence objects

        See Also
        --------
        translate

        Examples
        --------
        >>> from skbio.sequence import GeneticCode, RNA
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> results = sgc.translate_six_frames(RNA('AUGCUAACAUAAA'))
        >>> for e in results:
        ...     print(e)
        MLT*
        C*HK
        ANI
        FMLA
        LC*H
        YVS
        """
        rc_nucleotide_sequence = nucleotide_sequence.reverse_complement()
        results = []
        # frames 1-3: forward strand; frames 4-6: reverse complement
        for start in range(3):
            translation = self.translate(nucleotide_sequence, start)
            results.append(translation)
        for start in range(3):
            translation = self.translate(rc_nucleotide_sequence, start)
            results.append(translation)
        return results

    def is_start(self, codon):
        """Checks if codon is a start codon

        Parameters
        ----------
        codon : str
            codon string

        Returns
        -------
        bool
            ``True`` if codon is a start codon, ``False`` otherwise

        Examples
        --------
        >>> from skbio.sequence import GeneticCode
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> sgc.is_start('ATG')
        False
        >>> sgc.is_start('AAA')
        False
        """
        # normalize to uppercase DNA before the lookup
        fixed_codon = codon.upper().replace('U', 'T')
        return fixed_codon in self.start_codons

    def is_stop(self, codon):
        """Checks if codon is a stop codon

        Parameters
        ----------
        codon : str
            codon string

        Returns
        -------
        bool
            ``True`` if codon is a stop codon, ``False`` otherwise

        Examples
        --------
        >>> from skbio.sequence import GeneticCode
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> sgc.is_stop('UAA')
        True
        >>> sgc.is_stop('AAA')
        False
        """
        return self[codon] == '*'

    def changes(self, other):
        """Returns dictionary of codons that differ

        Parameters
        ----------
        other : GeneticCode
            genetic code object

        Returns
        -------
        dict
            Returns a dictionary of the form ``{codon:'XY'}`` for codons that
            differ. X is the string representation of the amino acid in the
            object calling this method, Y is the string representation of the
            amino acid in `other`. Always returns a 2-character string.

        Examples
        --------
        >>> from skbio.sequence import GeneticCode
        >>> from pprint import pprint
        >>> sgc = GeneticCode('FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS*'
        ...                   '*VVVVAAAADDEEGGGG')
        >>> pprint(sgc.changes('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTT'
        ...                    'TNNKKSSRRVVVVAAAADDEEGGGG'))
        {'AGA': '*R', 'AGG': '*R', 'ATA': 'MI', 'TGA': 'W*'}
        """
        changes = {}
        try:
            other_code = other.code_sequence
        except AttributeError:  # try using other directly as sequence
            other_code = other
        for codon, old, new in zip(self._codons, self.code_sequence,
                                   other_code):
            if old != new:
                changes[codon] = old + new
        return changes
# NCBI translation tables. Each entry is
# [code_sequence, ncbi_id, name, start_codon_sequence] and is splatted
# directly into the GeneticCode constructor by ``genetic_code``.
# NOTE(review): the last entry (table 23) has no start_codon_sequence
# element — confirm this omission is intentional (GeneticCode treats a
# missing start string as "no start codons").
_ncbi_genetic_code_data = [
    [
        'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        1,
        'Standard Nuclear',
        '---M---------------M---------------M----------------------------',
    ],
    [
        'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS**VVVVAAAADDEEGGGG',
        2,
        'Vertebrate Mitochondrial',
        '--------------------------------MMMM---------------M------------',
    ],
    [
        'FFLLSSSSYY**CCWWTTTTPPPPHHQQRRRRIIMMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        3,
        'Yeast Mitochondrial',
        '----------------------------------MM----------------------------',
    ],
    [
        'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        4,
        'Mold, Protozoan, and Coelenterate Mitochondrial, and Mycoplasma/'
        'Spiroplasma Nuclear',
        '--MM---------------M------------MMMM---------------M------------',
    ],
    [
        'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSSSVVVVAAAADDEEGGGG',
        5,
        'Invertebrate Mitochondrial',
        '---M----------------------------MMMM---------------M------------',
    ],
    [
        'FFLLSSSSYYQQCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        6,
        'Ciliate, Dasycladacean and Hexamita Nuclear',
        '-----------------------------------M----------------------------',
    ],
    [
        'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
        9,
        'Echinoderm and Flatworm Mitochondrial',
        '-----------------------------------M---------------M------------',
    ],
    [
        'FFLLSSSSYY**CCCWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        10,
        'Euplotid Nuclear',
        '-----------------------------------M----------------------------',
    ],
    [
        'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        11,
        'Bacterial Nuclear and Plant Plastid',
        '---M---------------M------------MMMM---------------M------------',
    ],
    [
        'FFLLSSSSYY**CC*WLLLSPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        12,
        'Alternative Yeast Nuclear',
        '-------------------M---------------M----------------------------',
    ],
    [
        'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSGGVVVVAAAADDEEGGGG',
        13,
        'Ascidian Mitochondrial',
        '-----------------------------------M----------------------------',
    ],
    [
        'FFLLSSSSYYY*CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
        14,
        'Alternative Flatworm Mitochondrial',
        '-----------------------------------M----------------------------',
    ],
    [
        'FFLLSSSSYY*QCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        15,
        'Blepharisma Nuclear',
        '-----------------------------------M----------------------------',
    ],
    [
        'FFLLSSSSYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        16,
        'Chlorophycean Mitochondrial',
        '-----------------------------------M----------------------------',
    ],
    [
        'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
        20,
        'Trematode Mitochondrial',
        '-----------------------------------M---------------M------------',
    ],
    [
        'FFLLSS*SYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        22,
        'Scenedesmus obliquus Mitochondrial',
        '-----------------------------------M----------------------------',
    ],
    [
        'FF*LSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        23,
        'Thraustochytrium Mitochondrial',
    ],
]
def genetic_code(*id):
    """``skbio.sequence.GeneticCode`` factory given an optional id.

    Parameters
    ----------
    id : int or str, optional
        Indicates the ``skbio.sequence.GeneticCode`` to return. Must be the
        NCBI id of one of the known tables. If `id` is not provided, the
        Standard Nuclear genetic code (table 1) is returned.

    Returns
    -------
    skbio.sequence.GeneticCode

    Raises
    ------
    TypeError
        If more than one argument is supplied.
    ValueError
        If no table with the requested id exists.
    """
    if len(id) > 1:
        raise TypeError('genetic_code takes 0 or 1 arguments (%d given)'
                        % len(id))
    # default to the Standard Nuclear code; str ids like '5' are accepted
    key = int(id[0]) if id else 1
    for entry in _ncbi_genetic_code_data:
        if entry[1] == key:
            return GeneticCode(*entry)
    # BUG FIX: the message previously formatted the raw argument *tuple*
    # ('%d.' % id), which only worked because id happened to be a 1-tuple
    # on every reachable path; format the resolved key instead.
    raise ValueError('Genetic code could not be found for %d.' % key)
|
Achuth17/scikit-bio
|
skbio/sequence/_genetic_code.py
|
Python
|
bsd-3-clause
| 19,481
|
[
"scikit-bio"
] |
f2e54d3a9b023ef89877c7f219cf927ac83140eea3a6edf256958292d4604952
|
# sql/visitors.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Visitor/traversal interface and library functions.
SQLAlchemy schema and expression constructs rely on a Python-centric
version of the classic "visitor" pattern as the primary way in which
they apply functionality. The most common use of this pattern
is statement compilation, where individual expression classes match
up to rendering methods that produce a string result. Beyond this,
the visitor system is also used to inspect expressions for various
information and patterns, as well as for usage in
some kinds of expression transformation. Other kinds of transformation
use a non-visitor traversal system.
For many examples of how the visit system is used, see the
sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules.
For an introduction to clause adaption, see
http://techspot.zzzeek.org/2008/01/23/expression-transformations/
"""
from collections import deque
from .. import util
import operator
from .. import exc
__all__ = ['VisitableType', 'Visitable', 'ClauseVisitor',
'CloningVisitor', 'ReplacingCloningVisitor', 'iterate',
'iterate_depthfirst', 'traverse_using', 'traverse',
'traverse_depthfirst',
'cloned_traverse', 'replacement_traverse']
class VisitableType(type):
    """Metaclass which attaches a generated ``_compiler_dispatch`` method to
    every class that declares a ``__visit_name__`` attribute.

    The generated method looks up ``'visit_' + __visit_name__`` on a visitor
    object and invokes it, forwarding keyword arguments (see
    ``_generate_dispatch``).  The ``Visitable`` base class itself, and any
    class without ``__visit_name__``, are left untouched.
    """

    def __init__(cls, clsname, bases, clsdict):
        if hasattr(cls, '__visit_name__') and clsname != 'Visitable':
            _generate_dispatch(cls)
        super(VisitableType, cls).__init__(clsname, bases, clsdict)
def _generate_dispatch(cls):
    """Return an optimized visit dispatch function for the cls
    for use by the compiler.

    Only classes that declare ``__visit_name__`` directly in their own
    ``__dict__`` get a new dispatcher; others inherit their parent's.
    """
    if '__visit_name__' in cls.__dict__:
        visit_name = cls.__visit_name__
        if isinstance(visit_name, str):
            # There is an optimization opportunity here because the
            # string name of the class's __visit_name__ is known at
            # this early stage (import time) so it can be pre-constructed.
            getter = operator.attrgetter("visit_%s" % visit_name)

            def _compiler_dispatch(self, visitor, **kw):
                try:
                    meth = getter(visitor)
                except AttributeError:
                    # visitor has no handler for this construct
                    raise exc.UnsupportedCompilationError(visitor, cls)
                else:
                    return meth(self, **kw)
        else:
            # The optimization opportunity is lost for this case because the
            # __visit_name__ is not yet a string. As a result, the visit
            # string has to be recalculated with each compilation.
            def _compiler_dispatch(self, visitor, **kw):
                visit_attr = 'visit_%s' % self.__visit_name__
                try:
                    meth = getattr(visitor, visit_attr)
                except AttributeError:
                    raise exc.UnsupportedCompilationError(visitor, cls)
                else:
                    return meth(self, **kw)

        _compiler_dispatch.__doc__ = \
            """Look for an attribute named "visit_" + self.__visit_name__
            on the visitor, and call it with the same kw params.
            """
        cls._compiler_dispatch = _compiler_dispatch
class Visitable(util.with_metaclass(VisitableType, object)):
    """Base class for visitable objects, applies the
    ``VisitableType`` metaclass.

    Subclasses that define ``__visit_name__`` receive a generated
    ``_compiler_dispatch`` method at class-creation time.
    """
class ClauseVisitor(object):
    """Base class for visitor objects which can traverse using
    the traverse() function.
    """

    __traverse_options__ = {}

    def traverse_single(self, obj, **kw):
        # dispatch to the first visitor in the chain that implements a
        # handler for this element's __visit_name__
        for vis in self._visitor_iterator:
            handler = getattr(vis, "visit_%s" % obj.__visit_name__, None)
            if handler:
                return handler(obj, **kw)

    def iterate(self, obj):
        """traverse the given expression structure, returning an iterator
        of all elements.
        """
        return iterate(obj, self.__traverse_options__)

    def traverse(self, obj):
        """traverse and visit the given expression structure."""
        return traverse(obj, self.__traverse_options__, self._visitor_dict)

    @util.memoized_property
    def _visitor_dict(self):
        # map 'xyz' -> bound visit_xyz method for every visit_* attribute
        return {name[6:]: getattr(self, name)
                for name in dir(self)
                if name.startswith('visit_')}

    @property
    def _visitor_iterator(self):
        """iterate through this visitor and each 'chained' visitor."""
        vis = self
        while vis:
            yield vis
            vis = getattr(vis, '_next', None)

    def chain(self, visitor):
        """'chain' an additional ClauseVisitor onto this ClauseVisitor.

        the chained visitor will receive all visit events after this one.
        """
        # walk to the current tail of the chain, then append
        tail = self
        while getattr(tail, '_next', None) is not None:
            tail = tail._next
        tail._next = visitor
        return self
class CloningVisitor(ClauseVisitor):
    """Base class for visitor objects which can traverse using
    the cloned_traverse() function.
    """

    def copy_and_process(self, list_):
        """Apply cloned traversal to the given list of elements, and return
        the new list.
        """
        return list(map(self.traverse, list_))

    def traverse(self, obj):
        """traverse and visit the given expression structure."""
        return cloned_traverse(obj, self.__traverse_options__,
                               self._visitor_dict)
class ReplacingCloningVisitor(CloningVisitor):
    """Base class for visitor objects which can traverse using
    the replacement_traverse() function.
    """

    def replace(self, elem):
        """receive pre-copied elements during a cloning traversal.

        If the method returns a new element, the element is used
        instead of creating a simple copy of the element.  Traversal
        will halt on the newly returned element if it is re-encountered.
        """
        return None

    def traverse(self, obj):
        """traverse and visit the given expression structure."""
        def substitute(elem):
            # the first chained visitor that proposes a replacement wins;
            # returning None means "make a plain copy"
            for vis in self._visitor_iterator:
                candidate = vis.replace(elem)
                if candidate is not None:
                    return candidate
        return replacement_traverse(obj, self.__traverse_options__, substitute)
def iterate(obj, opts):
    """traverse the given expression structure, returning an iterator.

    traversal is configured to be breadth-first.
    """
    queue = deque([obj])
    while queue:
        node = queue.popleft()
        yield node
        queue.extend(node.get_children(**opts))
def iterate_depthfirst(obj, opts):
    """traverse the given expression structure, returning an iterator.

    traversal is configured to be depth-first.
    """
    stack = [obj]
    traversal = deque()
    while stack:
        node = stack.pop()
        # prepend so deeper nodes end up before their ancestors
        traversal.appendleft(node)
        stack.extend(node.get_children(**opts))
    return iter(traversal)
def traverse_using(iterator, obj, visitors):
    """visit the given expression structure using the given iterator of
    objects.
    """
    for element in iterator:
        handler = visitors.get(element.__visit_name__, None)
        if handler:
            handler(element)
    return obj
def traverse(obj, opts, visitors):
    """traverse and visit the given expression structure using the default
    iterator.
    """
    # Default traversal is breadth-first via iterate().
    nodes = iterate(obj, opts)
    return traverse_using(nodes, obj, visitors)
def traverse_depthfirst(obj, opts, visitors):
    """traverse and visit the given expression structure using the
    depth-first iterator.
    """
    nodes = iterate_depthfirst(obj, opts)
    return traverse_using(nodes, obj, visitors)
def cloned_traverse(obj, opts, visitors):
    """clone the given expression structure, allowing
    modifications by visitors."""
    cloned = {}
    stop_on = set(opts.get('stop_on', []))

    def clone(elem):
        # Elements named in 'stop_on' pass through uncloned.
        if elem in stop_on:
            return elem
        key = id(elem)
        if key not in cloned:
            cloned[key] = newelem = elem._clone()
            newelem._copy_internals(clone=clone)
            meth = visitors.get(newelem.__visit_name__, None)
            if meth:
                meth(newelem)
        return cloned[key]

    if obj is not None:
        obj = clone(obj)
    return obj
def replacement_traverse(obj, opts, replace):
    """clone the given expression structure, allowing element
    replacement by a given replacement function."""
    cloned = {}
    stop_on = {id(x) for x in opts.get('stop_on', [])}

    def clone(elem, **kw):
        # Pass through unchanged anything explicitly stopped on, or
        # anything annotated to opt out of replacement traversal.
        if id(elem) in stop_on or \
                'no_replacement_traverse' in elem._annotations:
            return elem
        replacement = replace(elem)
        if replacement is not None:
            # Halt traversal on the replacement if it is seen again.
            stop_on.add(id(replacement))
            return replacement
        if elem not in cloned:
            cloned[elem] = newelem = elem._clone()
            newelem._copy_internals(clone=clone, **kw)
        return cloned[elem]

    if obj is not None:
        obj = clone(obj, **opts)
    return obj
|
odubno/microblog
|
venv/lib/python2.7/site-packages/sqlalchemy/sql/visitors.py
|
Python
|
bsd-3-clause
| 9,943
|
[
"VisIt"
] |
ec2375d309f6ae03eda2a622bd833b9486d2246210034ca2577c2cd4687edbbf
|
"""Tests for user-friendly public interface to polynomial functions. """
from sympy.polys.polytools import (
Poly, PurePoly, poly,
parallel_poly_from_expr,
degree, degree_list,
LC, LM, LT,
pdiv, prem, pquo, pexquo,
div, rem, quo, exquo,
half_gcdex, gcdex, invert,
subresultants,
resultant, discriminant,
terms_gcd, cofactors,
gcd, gcd_list,
lcm, lcm_list,
trunc,
monic, content, primitive,
compose, decompose,
sturm,
gff_list, gff,
sqf_norm, sqf_part, sqf_list, sqf,
factor_list, factor,
intervals, refine_root, count_roots,
real_roots, nroots, ground_roots,
nth_power_roots_poly,
cancel, reduced, groebner,
GroebnerBasis, is_zero_dimensional,
_torational_factor_list,
to_rational_coeffs)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
OperationNotSupported,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
UnificationFailed,
RefinementFailed,
GeneratorsNeeded,
GeneratorsError,
PolynomialError,
CoercionFailed,
NotAlgebraic,
DomainError,
OptionError,
FlagError)
from sympy.polys.polyclasses import DMP
from sympy.polys.fields import field
from sympy.polys.domains import FF, ZZ, QQ, RR, EX
from sympy.polys.orderings import lex, grlex, grevlex
from sympy import (
S, Integer, Rational, Float, Mul, Symbol, symbols, sqrt, Piecewise,
exp, sin, tanh, expand, oo, I, pi, re, im, RootOf, Eq, Tuple, Expr)
from sympy.core.basic import _aresame
from sympy.core.compatibility import iterable
from sympy.core.mul import _keep_coeff
from sympy.utilities.pytest import raises, XFAIL
from sympy.abc import a, b, c, d, e, p, q, r, s, t, u, v, w, x, y, z
def _epsilon_eq(a, b):
for x, y in zip(a, b):
if abs(x - y) > 1e-10:
return False
return True
def _strict_eq(a, b):
    """Recursive structural equality using Poly's strict comparison."""
    if type(a) != type(b):
        return False
    if iterable(a):
        # Same-type iterables: compare element-wise, lengths first.
        if len(a) != len(b):
            return False
        return all(_strict_eq(c, d) for c, d in zip(a, b))
    return isinstance(a, Poly) and a.eq(b, strict=True)
def test_Poly_from_dict():
    """Construct Poly from {exponent: coeff} dicts over various domains."""
    K = FF(3)
    # Integer coefficients are reduced modulo 3 in FF(3): 5 -> 2.
    assert Poly.from_dict(
        {0: 1, 1: 2}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
    assert Poly.from_dict(
        {0: 1, 1: 5}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
    assert Poly.from_dict(
        {(0,): 1, (1,): 2}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
    assert Poly.from_dict(
        {(0,): 1, (1,): 5}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
    assert Poly.from_dict({(0, 0): 1, (1, 1): 2}, gens=(
        x, y), domain=K).rep == DMP([[K(2), K(0)], [K(1)]], K)
    # Without an explicit domain, ZZ is inferred; field=True upgrades to QQ.
    assert Poly.from_dict({0: 1, 1: 2}, gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
    assert Poly.from_dict(
        {0: 1, 1: 2}, gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
    assert Poly.from_dict(
        {0: 1, 1: 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
    assert Poly.from_dict(
        {0: 1, 1: 2}, gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
    assert Poly.from_dict(
        {(0,): 1, (1,): 2}, gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
    assert Poly.from_dict(
        {(0,): 1, (1,): 2}, gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
    assert Poly.from_dict(
        {(0,): 1, (1,): 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
    assert Poly.from_dict(
        {(0,): 1, (1,): 2}, gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
    # composite=False forces non-generator symbols into the EX domain.
    assert Poly.from_dict({(1,): sin(y)}, gens=x, composite=False) == \
        Poly(sin(y)*x, x, domain='EX')
    assert Poly.from_dict({(1,): y}, gens=x, composite=False) == \
        Poly(y*x, x, domain='EX')
    assert Poly.from_dict({(1, 1): 1}, gens=(x, y), composite=False) == \
        Poly(x*y, x, y, domain='ZZ')
    assert Poly.from_dict({(1, 0): y}, gens=(x, z), composite=False) == \
        Poly(y*x, x, z, domain='EX')
def test_Poly_from_list():
    """Construct univariate Poly from coefficient lists over various domains."""
    K = FF(3)
    assert Poly.from_list([2, 1], gens=x, domain=K).rep == DMP([K(2), K(1)], K)
    assert Poly.from_list([5, 1], gens=x, domain=K).rep == DMP([K(2), K(1)], K)
    assert Poly.from_list([2, 1], gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
    assert Poly.from_list([2, 1], gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
    assert Poly.from_list([2, 1], gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
    assert Poly.from_list([2, 1], gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
    # Float coefficients select the RR domain; leading zeros are stripped.
    assert Poly.from_list([0, 1.0], gens=x).rep == DMP([RR(1.0)], RR)
    assert Poly.from_list([1.0, 0], gens=x).rep == DMP([RR(1.0), RR(0.0)], RR)
    # from_list only supports a single generator.
    raises(MultivariatePolynomialError, lambda: Poly.from_list([[]], gens=(x, y)))
def test_Poly_from_poly():
    """Rebuild Poly from Poly with changed generators, domains and moduli."""
    f = Poly(x + 7, x, domain=ZZ)
    g = Poly(x + 2, x, modulus=3)
    h = Poly(x + y, x, y, domain=ZZ)
    K = FF(3)
    assert Poly.from_poly(f) == f
    assert Poly.from_poly(f, domain=K).rep == DMP([K(1), K(1)], K)
    assert Poly.from_poly(f, domain=ZZ).rep == DMP([1, 7], ZZ)
    assert Poly.from_poly(f, domain=QQ).rep == DMP([1, 7], QQ)
    assert Poly.from_poly(f, gens=x) == f
    assert Poly.from_poly(f, gens=x, domain=K).rep == DMP([K(1), K(1)], K)
    assert Poly.from_poly(f, gens=x, domain=ZZ).rep == DMP([1, 7], ZZ)
    assert Poly.from_poly(f, gens=x, domain=QQ).rep == DMP([1, 7], QQ)
    # Changing to generator y pushes x into the coefficient domain ZZ[x] ...
    assert Poly.from_poly(f, gens=y) == Poly(x + 7, y, domain='ZZ[x]')
    # ... so requesting a ground domain that cannot hold x must fail.
    raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=K))
    raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=ZZ))
    raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=QQ))
    assert Poly.from_poly(f, gens=(x, y)) == Poly(x + 7, x, y, domain='ZZ')
    assert Poly.from_poly(
        f, gens=(x, y), domain=ZZ) == Poly(x + 7, x, y, domain='ZZ')
    assert Poly.from_poly(
        f, gens=(x, y), domain=QQ) == Poly(x + 7, x, y, domain='QQ')
    assert Poly.from_poly(
        f, gens=(x, y), modulus=3) == Poly(x + 7, x, y, domain='FF(3)')
    K = FF(2)
    assert Poly.from_poly(g) == g
    assert Poly.from_poly(g, domain=ZZ).rep == DMP([1, -1], ZZ)
    # FF(3) coefficients have no canonical image in QQ.
    raises(CoercionFailed, lambda: Poly.from_poly(g, domain=QQ))
    assert Poly.from_poly(g, domain=K).rep == DMP([K(1), K(0)], K)
    assert Poly.from_poly(g, gens=x) == g
    assert Poly.from_poly(g, gens=x, domain=ZZ).rep == DMP([1, -1], ZZ)
    raises(CoercionFailed, lambda: Poly.from_poly(g, gens=x, domain=QQ))
    assert Poly.from_poly(g, gens=x, domain=K).rep == DMP([K(1), K(0)], K)
    K = FF(3)
    assert Poly.from_poly(h) == h
    assert Poly.from_poly(
        h, domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
    assert Poly.from_poly(
        h, domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
    assert Poly.from_poly(h, domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
    assert Poly.from_poly(h, gens=x) == Poly(x + y, x, domain=ZZ[y])
    raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, domain=ZZ))
    assert Poly.from_poly(
        h, gens=x, domain=ZZ[y]) == Poly(x + y, x, domain=ZZ[y])
    raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, domain=QQ))
    assert Poly.from_poly(
        h, gens=x, domain=QQ[y]) == Poly(x + y, x, domain=QQ[y])
    raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, modulus=3))
    assert Poly.from_poly(h, gens=y) == Poly(x + y, y, domain=ZZ[x])
    raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, domain=ZZ))
    assert Poly.from_poly(
        h, gens=y, domain=ZZ[x]) == Poly(x + y, y, domain=ZZ[x])
    raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, domain=QQ))
    assert Poly.from_poly(
        h, gens=y, domain=QQ[x]) == Poly(x + y, y, domain=QQ[x])
    raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, modulus=3))
    assert Poly.from_poly(h, gens=(x, y)) == h
    assert Poly.from_poly(
        h, gens=(x, y), domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
    assert Poly.from_poly(
        h, gens=(x, y), domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
    assert Poly.from_poly(
        h, gens=(x, y), domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
    assert Poly.from_poly(
        h, gens=(y, x)).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
    assert Poly.from_poly(
        h, gens=(y, x), domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
    assert Poly.from_poly(
        h, gens=(y, x), domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
    assert Poly.from_poly(
        h, gens=(y, x), domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
    assert Poly.from_poly(
        h, gens=(x, y), field=True).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
    # NOTE(review): this assertion is an exact duplicate of the previous
    # one — presumably one of the two was meant to use different options.
    assert Poly.from_poly(
        h, gens=(x, y), field=True).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
def test_Poly_from_expr():
    """Construct Poly from sympy expressions with inferred/explicit options."""
    # Constant expressions provide no generators to build a Poly from.
    raises(GeneratorsNeeded, lambda: Poly.from_expr(S(0)))
    raises(GeneratorsNeeded, lambda: Poly.from_expr(S(7)))
    F3 = FF(3)
    # Coefficients reduce modulo 3: 5 -> 2.
    assert Poly.from_expr(x + 5, domain=F3).rep == DMP([F3(1), F3(2)], F3)
    assert Poly.from_expr(y + 5, domain=F3).rep == DMP([F3(1), F3(2)], F3)
    assert Poly.from_expr(x + 5, x, domain=F3).rep == DMP([F3(1), F3(2)], F3)
    assert Poly.from_expr(y + 5, y, domain=F3).rep == DMP([F3(1), F3(2)], F3)
    assert Poly.from_expr(x + y, domain=F3).rep == DMP([[F3(1)], [F3(1), F3(0)]], F3)
    assert Poly.from_expr(x + y, x, y, domain=F3).rep == DMP([[F3(1)], [F3(1), F3(0)]], F3)
    assert Poly.from_expr(x + 5).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(y + 5).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(x + 5, x).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(y + 5, y).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(x + 5, domain=ZZ).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(y + 5, domain=ZZ).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(x + 5, x, domain=ZZ).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(y + 5, y, domain=ZZ).rep == DMP([1, 5], ZZ)
    # With two generators the nesting of the DMP rep depends on which
    # generator actually occurs in the expression.
    assert Poly.from_expr(x + 5, x, y, domain=ZZ).rep == DMP([[1], [5]], ZZ)
    assert Poly.from_expr(y + 5, x, y, domain=ZZ).rep == DMP([[1, 5]], ZZ)
def test_Poly__new__():
    """Poly constructor: option validation, input forms and domain handling."""
    # Duplicate or conflicting generators are rejected.
    raises(GeneratorsError, lambda: Poly(x + 1, x, x))
    raises(GeneratorsError, lambda: Poly(x + y, x, y, domain=ZZ[x]))
    raises(GeneratorsError, lambda: Poly(x + y, x, y, domain=ZZ[y]))
    # Mutually exclusive or inapplicable option combinations.
    raises(OptionError, lambda: Poly(x, x, symmetric=True))
    raises(OptionError, lambda: Poly(x + 2, x, modulus=3, domain=QQ))
    raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, gaussian=True))
    raises(OptionError, lambda: Poly(x + 2, x, modulus=3, gaussian=True))
    raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, extension=[sqrt(3)]))
    raises(OptionError, lambda: Poly(x + 2, x, modulus=3, extension=[sqrt(3)]))
    raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, extension=True))
    raises(OptionError, lambda: Poly(x + 2, x, modulus=3, extension=True))
    raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, greedy=True))
    raises(OptionError, lambda: Poly(x + 2, x, domain=QQ, field=True))
    raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, greedy=False))
    raises(OptionError, lambda: Poly(x + 2, x, domain=QQ, field=False))
    raises(NotImplementedError, lambda: Poly(x + 1, x, modulus=3, order='grlex'))
    raises(NotImplementedError, lambda: Poly(x + 1, x, order='grlex'))
    # Dict/list/tuple inputs need explicit generators.
    raises(GeneratorsNeeded, lambda: Poly({1: 2, 0: 1}))
    raises(GeneratorsNeeded, lambda: Poly([2, 1]))
    raises(GeneratorsNeeded, lambda: Poly((2, 1)))
    raises(GeneratorsNeeded, lambda: Poly(1))
    f = a*x**2 + b*x + c
    assert Poly({2: a, 1: b, 0: c}, x) == f
    assert Poly(iter([a, b, c]), x) == f
    assert Poly([a, b, c], x) == f
    assert Poly((a, b, c), x) == f
    # Empty dict gives the zero polynomial in the given generators.
    f = Poly({}, x, y, z)
    assert f.gens == (x, y, z) and f.as_expr() == 0
    assert Poly(Poly(a*x + b*y, x, y), x) == Poly(a*x + b*y, x)
    assert Poly(3*x**2 + 2*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
    assert Poly(3*x**2 + 2*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
    assert Poly(3*x**2 + 2*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]
    raises(CoercionFailed, lambda: Poly(3*x**2/5 + 2*x/5 + 1, domain='ZZ'))
    assert Poly(
        3*x**2/5 + 2*x/5 + 1, domain='QQ').all_coeffs() == [S(3)/5, S(2)/5, 1]
    # RR coefficients of 3/5 and 2/5 are inexact; compare with tolerance.
    assert _epsilon_eq(
        Poly(3*x**2/5 + 2*x/5 + 1, domain='RR').all_coeffs(), [0.6, 0.4, 1.0])
    assert Poly(3.0*x**2 + 2.0*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
    assert Poly(3.0*x**2 + 2.0*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
    assert Poly(
        3.0*x**2 + 2.0*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]
    raises(CoercionFailed, lambda: Poly(3.1*x**2 + 2.1*x + 1, domain='ZZ'))
    assert Poly(3.1*x**2 + 2.1*x + 1, domain='QQ').all_coeffs() == [S(31)/10, S(21)/10, 1]
    assert Poly(3.1*x**2 + 2.1*x + 1, domain='RR').all_coeffs() == [3.1, 2.1, 1.0]
    assert Poly({(2, 1): 1, (1, 2): 2, (1, 1): 3}, x, y) == \
        Poly(x**2*y + 2*x*y**2 + 3*x*y, x, y)
    assert Poly(x**2 + 1, extension=I).get_domain() == QQ.algebraic_field(I)
    f = 3*x**5 - x**4 + x**3 - x** 2 + 65538
    # symmetric=True stores coefficients in (-p/2, p/2]; False in [0, p).
    assert Poly(f, x, modulus=65537, symmetric=True) == \
        Poly(3*x**5 - x**4 + x**3 - x** 2 + 1, x, modulus=65537,
             symmetric=True)
    assert Poly(f, x, modulus=65537, symmetric=False) == \
        Poly(3*x**5 + 65536*x**4 + x**3 + 65536*x** 2 + 1, x,
             modulus=65537, symmetric=False)
    assert Poly(x**2 + x + 1.0).get_domain() == RR
def test_Poly__args():
    """Poly.args holds the original expression."""
    assert Poly(x**2 + 1).args == (x**2 + 1,)


def test_Poly__gens():
    """Generator selection and ordering via explicit gens, sort= and wrt=."""
    assert Poly((x - p)*(x - q), x).gens == (x,)
    assert Poly((x - p)*(x - q), p).gens == (p,)
    assert Poly((x - p)*(x - q), q).gens == (q,)
    assert Poly((x - p)*(x - q), x, p).gens == (x, p)
    assert Poly((x - p)*(x - q), x, q).gens == (x, q)
    assert Poly((x - p)*(x - q), x, p, q).gens == (x, p, q)
    assert Poly((x - p)*(x - q), p, x, q).gens == (p, x, q)
    assert Poly((x - p)*(x - q), p, q, x).gens == (p, q, x)
    assert Poly((x - p)*(x - q)).gens == (x, p, q)
    # sort= reorders auto-detected generators ...
    assert Poly((x - p)*(x - q), sort='x > p > q').gens == (x, p, q)
    assert Poly((x - p)*(x - q), sort='p > x > q').gens == (p, x, q)
    assert Poly((x - p)*(x - q), sort='p > q > x').gens == (p, q, x)
    # ... but explicit gens win over sort=.
    assert Poly((x - p)*(x - q), x, p, q, sort='p > q > x').gens == (x, p, q)
    # wrt= moves the named generator(s) to the front.
    assert Poly((x - p)*(x - q), wrt='x').gens == (x, p, q)
    assert Poly((x - p)*(x - q), wrt='p').gens == (p, x, q)
    assert Poly((x - p)*(x - q), wrt='q').gens == (q, x, p)
    assert Poly((x - p)*(x - q), wrt=x).gens == (x, p, q)
    assert Poly((x - p)*(x - q), wrt=p).gens == (p, x, q)
    assert Poly((x - p)*(x - q), wrt=q).gens == (q, x, p)
    assert Poly((x - p)*(x - q), x, p, q, wrt='p').gens == (x, p, q)
    assert Poly((x - p)*(x - q), wrt='p', sort='q > x').gens == (p, q, x)
    assert Poly((x - p)*(x - q), wrt='q', sort='p > x').gens == (q, p, x)
def test_Poly_zero():
    """Poly.zero preserves generators and domain."""
    assert Poly(x).zero == Poly(0, x, domain=ZZ)
    assert Poly(x/2).zero == Poly(0, x, domain=QQ)


def test_Poly_one():
    """Poly.one preserves generators and domain."""
    assert Poly(x).one == Poly(1, x, domain=ZZ)
    assert Poly(x/2).one == Poly(1, x, domain=QQ)
def test_Poly__unify():
    """Poly._unify: merging generators and domains of two polynomials."""
    raises(UnificationFailed, lambda: Poly(x)._unify(y))
    F3 = FF(3)
    F5 = FF(5)
    # NOTE(review): unifying FF(3) with FF(5) yields FF(5) here — this
    # documents current behavior rather than a mathematically meaningful
    # field embedding.
    assert Poly(x, x, modulus=3)._unify(Poly(y, y, modulus=3))[2:] == (
        DMP([[F3(1)], []], F3), DMP([[F3(1), F3(0)]], F3))
    assert Poly(x, x, modulus=3)._unify(Poly(y, y, modulus=5))[2:] == (
        DMP([[F5(1)], []], F5), DMP([[F5(1), F5(0)]], F5))
    assert Poly(y, x, y)._unify(Poly(x, x, modulus=3))[2:] == (DMP([[F3(1), F3(0)]], F3), DMP([[F3(1)], []], F3))
    assert Poly(x, x, modulus=3)._unify(Poly(y, x, y))[2:] == (DMP([[F3(1)], []], F3), DMP([[F3(1), F3(0)]], F3))
    # ZZ unifies with QQ to QQ; generator sets are merged in order.
    assert Poly(x + 1, x)._unify(Poly(x + 2, x))[2:] == (DMP([1, 1], ZZ), DMP([1, 2], ZZ))
    assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
    assert Poly(x + 1, x)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
    assert Poly(x + 1, x)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x + 1, x)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x + 1, x, y)._unify(Poly(x + 2, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x + 1, x)._unify(Poly(x + 2, y, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
    assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, y, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x + 1, x)._unify(Poly(x + 2, y, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x + 1, y, x)._unify(Poly(x + 2, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
    assert Poly(x + 1, y, x, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x + 1, x, y)._unify(Poly(x + 2, y, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, y, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x + 1, x, y)._unify(Poly(x + 2, y, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
    assert Poly(x + 1, y, x, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    # Polynomial ring ZZ[a] unifies with fraction field ZZ(a,b).
    F, A, B = field("a,b", ZZ)
    assert Poly(a*x, x, domain='ZZ[a]')._unify(Poly(a*b*x, x, domain='ZZ(a,b)'))[2:] == \
        (DMP([A, F(0)], F.to_domain()), DMP([A*B, F(0)], F.to_domain()))
    assert Poly(a*x, x, domain='ZZ(a)')._unify(Poly(a*b*x, x, domain='ZZ(a,b)'))[2:] == \
        (DMP([A, F(0)], F.to_domain()), DMP([A*B, F(0)], F.to_domain()))
    raises(CoercionFailed, lambda: Poly(Poly(x**2 + x**2*z, y, field=True), domain='ZZ(x)'))
    f = Poly(t**2 + t/3 + x, t, domain='QQ(x)')
    g = Poly(t**2 + t/3 + x, t, domain='QQ[x]')
    assert f._unify(g)[2:] == (f.rep, f.rep)
def test_Poly_free_symbols():
    """Poly.free_symbols includes symbols from coefficients as well as gens."""
    assert Poly(x**2 + 1).free_symbols == set([x])
    assert Poly(x**2 + y*z).free_symbols == set([x, y, z])
    assert Poly(x**2 + y*z, x).free_symbols == set([x, y, z])
    assert Poly(x**2 + sin(y*z)).free_symbols == set([x, y, z])
    assert Poly(x**2 + sin(y*z), x).free_symbols == set([x, y, z])
    assert Poly(x**2 + sin(y*z), x, domain=EX).free_symbols == set([x, y, z])


def test_PurePoly_free_symbols():
    """PurePoly gens are anonymous: only coefficient symbols are free."""
    assert PurePoly(x**2 + 1).free_symbols == set([])
    assert PurePoly(x**2 + y*z).free_symbols == set([])
    assert PurePoly(x**2 + y*z, x).free_symbols == set([y, z])
    assert PurePoly(x**2 + sin(y*z)).free_symbols == set([])
    assert PurePoly(x**2 + sin(y*z), x).free_symbols == set([y, z])
    assert PurePoly(x**2 + sin(y*z), x, domain=EX).free_symbols == set([y, z])
def test_Poly__eq__():
    """Poly equality: domain-insensitive ==, strict eq/ne variants."""
    assert (Poly(x, x) == Poly(x, x)) is True
    assert (Poly(x, x, domain=QQ) == Poly(x, x)) is True
    assert (Poly(x, x) == Poly(x, x, domain=QQ)) is True
    assert (Poly(x, x, domain=ZZ[a]) == Poly(x, x)) is True
    assert (Poly(x, x) == Poly(x, x, domain=ZZ[a])) is True
    assert (Poly(x*y, x, y) == Poly(x, x)) is False
    assert (Poly(x, x, y) == Poly(x, x)) is False
    assert (Poly(x, x) == Poly(x, x, y)) is False
    # Unlike PurePoly, Poly equality is sensitive to the generator symbol.
    assert (Poly(x**2 + 1, x) == Poly(y**2 + 1, y)) is False
    assert (Poly(y**2 + 1, y) == Poly(x**2 + 1, x)) is False
    f = Poly(x, x, domain=ZZ)
    g = Poly(x, x, domain=QQ)
    assert f.eq(g) is True
    assert f.ne(g) is False
    # strict=True also compares domains.
    assert f.eq(g, strict=True) is False
    assert f.ne(g, strict=True) is True
    t0 = Symbol('t0')
    f = Poly((t0/2 + x**2)*t**2 - x**2*t, t, domain='QQ[x,t0]')
    g = Poly((t0/2 + x**2)*t**2 - x**2*t, t, domain='ZZ(x,t0)')
    assert (f == g) is True


def test_PurePoly__eq__():
    """PurePoly equality ignores the names of the generators."""
    assert (PurePoly(x, x) == PurePoly(x, x)) is True
    assert (PurePoly(x, x, domain=QQ) == PurePoly(x, x)) is True
    assert (PurePoly(x, x) == PurePoly(x, x, domain=QQ)) is True
    assert (PurePoly(x, x, domain=ZZ[a]) == PurePoly(x, x)) is True
    assert (PurePoly(x, x) == PurePoly(x, x, domain=ZZ[a])) is True
    assert (PurePoly(x*y, x, y) == PurePoly(x, x)) is False
    assert (PurePoly(x, x, y) == PurePoly(x, x)) is False
    assert (PurePoly(x, x) == PurePoly(x, x, y)) is False
    # Same structure in different variables still compares equal.
    assert (PurePoly(x**2 + 1, x) == PurePoly(y**2 + 1, y)) is True
    assert (PurePoly(y**2 + 1, y) == PurePoly(x**2 + 1, x)) is True
    f = PurePoly(x, x, domain=ZZ)
    g = PurePoly(x, x, domain=QQ)
    assert f.eq(g) is True
    assert f.ne(g) is False
    assert f.eq(g, strict=True) is False
    assert f.ne(g, strict=True) is True
    f = PurePoly(x, x, domain=ZZ)
    g = PurePoly(y, y, domain=QQ)
    assert f.eq(g) is True
    assert f.ne(g) is False
    assert f.eq(g, strict=True) is False
    assert f.ne(g, strict=True) is True


def test_PurePoly_Poly():
    """Poly and PurePoly convert into each other via their constructors."""
    assert isinstance(PurePoly(Poly(x**2 + 1)), PurePoly) is True
    assert isinstance(Poly(PurePoly(x**2 + 1)), Poly) is True
def test_Poly_get_domain():
    """get_domain reflects the inferred or requested coefficient domain."""
    assert Poly(2*x).get_domain() == ZZ
    assert Poly(2*x, domain='ZZ').get_domain() == ZZ
    assert Poly(2*x, domain='QQ').get_domain() == QQ
    assert Poly(x/2).get_domain() == QQ
    raises(CoercionFailed, lambda: Poly(x/2, domain='ZZ'))
    assert Poly(x/2, domain='QQ').get_domain() == QQ
    assert Poly(0.2*x).get_domain() == RR


def test_Poly_set_domain():
    """set_domain converts coefficients; incompatible conversions fail."""
    assert Poly(2*x + 1).set_domain(ZZ) == Poly(2*x + 1)
    assert Poly(2*x + 1).set_domain('ZZ') == Poly(2*x + 1)
    assert Poly(2*x + 1).set_domain(QQ) == Poly(2*x + 1, domain='QQ')
    assert Poly(2*x + 1).set_domain('QQ') == Poly(2*x + 1, domain='QQ')
    assert Poly(S(2)/10*x + S(1)/10).set_domain('RR') == Poly(0.2*x + 0.1)
    assert Poly(0.2*x + 0.1).set_domain('QQ') == Poly(S(2)/10*x + S(1)/10)
    raises(CoercionFailed, lambda: Poly(x/2 + 1).set_domain(ZZ))
    raises(CoercionFailed, lambda: Poly(x + 1, modulus=2).set_domain(QQ))
    # Cannot move a generator into the coefficient domain this way.
    raises(GeneratorsError, lambda: Poly(x*y, x, y).set_domain(ZZ[y]))


def test_Poly_get_modulus():
    """get_modulus only works over finite fields."""
    assert Poly(x**2 + 1, modulus=2).get_modulus() == 2
    raises(PolynomialError, lambda: Poly(x**2 + 1).get_modulus())


def test_Poly_set_modulus():
    """set_modulus re-reduces coefficients modulo the new modulus."""
    assert Poly(
        x**2 + 1, modulus=2).set_modulus(7) == Poly(x**2 + 1, modulus=7)
    assert Poly(
        x**2 + 5, modulus=7).set_modulus(2) == Poly(x**2 + 1, modulus=2)
    assert Poly(x**2 + 1).set_modulus(2) == Poly(x**2 + 1, modulus=2)
    raises(CoercionFailed, lambda: Poly(x/2 + 1).set_modulus(2))
def test_Poly_add_ground():
    """add_ground adds a ground-domain constant."""
    assert Poly(x + 1).add_ground(2) == Poly(x + 3)


def test_Poly_sub_ground():
    """sub_ground subtracts a ground-domain constant."""
    assert Poly(x + 1).sub_ground(2) == Poly(x - 1)


def test_Poly_mul_ground():
    """mul_ground multiplies all coefficients by a constant."""
    assert Poly(x + 1).mul_ground(2) == Poly(2*x + 2)


def test_Poly_quo_ground():
    """quo_ground: coefficient-wise quotient, truncating over ZZ."""
    assert Poly(2*x + 4).quo_ground(2) == Poly(x + 2)
    assert Poly(2*x + 3).quo_ground(2) == Poly(x + 1)


def test_Poly_exquo_ground():
    """exquo_ground requires an exact coefficient-wise quotient."""
    assert Poly(2*x + 4).exquo_ground(2) == Poly(x + 2)
    raises(ExactQuotientFailed, lambda: Poly(2*x + 3).exquo_ground(2))


def test_Poly_abs():
    """abs() takes absolute values of all coefficients."""
    assert Poly(-x + 1, x).abs() == abs(Poly(-x + 1, x)) == Poly(x + 1, x)


def test_Poly_neg():
    """neg() negates all coefficients; unary minus is equivalent."""
    assert Poly(-x + 1, x).neg() == -Poly(-x + 1, x) == Poly(x - 1, x)
def test_Poly_add():
    """Poly addition: method form, operator form, and mixing with Expr."""
    assert Poly(0, x).add(Poly(0, x)) == Poly(0, x)
    assert Poly(0, x) + Poly(0, x) == Poly(0, x)
    assert Poly(1, x).add(Poly(0, x)) == Poly(1, x)
    # Generators of the two operands are merged.
    assert Poly(1, x, y) + Poly(0, x) == Poly(1, x, y)
    assert Poly(0, x).add(Poly(1, x, y)) == Poly(1, x, y)
    assert Poly(0, x, y) + Poly(1, x, y) == Poly(1, x, y)
    assert Poly(1, x) + x == Poly(x + 1, x)
    # Non-polynomial operands degrade the result to a plain expression.
    assert Poly(1, x) + sin(x) == 1 + sin(x)
    assert Poly(x, x) + 1 == Poly(x + 1, x)
    assert 1 + Poly(x, x) == Poly(x + 1, x)


def test_Poly_sub():
    """Poly subtraction: method form, operator form, and mixing with Expr."""
    assert Poly(0, x).sub(Poly(0, x)) == Poly(0, x)
    assert Poly(0, x) - Poly(0, x) == Poly(0, x)
    assert Poly(1, x).sub(Poly(0, x)) == Poly(1, x)
    assert Poly(1, x, y) - Poly(0, x) == Poly(1, x, y)
    assert Poly(0, x).sub(Poly(1, x, y)) == Poly(-1, x, y)
    assert Poly(0, x, y) - Poly(1, x, y) == Poly(-1, x, y)
    assert Poly(1, x) - x == Poly(1 - x, x)
    assert Poly(1, x) - sin(x) == 1 - sin(x)
    assert Poly(x, x) - 1 == Poly(x - 1, x)
    assert 1 - Poly(x, x) == Poly(1 - x, x)


def test_Poly_mul():
    """Poly multiplication: method form, operator form, mixing with Expr."""
    assert Poly(0, x).mul(Poly(0, x)) == Poly(0, x)
    assert Poly(0, x) * Poly(0, x) == Poly(0, x)
    assert Poly(2, x).mul(Poly(4, x)) == Poly(8, x)
    assert Poly(2, x, y) * Poly(4, x) == Poly(8, x, y)
    assert Poly(4, x).mul(Poly(2, x, y)) == Poly(8, x, y)
    assert Poly(4, x, y) * Poly(2, x, y) == Poly(8, x, y)
    assert Poly(1, x) * x == Poly(x, x)
    assert Poly(1, x) * sin(x) == sin(x)
    assert Poly(x, x) * 2 == Poly(2*x, x)
    assert 2 * Poly(x, x) == Poly(2*x, x)


def test_Poly_sqr():
    """sqr() squares the polynomial."""
    assert Poly(x*y, x, y).sqr() == Poly(x**2*y**2, x, y)


def test_Poly_pow():
    """pow() with non-negative integer exponents; other exponents fall
    back to plain expressions."""
    assert Poly(x, x).pow(10) == Poly(x**10, x)
    assert Poly(x, x).pow(Integer(10)) == Poly(x**10, x)
    assert Poly(2*y, x, y).pow(4) == Poly(16*y**4, x, y)
    assert Poly(2*y, x, y).pow(Integer(4)) == Poly(16*y**4, x, y)
    assert Poly(7*x*y, x, y)**3 == Poly(343*x**3*y**3, x, y)
    assert Poly(x*y + 1, x, y)**(-1) == (x*y + 1)**(-1)
    assert Poly(x*y + 1, x, y)**x == (x*y + 1)**x
def test_Poly_divmod():
    """divmod/floordiv/mod on Poly, also with Expr and int operands."""
    f, g = Poly(x**2), Poly(x)
    q, r = g, Poly(0, x)
    assert divmod(f, g) == (q, r)
    assert f // g == q
    assert f % g == r
    assert divmod(f, x) == (q, r)
    assert f // x == q
    assert f % x == r
    q, r = Poly(0, x), Poly(2, x)
    assert divmod(2, g) == (q, r)
    assert 2 // g == q
    assert 2 % g == r
    # True division returns expressions, not Poly instances.
    assert Poly(x)/Poly(x) == 1
    assert Poly(x**2)/Poly(x) == x
    assert Poly(x)/Poly(x**2) == 1/x


def test_Poly_eq_ne():
    """== / != between Poly instances and plain expressions."""
    assert (Poly(x + y, x, y) == Poly(x + y, x, y)) is True
    assert (Poly(x + y, x) == Poly(x + y, x, y)) is False
    assert (Poly(x + y, x, y) == Poly(x + y, x)) is False
    assert (Poly(x + y, x) == Poly(x + y, x)) is True
    assert (Poly(x + y, y) == Poly(x + y, y)) is True
    # Poly compares equal to the expression it represents.
    assert (Poly(x + y, x, y) == x + y) is True
    assert (Poly(x + y, x) == x + y) is True
    assert (Poly(x + y, x, y) == x + y) is True
    assert (Poly(x + y, x) == x + y) is True
    assert (Poly(x + y, y) == x + y) is True
    assert (Poly(x + y, x, y) != Poly(x + y, x, y)) is False
    assert (Poly(x + y, x) != Poly(x + y, x, y)) is True
    assert (Poly(x + y, x, y) != Poly(x + y, x)) is True
    assert (Poly(x + y, x) != Poly(x + y, x)) is False
    assert (Poly(x + y, y) != Poly(x + y, y)) is False
    assert (Poly(x + y, x, y) != x + y) is False
    assert (Poly(x + y, x) != x + y) is False
    assert (Poly(x + y, x, y) != x + y) is False
    assert (Poly(x + y, x) != x + y) is False
    assert (Poly(x + y, y) != x + y) is False
    assert (Poly(x, x) == sin(x)) is False
    assert (Poly(x, x) != sin(x)) is True
def test_Poly_nonzero():
    """Poly truthiness: the zero polynomial is falsy, others are truthy."""
    # The original wrote `assert not bool(...) is True`, which — because
    # `is` binds tighter than `not` — parses as `not (bool(...) is True)`
    # and obscures the intent.  Assert the boolean value directly.
    assert bool(Poly(0, x)) is False
    assert bool(Poly(1, x)) is True
def test_Poly_properties():
    """Boolean structural predicates on Poly."""
    assert Poly(0, x).is_zero is True
    assert Poly(1, x).is_zero is False
    assert Poly(1, x).is_one is True
    assert Poly(2, x).is_one is False
    assert Poly(x - 1, x).is_sqf is True
    assert Poly((x - 1)**2, x).is_sqf is False
    assert Poly(x - 1, x).is_monic is True
    assert Poly(2*x - 1, x).is_monic is False
    # Primitive: coefficients have content (gcd) 1.
    assert Poly(3*x + 2, x).is_primitive is True
    assert Poly(4*x + 2, x).is_primitive is False
    assert Poly(1, x).is_ground is True
    assert Poly(x, x).is_ground is False
    assert Poly(x + y + z + 1).is_linear is True
    assert Poly(x*y*z + 1).is_linear is False
    assert Poly(x*y + z + 1).is_quadratic is True
    assert Poly(x*y*z + 1).is_quadratic is False
    assert Poly(x*y).is_monomial is True
    assert Poly(x*y + 1).is_monomial is False
    assert Poly(x**2 + x*y).is_homogeneous is True
    assert Poly(x**3 + x*y).is_homogeneous is False
    assert Poly(x).is_univariate is True
    assert Poly(x*y).is_univariate is False
    assert Poly(x*y).is_multivariate is True
    assert Poly(x).is_multivariate is False
    # The two large polynomials differ only in the sign of the x**8 term.
    assert Poly(
        x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1).is_cyclotomic is False
    assert Poly(
        x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1).is_cyclotomic is True


def test_Poly_is_irreducible():
    """Irreducibility over the rationals and over finite fields."""
    assert Poly(x**2 + x + 1).is_irreducible is True
    assert Poly(x**2 + 2*x + 1).is_irreducible is False
    assert Poly(7*x + 3, modulus=11).is_irreducible is True
    assert Poly(7*x**2 + 3*x + 1, modulus=11).is_irreducible is False
def test_Poly_subs():
    """subs on Poly: substituting into gens and coefficients."""
    assert Poly(x + 1).subs(x, 0) == 1
    assert Poly(x + 1).subs(x, x) == Poly(x + 1)
    assert Poly(x + 1).subs(x, y) == Poly(y + 1)
    # Substitution collapsing gens returns a plain expression.
    assert Poly(x*y, x).subs(y, x) == x**2
    assert Poly(x*y, x).subs(x, y) == y**2


def test_Poly_replace():
    """replace swaps one generator for a fresh symbol."""
    assert Poly(x + 1).replace(x) == Poly(x + 1)
    assert Poly(x + 1).replace(y) == Poly(y + 1)
    raises(PolynomialError, lambda: Poly(x + y).replace(z))
    assert Poly(x + 1).replace(x, x) == Poly(x + 1)
    assert Poly(x + 1).replace(x, y) == Poly(y + 1)
    assert Poly(x + y).replace(x, x) == Poly(x + y)
    assert Poly(x + y).replace(x, z) == Poly(z + y, z, y)
    assert Poly(x + y).replace(y, y) == Poly(x + y)
    assert Poly(x + y).replace(y, z) == Poly(x + z, x, z)
    # Replacement target must not collide with an existing generator.
    raises(PolynomialError, lambda: Poly(x + y).replace(x, y))
    raises(PolynomialError, lambda: Poly(x + y).replace(z, t))
    assert Poly(x + y, x).replace(x, z) == Poly(z + y, z)
    assert Poly(x + y, y).replace(y, z) == Poly(x + z, z)
    raises(PolynomialError, lambda: Poly(x + y, x).replace(x, y))
    raises(PolynomialError, lambda: Poly(x + y, y).replace(y, x))


def test_Poly_reorder():
    """reorder permutes generators explicitly or via wrt=."""
    raises(PolynomialError, lambda: Poly(x + y).reorder(x, z))
    assert Poly(x + y, x, y).reorder(x, y) == Poly(x + y, x, y)
    assert Poly(x + y, x, y).reorder(y, x) == Poly(x + y, y, x)
    assert Poly(x + y, y, x).reorder(x, y) == Poly(x + y, x, y)
    assert Poly(x + y, y, x).reorder(y, x) == Poly(x + y, y, x)
    assert Poly(x + y, x, y).reorder(wrt=x) == Poly(x + y, x, y)
    assert Poly(x + y, x, y).reorder(wrt=y) == Poly(x + y, y, x)


def test_Poly_ltrim():
    """ltrim drops leading generators the polynomial does not use."""
    f = Poly(y**2 + y*z**2, x, y, z).ltrim(y)
    assert f.as_expr() == y**2 + y*z**2 and f.gens == (y, z)
    raises(PolynomialError, lambda: Poly(x*y**2 + y**2, x, y).ltrim(y))


def test_Poly_has_only_gens():
    """has_only_gens checks which generators actually occur."""
    assert Poly(x*y + 1, x, y, z).has_only_gens(x, y) is True
    assert Poly(x*y + z, x, y, z).has_only_gens(x, y) is False
    raises(GeneratorsError, lambda: Poly(x*y**2 + y**2, x, y).has_only_gens(t))
def test_Poly_to_ring():
    """to_ring converts the domain to its associated ring (QQ -> ZZ)."""
    assert Poly(2*x + 1, domain='ZZ').to_ring() == Poly(2*x + 1, domain='ZZ')
    assert Poly(2*x + 1, domain='QQ').to_ring() == Poly(2*x + 1, domain='ZZ')
    raises(CoercionFailed, lambda: Poly(x/2 + 1).to_ring())
    raises(DomainError, lambda: Poly(2*x + 1, modulus=3).to_ring())


def test_Poly_to_field():
    """to_field converts the domain to its fraction field (ZZ -> QQ)."""
    assert Poly(2*x + 1, domain='ZZ').to_field() == Poly(2*x + 1, domain='QQ')
    assert Poly(2*x + 1, domain='QQ').to_field() == Poly(2*x + 1, domain='QQ')
    assert Poly(x/2 + 1, domain='QQ').to_field() == Poly(x/2 + 1, domain='QQ')
    # FF(p) and RR are already fields, so these are no-ops.
    assert Poly(2*x + 1, modulus=3).to_field() == Poly(2*x + 1, modulus=3)
    assert Poly(2.0*x + 1.0).to_field() == Poly(2.0*x + 1.0)


def test_Poly_to_exact():
    """to_exact replaces inexact (RR) coefficients with exact rationals."""
    assert Poly(2*x).to_exact() == Poly(2*x)
    assert Poly(x/2).to_exact() == Poly(x/2)
    assert Poly(0.1*x).to_exact() == Poly(x/10)


def test_Poly_retract():
    """retract shrinks the domain to the smallest one that fits."""
    f = Poly(x**2 + 1, x, domain=QQ[y])
    assert f.retract() == Poly(x**2 + 1, x, domain='ZZ')
    assert f.retract(field=True) == Poly(x**2 + 1, x, domain='QQ')
    # Retracting the zero polynomial must not fail.
    assert Poly(0, x, y).retract() == Poly(0, x, y)
def test_Poly_slice():
    """Poly.slice(m, n) keeps only terms with degree in [m, n); gen may be given first."""
    f = Poly(x**3 + 2*x**2 + 3*x + 4)

    assert f.slice(0, 0) == Poly(0, x)
    assert f.slice(0, 1) == Poly(4, x)
    assert f.slice(0, 2) == Poly(3*x + 4, x)
    assert f.slice(0, 3) == Poly(2*x**2 + 3*x + 4, x)
    assert f.slice(0, 4) == Poly(x**3 + 2*x**2 + 3*x + 4, x)

    # same slices with the generator passed explicitly
    assert f.slice(x, 0, 0) == Poly(0, x)
    assert f.slice(x, 0, 1) == Poly(4, x)
    assert f.slice(x, 0, 2) == Poly(3*x + 4, x)
    assert f.slice(x, 0, 3) == Poly(2*x**2 + 3*x + 4, x)
    assert f.slice(x, 0, 4) == Poly(x**3 + 2*x**2 + 3*x + 4, x)
def test_Poly_coeffs():
    """Poly.coeffs() lists nonzero coefficients in the given monomial order."""
    assert Poly(0, x).coeffs() == [0]
    assert Poly(1, x).coeffs() == [1]

    assert Poly(2*x + 1, x).coeffs() == [2, 1]

    assert Poly(7*x**2 + 2*x + 1, x).coeffs() == [7, 2, 1]
    assert Poly(7*x**4 + 2*x + 1, x).coeffs() == [7, 2, 1]

    # the monomial order changes which term is leading
    assert Poly(x*y**7 + 2*x**2*y**3).coeffs('lex') == [2, 1]
    assert Poly(x*y**7 + 2*x**2*y**3).coeffs('grlex') == [1, 2]
def test_Poly_monoms():
    """Poly.monoms() lists exponent tuples of nonzero terms in the given order."""
    assert Poly(0, x).monoms() == [(0,)]
    assert Poly(1, x).monoms() == [(0,)]

    assert Poly(2*x + 1, x).monoms() == [(1,), (0,)]

    assert Poly(7*x**2 + 2*x + 1, x).monoms() == [(2,), (1,), (0,)]
    assert Poly(7*x**4 + 2*x + 1, x).monoms() == [(4,), (1,), (0,)]

    # the monomial order changes which term comes first
    assert Poly(x*y**7 + 2*x**2*y**3).monoms('lex') == [(2, 3), (1, 7)]
    assert Poly(x*y**7 + 2*x**2*y**3).monoms('grlex') == [(1, 7), (2, 3)]
def test_Poly_terms():
    """Poly.terms() lists (monomial, coefficient) pairs in the given order."""
    assert Poly(0, x).terms() == [((0,), 0)]
    assert Poly(1, x).terms() == [((0,), 1)]

    assert Poly(2*x + 1, x).terms() == [((1,), 2), ((0,), 1)]

    assert Poly(7*x**2 + 2*x + 1, x).terms() == [((2,), 7), ((1,), 2), ((0,), 1)]
    assert Poly(7*x**4 + 2*x + 1, x).terms() == [((4,), 7), ((1,), 2), ((0,), 1)]

    # the monomial order changes which term comes first
    assert Poly(
        x*y**7 + 2*x**2*y**3).terms('lex') == [((2, 3), 2), ((1, 7), 1)]
    assert Poly(
        x*y**7 + 2*x**2*y**3).terms('grlex') == [((1, 7), 1), ((2, 3), 2)]
def test_Poly_all_coeffs():
    """Poly.all_coeffs() lists every coefficient, including zeros, down to degree 0."""
    assert Poly(0, x).all_coeffs() == [0]
    assert Poly(1, x).all_coeffs() == [1]

    assert Poly(2*x + 1, x).all_coeffs() == [2, 1]

    assert Poly(7*x**2 + 2*x + 1, x).all_coeffs() == [7, 2, 1]
    # missing degrees appear as explicit zeros
    assert Poly(7*x**4 + 2*x + 1, x).all_coeffs() == [7, 0, 0, 2, 1]
def test_Poly_all_monoms():
    """Poly.all_monoms() lists every monomial, including those with zero coefficient."""
    assert Poly(0, x).all_monoms() == [(0,)]
    assert Poly(1, x).all_monoms() == [(0,)]

    assert Poly(2*x + 1, x).all_monoms() == [(1,), (0,)]

    assert Poly(7*x**2 + 2*x + 1, x).all_monoms() == [(2,), (1,), (0,)]
    # all degrees from 4 down to 0 are present
    assert Poly(7*x**4 + 2*x + 1, x).all_monoms() == [(4,), (3,), (2,), (1,), (0,)]
def test_Poly_all_terms():
    """Poly.all_terms() lists (monomial, coeff) pairs for every degree, zeros included."""
    assert Poly(0, x).all_terms() == [((0,), 0)]
    assert Poly(1, x).all_terms() == [((0,), 1)]

    assert Poly(2*x + 1, x).all_terms() == [((1,), 2), ((0,), 1)]

    assert Poly(7*x**2 + 2*x + 1, x).all_terms() == \
        [((2,), 7), ((1,), 2), ((0,), 1)]
    # missing degrees show up with coefficient 0
    assert Poly(7*x**4 + 2*x + 1, x).all_terms() == \
        [((4,), 7), ((3,), 0), ((2,), 0), ((1,), 2), ((0,), 1)]
def test_Poly_termwise():
    """Poly.termwise() maps a function over terms; it may return a coeff or (monom, coeff)."""
    f = Poly(x**2 + 20*x + 400)
    g = Poly(x**2 + 2*x + 4)

    # function returning only the new coefficient
    def func(monom, coeff):
        (k,) = monom
        return coeff//10**(2 - k)

    assert f.termwise(func) == g

    # function returning a full (monomial, coefficient) pair
    def func(monom, coeff):
        (k,) = monom
        return (k,), coeff//10**(2 - k)

    assert f.termwise(func) == g
def test_Poly_length():
    """Poly.length() counts the nonzero terms of a polynomial."""
    cases = [
        (0, 0),
        (1, 1),
        (x, 1),
        (x + 1, 2),
        (x**2 + 1, 2),
        (x**2 + x + 1, 3),
    ]
    for expr, expected in cases:
        assert Poly(expr, x).length() == expected
def test_Poly_as_dict():
    """Poly.as_dict() maps exponent tuples to coefficients; zero poly gives {}."""
    assert Poly(0, x).as_dict() == {}
    assert Poly(0, x, y, z).as_dict() == {}

    assert Poly(1, x).as_dict() == {(0,): 1}
    assert Poly(1, x, y, z).as_dict() == {(0, 0, 0): 1}

    assert Poly(x**2 + 3, x).as_dict() == {(2,): 1, (0,): 3}
    # exponent tuples have one entry per generator
    assert Poly(x**2 + 3, x, y, z).as_dict() == {(2, 0, 0): 1, (0, 0, 0): 3}

    assert Poly(3*x**2*y*z**3 + 4*x*y + 5*x*z).as_dict() == {(2, 1, 3): 3,
                (1, 1, 0): 4, (1, 0, 1): 5}
def test_Poly_as_expr():
    """Poly.as_expr() converts back to Expr, optionally substituting generator values."""
    assert Poly(0, x).as_expr() == 0
    assert Poly(0, x, y, z).as_expr() == 0

    assert Poly(1, x).as_expr() == 1
    assert Poly(1, x, y, z).as_expr() == 1

    assert Poly(x**2 + 3, x).as_expr() == x**2 + 3
    assert Poly(x**2 + 3, x, y, z).as_expr() == x**2 + 3

    assert Poly(
        3*x**2*y*z**3 + 4*x*y + 5*x*z).as_expr() == 3*x**2*y*z**3 + 4*x*y + 5*x*z

    f = Poly(x**2 + 2*x*y**2 - y, x, y)

    assert f.as_expr() == -y + x**2 + 2*x*y**2

    # generators may be substituted via a dict or positionally
    assert f.as_expr({x: 5}) == 25 - y + 10*y**2
    assert f.as_expr({y: 6}) == -6 + 72*x + x**2

    assert f.as_expr({x: 5, y: 6}) == 379
    assert f.as_expr(5, 6) == 379

    # substituting a non-generator is an error
    raises(GeneratorsError, lambda: f.as_expr({z: 7}))
def test_Poly_lift():
    """Poly.lift() converts an algebraic-domain poly to one over the rationals."""
    assert Poly(x**4 - I*x + 17*I, x, gaussian=True).lift() == \
        Poly(x**16 + 2*x**10 + 578*x**8 + x**4 - 578*x**2 + 83521,
             x, domain='QQ')
def test_Poly_deflate():
    """Poly.deflate() factors out the gcd of exponents per generator."""
    assert Poly(0, x).deflate() == ((1,), Poly(0, x))
    assert Poly(1, x).deflate() == ((1,), Poly(1, x))
    assert Poly(x, x).deflate() == ((1,), Poly(x, x))

    assert Poly(x**2, x).deflate() == ((2,), Poly(x, x))
    assert Poly(x**17, x).deflate() == ((17,), Poly(x, x))

    # per-generator exponent gcds: x -> 2, y -> 1, z -> 11
    assert Poly(
        x**2*y*z**11 + x**4*z**11).deflate() == ((2, 1, 11), Poly(x*y*z + x**2*z))
def test_Poly_inject():
    """Poly.inject() moves domain symbols into the generators (front or back)."""
    f = Poly(x**2*y + x*y**3 + x*y + 1, x)

    assert f.inject() == Poly(x**2*y + x*y**3 + x*y + 1, x, y)
    # front=True prepends the injected generators instead of appending them
    assert f.inject(front=True) == Poly(y**3*x + y*x**2 + y*x + 1, y, x)
def test_Poly_eject():
    """Poly.eject() moves chosen generators into the coefficient domain."""
    f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)

    assert f.eject(x) == Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
    assert f.eject(y) == Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')

    ex = x + y + z + t + w
    g = Poly(ex, x, y, z, t, w)

    # generators may be ejected from either end of the generator list
    assert g.eject(x) == Poly(ex, y, z, t, w, domain='ZZ[x]')
    assert g.eject(x, y) == Poly(ex, z, t, w, domain='ZZ[x, y]')
    assert g.eject(x, y, z) == Poly(ex, t, w, domain='ZZ[x, y, z]')
    assert g.eject(w) == Poly(ex, x, y, z, t, domain='ZZ[w]')
    assert g.eject(t, w) == Poly(ex, x, y, z, domain='ZZ[w, t]')
    assert g.eject(z, t, w) == Poly(ex, x, y, domain='ZZ[w, t, z]')

    # ejection requires a plain ring domain and contiguous generators
    raises(DomainError, lambda: Poly(x*y, x, y, domain=ZZ[z]).eject(y))
    raises(NotImplementedError, lambda: Poly(x*y, x, y, z).eject(y))
def test_Poly_exclude():
    """Poly.exclude() drops generators that do not occur in the polynomial."""
    assert Poly(x, x, y).exclude() == Poly(x, x)
    assert Poly(x*y, x, y).exclude() == Poly(x*y, x, y)
    # a constant keeps its generators (nothing can be excluded to leave none)
    assert Poly(1, x, y).exclude() == Poly(1, x, y)
def test_Poly__gen_to_level():
    """Poly._gen_to_level() maps an index, symbol, or name to a generator level."""
    # negative indices count from the end, like sequence indexing
    assert Poly(1, x, y)._gen_to_level(-2) == 0
    assert Poly(1, x, y)._gen_to_level(-1) == 1
    assert Poly(1, x, y)._gen_to_level( 0) == 0
    assert Poly(1, x, y)._gen_to_level( 1) == 1

    # out-of-range indices are rejected
    raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level(-3))
    raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level( 2))

    # generators may be given as symbols or by name
    assert Poly(1, x, y)._gen_to_level(x) == 0
    assert Poly(1, x, y)._gen_to_level(y) == 1

    assert Poly(1, x, y)._gen_to_level('x') == 0
    assert Poly(1, x, y)._gen_to_level('y') == 1

    raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level(z))
    raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level('z'))
def test_Poly_degree():
    """Poly.degree()/degree() give the degree in a generator; zero poly is -oo."""
    assert Poly(0, x).degree() == -oo
    assert Poly(1, x).degree() == 0
    assert Poly(x, x).degree() == 1

    # gen may be an index, a symbol, or a name
    assert Poly(0, x).degree(gen=0) == -oo
    assert Poly(1, x).degree(gen=0) == 0
    assert Poly(x, x).degree(gen=0) == 1

    assert Poly(0, x).degree(gen=x) == -oo
    assert Poly(1, x).degree(gen=x) == 0
    assert Poly(x, x).degree(gen=x) == 1

    assert Poly(0, x).degree(gen='x') == -oo
    assert Poly(1, x).degree(gen='x') == 0
    assert Poly(x, x).degree(gen='x') == 1

    # unknown generators are rejected
    raises(PolynomialError, lambda: Poly(1, x).degree(gen=1))
    raises(PolynomialError, lambda: Poly(1, x).degree(gen=y))
    raises(PolynomialError, lambda: Poly(1, x).degree(gen='y'))

    # multivariate: degree() defaults to the first generator
    assert Poly(1, x, y).degree() == 0
    assert Poly(2*y, x, y).degree() == 0
    assert Poly(x*y, x, y).degree() == 1

    assert Poly(1, x, y).degree(gen=x) == 0
    assert Poly(2*y, x, y).degree(gen=x) == 0
    assert Poly(x*y, x, y).degree(gen=x) == 1

    assert Poly(1, x, y).degree(gen=y) == 0
    assert Poly(2*y, x, y).degree(gen=y) == 1
    assert Poly(x*y, x, y).degree(gen=y) == 1

    # the public degree() function mirrors the method
    assert degree(1, x) == 0
    assert degree(x, x) == 1

    assert degree(x*y**2, gen=x) == 1
    assert degree(x*y**2, gen=y) == 2

    assert degree(x*y**2, x, y) == 1
    assert degree(x*y**2, y, x) == 2

    raises(ComputationFailed, lambda: degree(1))
def test_Poly_degree_list():
    """Poly.degree_list()/degree_list() give per-generator degrees; zero poly is -oo."""
    assert Poly(0, x).degree_list() == (-oo,)
    assert Poly(0, x, y).degree_list() == (-oo, -oo)
    assert Poly(0, x, y, z).degree_list() == (-oo, -oo, -oo)

    assert Poly(1, x).degree_list() == (0,)
    assert Poly(1, x, y).degree_list() == (0, 0)
    assert Poly(1, x, y, z).degree_list() == (0, 0, 0)

    assert Poly(x**2*y + x**3*z**2 + 1).degree_list() == (3, 1, 2)

    assert degree_list(1, x) == (0,)
    assert degree_list(x, x) == (1,)

    assert degree_list(x*y**2) == (1, 2)

    raises(ComputationFailed, lambda: degree_list(1))
def test_Poly_total_degree():
    """Poly.total_degree() is the maximal sum of exponents over all monomials."""
    cases = [
        (x**2*y + x**3*z**2 + 1, 5),
        (x**2 + z**3, 3),
        (x*y*z + z**4, 4),
        (x**3 + x + 1, 3),
    ]
    for expr, expected in cases:
        assert Poly(expr).total_degree() == expected
def test_Poly_homogenize():
    """Poly.homogenize(s) makes the poly homogeneous using symbol s."""
    assert Poly(x**2+y).homogenize(z) == Poly(x**2+y*z)
    # the homogenizing symbol is added as a generator even if unused
    assert Poly(x+y).homogenize(z) == Poly(x+y, x, y, z)
    # an existing generator may serve as the homogenizing symbol
    assert Poly(x+y**2).homogenize(y) == Poly(x*y+y**2)
def test_Poly_homogeneous_order():
    """Poly.homogeneous_order() is the common total degree, or None if inhomogeneous."""
    assert Poly(0, x, y).homogeneous_order() == -oo
    assert Poly(1, x, y).homogeneous_order() == 0
    assert Poly(x, x, y).homogeneous_order() == 1
    assert Poly(x*y, x, y).homogeneous_order() == 2

    # mixed total degrees -> not homogeneous
    assert Poly(x + 1, x, y).homogeneous_order() is None
    assert Poly(x*y + x, x, y).homogeneous_order() is None

    assert Poly(x**5 + 2*x**3*y**2 + 9*x*y**4).homogeneous_order() == 5
    assert Poly(x**5 + 2*x**3*y**3 + 9*x*y**4).homogeneous_order() is None
def test_Poly_LC():
    """Poly.LC()/LC() return the leading coefficient under the given order."""
    assert Poly(0, x).LC() == 0
    assert Poly(1, x).LC() == 1
    assert Poly(2*x**2 + x, x).LC() == 2

    # the leading term depends on the monomial order
    assert Poly(x*y**7 + 2*x**2*y**3).LC('lex') == 2
    assert Poly(x*y**7 + 2*x**2*y**3).LC('grlex') == 1

    assert LC(x*y**7 + 2*x**2*y**3, order='lex') == 2
    assert LC(x*y**7 + 2*x**2*y**3, order='grlex') == 1
def test_Poly_TC():
    """Poly.TC() returns the trailing (constant-term) coefficient."""
    cases = [(0, 0), (1, 1), (2*x**2 + x, 0)]
    for expr, expected in cases:
        assert Poly(expr, x).TC() == expected
def test_Poly_EC():
    """Poly.EC() returns the coefficient of the last (smallest) monomial."""
    assert Poly(0, x).EC() == 0
    assert Poly(1, x).EC() == 1
    assert Poly(2*x**2 + x, x).EC() == 1

    # the ending term depends on the monomial order
    assert Poly(x*y**7 + 2*x**2*y**3).EC('lex') == 1
    assert Poly(x*y**7 + 2*x**2*y**3).EC('grlex') == 2
def test_Poly_coeff():
    """Poly.coeff_monomial() extracts the coefficient of one exact monomial."""
    assert Poly(0, x).coeff_monomial(1) == 0
    assert Poly(0, x).coeff_monomial(x) == 0

    assert Poly(1, x).coeff_monomial(1) == 1
    assert Poly(1, x).coeff_monomial(x) == 0

    assert Poly(x**8, x).coeff_monomial(1) == 0
    assert Poly(x**8, x).coeff_monomial(x**7) == 0
    assert Poly(x**8, x).coeff_monomial(x**8) == 1
    assert Poly(x**8, x).coeff_monomial(x**9) == 0

    assert Poly(3*x*y**2 + 1, x, y).coeff_monomial(1) == 1
    assert Poly(3*x*y**2 + 1, x, y).coeff_monomial(x*y**2) == 3

    p = Poly(24*x*y*exp(8) + 23*x, x, y)

    assert p.coeff_monomial(x) == 23
    assert p.coeff_monomial(y) == 0
    # non-rational coefficients like exp(8) live in the domain
    assert p.coeff_monomial(x*y) == 24*exp(8)

    # Expr.coeff collects across monomials; Poly deliberately does not
    assert p.as_expr().coeff(x) == 24*y*exp(8) + 23
    raises(NotImplementedError, lambda: p.coeff(x))

    # only a single bare monomial (no coefficient) is accepted
    raises(ValueError, lambda: Poly(x + 1).coeff_monomial(0))
    raises(ValueError, lambda: Poly(x + 1).coeff_monomial(3*x))
    raises(ValueError, lambda: Poly(x + 1).coeff_monomial(3*x*y))
def test_Poly_nth():
    """Poly.nth(*exponents) returns the coefficient at the given exponent tuple."""
    assert Poly(0, x).nth(0) == 0
    assert Poly(0, x).nth(1) == 0

    assert Poly(1, x).nth(0) == 1
    assert Poly(1, x).nth(1) == 0

    assert Poly(x**8, x).nth(0) == 0
    assert Poly(x**8, x).nth(7) == 0
    assert Poly(x**8, x).nth(8) == 1
    assert Poly(x**8, x).nth(9) == 0

    # multivariate: one exponent per generator
    assert Poly(3*x*y**2 + 1, x, y).nth(0, 0) == 1
    assert Poly(3*x*y**2 + 1, x, y).nth(1, 2) == 3
def test_Poly_LM():
    """Poly.LM()/LM() return the leading monomial under the given order."""
    assert Poly(0, x).LM() == (0,)
    assert Poly(1, x).LM() == (0,)
    assert Poly(2*x**2 + x, x).LM() == (2,)

    # the leading monomial depends on the order
    assert Poly(x*y**7 + 2*x**2*y**3).LM('lex') == (2, 3)
    assert Poly(x*y**7 + 2*x**2*y**3).LM('grlex') == (1, 7)

    # the public LM() returns an Expr, not an exponent tuple
    assert LM(x*y**7 + 2*x**2*y**3, order='lex') == x**2*y**3
    assert LM(x*y**7 + 2*x**2*y**3, order='grlex') == x*y**7
def test_Poly_LM_custom_order():
    """Poly.LM() accepts a custom key function as the monomial order."""
    f = Poly(x**2*y**3*z + x**2*y*z**3 + x*y*z + 1)
    # order by the reversed exponent tuple
    rev_lex = lambda monom: tuple(reversed(monom))

    assert f.LM(order='lex') == (2, 3, 1)
    assert f.LM(order=rev_lex) == (2, 1, 3)
def test_Poly_EM():
    """Poly.EM() returns the ending (smallest) monomial under the given order."""
    assert Poly(0, x).EM() == (0,)
    assert Poly(1, x).EM() == (0,)
    assert Poly(2*x**2 + x, x).EM() == (1,)

    # the ending monomial depends on the order
    assert Poly(x*y**7 + 2*x**2*y**3).EM('lex') == (1, 7)
    assert Poly(x*y**7 + 2*x**2*y**3).EM('grlex') == (2, 3)
def test_Poly_LT():
    """Poly.LT()/LT() return the leading (monomial, coefficient) term."""
    assert Poly(0, x).LT() == ((0,), 0)
    assert Poly(1, x).LT() == ((0,), 1)
    assert Poly(2*x**2 + x, x).LT() == ((2,), 2)

    # the leading term depends on the monomial order
    assert Poly(x*y**7 + 2*x**2*y**3).LT('lex') == ((2, 3), 2)
    assert Poly(x*y**7 + 2*x**2*y**3).LT('grlex') == ((1, 7), 1)

    # the public LT() returns an Expr
    assert LT(x*y**7 + 2*x**2*y**3, order='lex') == 2*x**2*y**3
    assert LT(x*y**7 + 2*x**2*y**3, order='grlex') == x*y**7
def test_Poly_ET():
    """Poly.ET() returns the ending (smallest) (monomial, coefficient) term."""
    assert Poly(0, x).ET() == ((0,), 0)
    assert Poly(1, x).ET() == ((0,), 1)
    assert Poly(2*x**2 + x, x).ET() == ((1,), 1)

    # the ending term depends on the monomial order
    assert Poly(x*y**7 + 2*x**2*y**3).ET('lex') == ((1, 7), 1)
    assert Poly(x*y**7 + 2*x**2*y**3).ET('grlex') == ((2, 3), 2)
def test_Poly_max_norm():
    """Poly.max_norm() is the largest absolute value among the coefficients."""
    for expr, expected in [(-1, 1), (0, 0), (1, 1)]:
        assert Poly(expr, x).max_norm() == expected
def test_Poly_l1_norm():
    """Poly.l1_norm() is the sum of absolute values of the coefficients."""
    for expr, expected in [(-1, 1), (0, 0), (1, 1)]:
        assert Poly(expr, x).l1_norm() == expected
def test_Poly_clear_denoms():
    """Poly.clear_denoms() returns (common denominator, scaled poly); convert=True also moves to the ring."""
    coeff, poly = Poly(x + 2, x).clear_denoms()
    assert coeff == 1 and poly == Poly(
        x + 2, x, domain='ZZ') and poly.get_domain() == ZZ

    # without convert the domain stays the field QQ
    coeff, poly = Poly(x/2 + 1, x).clear_denoms()
    assert coeff == 2 and poly == Poly(
        x + 2, x, domain='QQ') and poly.get_domain() == QQ

    # convert=True switches to the associated ring ZZ
    coeff, poly = Poly(x/2 + 1, x).clear_denoms(convert=True)
    assert coeff == 2 and poly == Poly(
        x + 2, x, domain='ZZ') and poly.get_domain() == ZZ

    # symbolic denominators are cleared as well
    coeff, poly = Poly(x/y + 1, x).clear_denoms(convert=True)
    assert coeff == y and poly == Poly(
        x + y, x, domain='ZZ[y]') and poly.get_domain() == ZZ[y]

    # EX domain: convert has no ring to move to, so both forms agree
    coeff, poly = Poly(x/3 + sqrt(2), x, domain='EX').clear_denoms()
    assert coeff == 3 and poly == Poly(
        x + 3*sqrt(2), x, domain='EX') and poly.get_domain() == EX

    coeff, poly = Poly(
        x/3 + sqrt(2), x, domain='EX').clear_denoms(convert=True)
    assert coeff == 3 and poly == Poly(
        x + 3*sqrt(2), x, domain='EX') and poly.get_domain() == EX
def test_Poly_rat_clear_denoms():
    """Poly.rat_clear_denoms() clears rational-function denominators from a pair."""
    f = Poly(x**2/y + 1, x)
    g = Poly(x**3 + y, x)

    assert f.rat_clear_denoms(g) == \
        (Poly(x**2 + y, x), Poly(y*x**3 + y**2, x))

    # over EX there is nothing to clear: the pair is returned unchanged
    f = f.set_domain(EX)
    g = g.set_domain(EX)

    assert f.rat_clear_denoms(g) == (f, g)
def test_Poly_integrate():
    """Poly.integrate() antidifferentiates w.r.t. generators, possibly repeatedly."""
    assert Poly(x + 1).integrate() == Poly(x**2/2 + x)
    assert Poly(x + 1).integrate(x) == Poly(x**2/2 + x)
    # (gen, n) means integrate n times in gen
    assert Poly(x + 1).integrate((x, 1)) == Poly(x**2/2 + x)

    assert Poly(x*y + 1).integrate(x) == Poly(x**2*y/2 + x)
    assert Poly(x*y + 1).integrate(y) == Poly(x*y**2/2 + y)

    assert Poly(x*y + 1).integrate(x, x) == Poly(x**3*y/6 + x**2/2)
    assert Poly(x*y + 1).integrate(y, y) == Poly(x*y**3/6 + y**2/2)

    assert Poly(x*y + 1).integrate((x, 2)) == Poly(x**3*y/6 + x**2/2)
    assert Poly(x*y + 1).integrate((y, 2)) == Poly(x*y**3/6 + y**2/2)

    # mixed partial integration commutes
    assert Poly(x*y + 1).integrate(x, y) == Poly(x**2*y**2/4 + x*y)
    assert Poly(x*y + 1).integrate(y, x) == Poly(x**2*y**2/4 + x*y)
def test_Poly_diff():
    """Poly.diff() differentiates w.r.t. generators, possibly repeatedly."""
    assert Poly(x**2 + x).diff() == Poly(2*x + 1)
    assert Poly(x**2 + x).diff(x) == Poly(2*x + 1)
    # (gen, n) means differentiate n times in gen
    assert Poly(x**2 + x).diff((x, 1)) == Poly(2*x + 1)

    assert Poly(x**2*y**2 + x*y).diff(x) == Poly(2*x*y**2 + y)
    assert Poly(x**2*y**2 + x*y).diff(y) == Poly(2*x**2*y + x)

    assert Poly(x**2*y**2 + x*y).diff(x, x) == Poly(2*y**2, x, y)
    assert Poly(x**2*y**2 + x*y).diff(y, y) == Poly(2*x**2, x, y)

    assert Poly(x**2*y**2 + x*y).diff((x, 2)) == Poly(2*y**2, x, y)
    assert Poly(x**2*y**2 + x*y).diff((y, 2)) == Poly(2*x**2, x, y)

    # mixed partial derivatives commute
    assert Poly(x**2*y**2 + x*y).diff(x, y) == Poly(4*x*y + 1)
    assert Poly(x**2*y**2 + x*y).diff(y, x) == Poly(4*x*y + 1)
def test_Poly_eval():
    """Poly.eval() substitutes values for generators, partially or fully."""
    assert Poly(0, x).eval(7) == 0
    assert Poly(1, x).eval(7) == 1
    assert Poly(x, x).eval(7) == 7

    # the generator may be named by index, symbol, or string
    assert Poly(0, x).eval(0, 7) == 0
    assert Poly(1, x).eval(0, 7) == 1
    assert Poly(x, x).eval(0, 7) == 7

    assert Poly(0, x).eval(x, 7) == 0
    assert Poly(1, x).eval(x, 7) == 1
    assert Poly(x, x).eval(x, 7) == 7

    assert Poly(0, x).eval('x', 7) == 0
    assert Poly(1, x).eval('x', 7) == 1
    assert Poly(x, x).eval('x', 7) == 7

    # unknown generators are rejected
    raises(PolynomialError, lambda: Poly(1, x).eval(1, 7))
    raises(PolynomialError, lambda: Poly(1, x).eval(y, 7))
    raises(PolynomialError, lambda: Poly(1, x).eval('y', 7))

    # partial evaluation returns a Poly in the remaining generators
    assert Poly(123, x, y).eval(7) == Poly(123, y)
    assert Poly(2*y, x, y).eval(7) == Poly(2*y, y)
    assert Poly(x*y, x, y).eval(7) == Poly(7*y, y)

    assert Poly(123, x, y).eval(x, 7) == Poly(123, y)
    assert Poly(2*y, x, y).eval(x, 7) == Poly(2*y, y)
    assert Poly(x*y, x, y).eval(x, 7) == Poly(7*y, y)

    assert Poly(123, x, y).eval(y, 7) == Poly(123, x)
    assert Poly(2*y, x, y).eval(y, 7) == Poly(14, x)
    assert Poly(x*y, x, y).eval(y, 7) == Poly(7*x, x)

    # dict, tuple, and list forms of substitution
    assert Poly(x*y + y, x, y).eval({x: 7}) == Poly(8*y, y)
    assert Poly(x*y + y, x, y).eval({y: 7}) == Poly(7*x + 7, x)

    assert Poly(x*y + y, x, y).eval({x: 6, y: 7}) == 49
    assert Poly(x*y + y, x, y).eval({x: 7, y: 6}) == 48

    assert Poly(x*y + y, x, y).eval((6, 7)) == 49
    assert Poly(x*y + y, x, y).eval([6, 7]) == 49

    # auto coercion widens ZZ to handle rational/algebraic arguments
    assert Poly(x + 1, domain='ZZ').eval(S(1)/2) == S(3)/2
    assert Poly(x + 1, domain='ZZ').eval(sqrt(2)) == sqrt(2) + 1

    raises(ValueError, lambda: Poly(x*y + y, x, y).eval((6, 7, 8)))
    raises(DomainError, lambda: Poly(x + 1, domain='ZZ').eval(S(1)/2, auto=False))

    # issue 6344
    alpha = Symbol('alpha')
    result = (2*alpha*z - 2*alpha + z**2 + 3)/(z**2 - 2*z + 1)

    f = Poly(x**2 + (alpha - 1)*x - alpha + 1, x, domain='ZZ[alpha]')
    assert f.eval((z + 1)/(z - 1)) == result

    g = Poly(x**2 + (alpha - 1)*x - alpha + 1, x, y, domain='ZZ[alpha]')
    assert g.eval((z + 1)/(z - 1)) == Poly(result, y, domain='ZZ(alpha,z)')
def test_Poly___call__():
    """Calling a Poly evaluates it positionally, partially or fully."""
    f = Poly(2*x*y + 3*x + y + 2*z)

    # each positional argument binds the next generator in order
    assert f(2) == Poly(5*y + 2*z + 6)
    assert f(2, 5) == Poly(2*z + 31)
    assert f(2, 5, 7) == 45
def test_parallel_poly_from_expr():
    """parallel_poly_from_expr() unifies a list of exprs/Polys into Polys over common gens.

    Covers every mix of Expr/Poly inputs, explicit vs. inferred generators,
    constant entries, option passing, and the all-constant failure case.

    Note: the original version repeated four of the constant-entry assertions
    verbatim; the duplicates were removed without losing any coverage.
    """
    # explicit single generator, all Expr/Poly combinations
    assert parallel_poly_from_expr(
        [x - 1, x**2 - 1], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr(
        [Poly(x - 1, x), x**2 - 1], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr(
        [x - 1, Poly(x**2 - 1, x)], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr([Poly(
        x - 1, x), Poly(x**2 - 1, x)], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]

    # explicit multiple generators: inputs are re-expressed in (x, y)
    assert parallel_poly_from_expr(
        [x - 1, x**2 - 1], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
    assert parallel_poly_from_expr([Poly(
        x - 1, x), x**2 - 1], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
    assert parallel_poly_from_expr([x - 1, Poly(
        x**2 - 1, x)], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
    assert parallel_poly_from_expr([Poly(x - 1, x), Poly(
        x**2 - 1, x)], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]

    # generators inferred from the expressions
    assert parallel_poly_from_expr(
        [x - 1, x**2 - 1])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr(
        [Poly(x - 1, x), x**2 - 1])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr(
        [x - 1, Poly(x**2 - 1, x)])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr(
        [Poly(x - 1, x), Poly(x**2 - 1, x)])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]

    # constant entries pick up the generators of their neighbors
    assert parallel_poly_from_expr(
        [1, x**2 - 1])[0] == [Poly(1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr(
        [1, Poly(x**2 - 1, x)])[0] == [Poly(1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr(
        [x**2 - 1, 1])[0] == [Poly(x**2 - 1, x), Poly(1, x)]
    assert parallel_poly_from_expr(
        [Poly(x**2 - 1, x), 1])[0] == [Poly(x**2 - 1, x), Poly(1, x)]

    # options such as order= are accepted alongside explicit generators
    assert parallel_poly_from_expr([Poly(x, x, y), Poly(y, x, y)], x, y, order='lex')[0] == \
        [Poly(x, x, y, domain='ZZ'), Poly(y, x, y, domain='ZZ')]

    # a list with no symbols at all cannot be polified
    raises(PolificationFailed, lambda: parallel_poly_from_expr([0, 1]))
def test_pdiv():
    """Pseudo-division: pdiv/prem/pquo/pexquo via methods and public functions."""
    f, g = x**2 - y**2, x - y
    q, r = x + y, 0

    F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]

    # Poly methods
    assert F.pdiv(G) == (Q, R)
    assert F.prem(G) == R
    assert F.pquo(G) == Q
    assert F.pexquo(G) == Q

    # public functions on expressions, with and without explicit generators
    assert pdiv(f, g) == (q, r)
    assert prem(f, g) == r
    assert pquo(f, g) == q
    assert pexquo(f, g) == q

    assert pdiv(f, g, x, y) == (q, r)
    assert prem(f, g, x, y) == r
    assert pquo(f, g, x, y) == q
    assert pexquo(f, g, x, y) == q

    assert pdiv(f, g, (x, y)) == (q, r)
    assert prem(f, g, (x, y)) == r
    assert pquo(f, g, (x, y)) == q
    assert pexquo(f, g, (x, y)) == q

    # Poly inputs give Poly outputs; polys= overrides the default
    assert pdiv(F, G) == (Q, R)
    assert prem(F, G) == R
    assert pquo(F, G) == Q
    assert pexquo(F, G) == Q

    assert pdiv(f, g, polys=True) == (Q, R)
    assert prem(f, g, polys=True) == R
    assert pquo(f, g, polys=True) == Q
    assert pexquo(f, g, polys=True) == Q

    assert pdiv(F, G, polys=False) == (q, r)
    assert prem(F, G, polys=False) == r
    assert pquo(F, G, polys=False) == q
    assert pexquo(F, G, polys=False) == q

    # plain numbers cannot be polified
    raises(ComputationFailed, lambda: pdiv(4, 2))
    raises(ComputationFailed, lambda: prem(4, 2))
    raises(ComputationFailed, lambda: pquo(4, 2))
    raises(ComputationFailed, lambda: pexquo(4, 2))
def test_div():
    """Polynomial division: div/rem/quo/exquo API, domain and auto= behavior."""
    f, g = x**2 - y**2, x - y
    q, r = x + y, 0

    F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]

    # Poly methods
    assert F.div(G) == (Q, R)
    assert F.rem(G) == R
    assert F.quo(G) == Q
    assert F.exquo(G) == Q

    # public functions on expressions, with and without explicit generators
    assert div(f, g) == (q, r)
    assert rem(f, g) == r
    assert quo(f, g) == q
    assert exquo(f, g) == q

    assert div(f, g, x, y) == (q, r)
    assert rem(f, g, x, y) == r
    assert quo(f, g, x, y) == q
    assert exquo(f, g, x, y) == q

    assert div(f, g, (x, y)) == (q, r)
    assert rem(f, g, (x, y)) == r
    assert quo(f, g, (x, y)) == q
    assert exquo(f, g, (x, y)) == q

    # Poly inputs give Poly outputs; polys= overrides the default
    assert div(F, G) == (Q, R)
    assert rem(F, G) == R
    assert quo(F, G) == Q
    assert exquo(F, G) == Q

    assert div(f, g, polys=True) == (Q, R)
    assert rem(f, g, polys=True) == R
    assert quo(f, g, polys=True) == Q
    assert exquo(f, g, polys=True) == Q

    assert div(F, G, polys=False) == (q, r)
    assert rem(F, G, polys=False) == r
    assert quo(F, G, polys=False) == q
    assert exquo(F, G, polys=False) == q

    # plain numbers cannot be polified
    raises(ComputationFailed, lambda: div(4, 2))
    raises(ComputationFailed, lambda: rem(4, 2))
    raises(ComputationFailed, lambda: quo(4, 2))
    raises(ComputationFailed, lambda: exquo(4, 2))

    # over ZZ the division stops early; auto=True (default) lifts to QQ
    f, g = x**2 + 1, 2*x - 4

    qz, rz = 0, x**2 + 1
    qq, rq = x/2 + 1, 5

    assert div(f, g) == (qq, rq)
    assert div(f, g, auto=True) == (qq, rq)
    assert div(f, g, auto=False) == (qz, rz)
    assert div(f, g, domain=ZZ) == (qz, rz)
    assert div(f, g, domain=QQ) == (qq, rq)
    assert div(f, g, domain=ZZ, auto=True) == (qq, rq)
    assert div(f, g, domain=ZZ, auto=False) == (qz, rz)
    assert div(f, g, domain=QQ, auto=True) == (qq, rq)
    assert div(f, g, domain=QQ, auto=False) == (qq, rq)

    assert rem(f, g) == rq
    assert rem(f, g, auto=True) == rq
    assert rem(f, g, auto=False) == rz
    assert rem(f, g, domain=ZZ) == rz
    assert rem(f, g, domain=QQ) == rq
    assert rem(f, g, domain=ZZ, auto=True) == rq
    assert rem(f, g, domain=ZZ, auto=False) == rz
    assert rem(f, g, domain=QQ, auto=True) == rq
    assert rem(f, g, domain=QQ, auto=False) == rq

    assert quo(f, g) == qq
    assert quo(f, g, auto=True) == qq
    assert quo(f, g, auto=False) == qz
    assert quo(f, g, domain=ZZ) == qz
    assert quo(f, g, domain=QQ) == qq
    assert quo(f, g, domain=ZZ, auto=True) == qq
    assert quo(f, g, domain=ZZ, auto=False) == qz
    assert quo(f, g, domain=QQ, auto=True) == qq
    assert quo(f, g, domain=QQ, auto=False) == qq

    # exquo requires an exact quotient; over ZZ it fails unless auto-lifted
    f, g, q = x**2, 2*x, x/2

    assert exquo(f, g) == q
    assert exquo(f, g, auto=True) == q
    raises(ExactQuotientFailed, lambda: exquo(f, g, auto=False))
    raises(ExactQuotientFailed, lambda: exquo(f, g, domain=ZZ))
    assert exquo(f, g, domain=QQ) == q
    assert exquo(f, g, domain=ZZ, auto=True) == q
    raises(ExactQuotientFailed, lambda: exquo(f, g, domain=ZZ, auto=False))
    assert exquo(f, g, domain=QQ, auto=True) == q
    assert exquo(f, g, domain=QQ, auto=False) == q

    # when division is exact over ZZ, results retract back to ZZ
    f, g = Poly(x**2), Poly(x)

    q, r = f.div(g)
    assert q.get_domain().is_ZZ and r.get_domain().is_ZZ
    r = f.rem(g)
    assert r.get_domain().is_ZZ
    q = f.quo(g)
    assert q.get_domain().is_ZZ
    q = f.exquo(g)
    assert q.get_domain().is_ZZ
def test_gcdex():
    """Extended Euclidean algorithm: half_gcdex/gcdex/invert API coverage."""
    f, g = 2*x, x**2 - 16
    # s*f + t*g == h with h the (monic) gcd
    s, t, h = x/32, -Rational(1, 16), 1

    F, G, S, T, H = [ Poly(u, x, domain='QQ') for u in (f, g, s, t, h) ]

    # Poly methods
    assert F.half_gcdex(G) == (S, H)
    assert F.gcdex(G) == (S, T, H)
    assert F.invert(G) == S

    # public functions, with and without explicit generators
    assert half_gcdex(f, g) == (s, h)
    assert gcdex(f, g) == (s, t, h)
    assert invert(f, g) == s

    assert half_gcdex(f, g, x) == (s, h)
    assert gcdex(f, g, x) == (s, t, h)
    assert invert(f, g, x) == s

    assert half_gcdex(f, g, (x,)) == (s, h)
    assert gcdex(f, g, (x,)) == (s, t, h)
    assert invert(f, g, (x,)) == s

    # Poly inputs give Poly outputs; polys= overrides the default
    assert half_gcdex(F, G) == (S, H)
    assert gcdex(F, G) == (S, T, H)
    assert invert(F, G) == S

    assert half_gcdex(f, g, polys=True) == (S, H)
    assert gcdex(f, g, polys=True) == (S, T, H)
    assert invert(f, g, polys=True) == S

    assert half_gcdex(F, G, polys=False) == (s, h)
    assert gcdex(F, G, polys=False) == (s, t, h)
    assert invert(F, G, polys=False) == s

    # integers work too (modular inverse etc.)
    assert half_gcdex(100, 2004) == (-20, 4)
    assert gcdex(100, 2004) == (-20, 1, 4)
    assert invert(3, 7) == 5

    # ZZ is not a field, so without auto-lifting these must fail
    raises(DomainError, lambda: half_gcdex(x + 1, 2*x + 1, auto=False))
    raises(DomainError, lambda: gcdex(x + 1, 2*x + 1, auto=False))
    raises(DomainError, lambda: invert(x + 1, 2*x + 1, auto=False))
def test_revert():
    """Poly.revert(n) computes the series inverse modulo x**n (here for cos series)."""
    f = Poly(1 - x**2/2 + x**4/24 - x**6/720)
    g = Poly(61*x**6/720 + 5*x**4/24 + x**2/2 + 1)

    assert f.revert(8) == g
def test_subresultants():
    """subresultants() returns the subresultant PRS of two polynomials."""
    f, g, h = x**2 - 2*x + 1, x**2 - 1, 2*x - 2
    F, G, H = Poly(f), Poly(g), Poly(h)

    assert F.subresultants(G) == [F, G, H]
    assert subresultants(f, g) == [f, g, h]
    assert subresultants(f, g, x) == [f, g, h]
    assert subresultants(f, g, (x,)) == [f, g, h]
    # Poly inputs give Poly outputs; polys= overrides the default
    assert subresultants(F, G) == [F, G, H]
    assert subresultants(f, g, polys=True) == [F, G, H]
    assert subresultants(F, G, polys=False) == [f, g, h]

    raises(ComputationFailed, lambda: subresultants(4, 2))
def test_resultant():
    """resultant() of two polys; zero iff they share a root; includePRS returns the PRS too."""
    f, g, h = x**2 - 2*x + 1, x**2 - 1, 0
    F, G = Poly(f), Poly(g)

    # common root x = 1 -> resultant is 0
    assert F.resultant(G) == h
    assert resultant(f, g) == h
    assert resultant(f, g, x) == h
    assert resultant(f, g, (x,)) == h
    assert resultant(F, G) == h
    assert resultant(f, g, polys=True) == h
    assert resultant(F, G, polys=False) == h
    assert resultant(f, g, includePRS=True) == (h, [f, g, 2*x - 2])

    # symbolic coefficients: res(x - a, x - b) = a - b
    f, g, h = x - a, x - b, a - b
    F, G, H = Poly(f), Poly(g), Poly(h)

    assert F.resultant(G) == H
    assert resultant(f, g) == h
    assert resultant(f, g, x) == h
    assert resultant(f, g, (x,)) == h
    assert resultant(F, G) == H
    assert resultant(f, g, polys=True) == H
    assert resultant(F, G, polys=False) == h

    raises(ComputationFailed, lambda: resultant(4, 2))
def test_discriminant():
    """discriminant() of a poly; includes the classic b**2 - 4*a*c quadratic case."""
    f, g = x**3 + 3*x**2 + 9*x - 13, -11664
    F = Poly(f)

    assert F.discriminant() == g
    assert discriminant(f) == g
    assert discriminant(f, x) == g
    assert discriminant(f, (x,)) == g
    assert discriminant(F) == g
    assert discriminant(f, polys=True) == g
    assert discriminant(F, polys=False) == g

    # symbolic coefficients: discriminant of a*x**2 + b*x + c
    f, g = a*x**2 + b*x + c, b**2 - 4*a*c
    F, G = Poly(f), Poly(g)

    assert F.discriminant() == G
    assert discriminant(f) == g
    assert discriminant(f, x, a, b, c) == g
    assert discriminant(f, (x, a, b, c)) == g
    assert discriminant(F) == G
    assert discriminant(f, polys=True) == G
    assert discriminant(F, polys=False) == g

    raises(ComputationFailed, lambda: discriminant(4))
def test_dispersion():
    """API smoke test for dispersionset()/dispersion() (math covered elsewhere)."""
    # We test only the API here. For more mathematical
    # tests see the dedicated test file.

    fp = poly((x + 1)*(x + 2), x)
    assert sorted(fp.dispersionset()) == [0, 1]
    assert fp.dispersion() == 1

    # two-argument form: dispersion between a poly and its shift
    fp = poly(x**4 - 3*x**2 + 1, x)
    gp = fp.shift(-3)
    assert sorted(fp.dispersionset(gp)) == [2, 3, 4]
    assert fp.dispersion(gp) == 4
def test_gcd_list():
    """gcd_list() over a sequence of exprs/numbers, including empty-input behavior."""
    F = [x**3 - 1, x**2 - 1, x**2 - 3*x + 2]

    assert gcd_list(F) == x - 1
    assert gcd_list(F, polys=True) == Poly(x - 1)

    # numeric and degenerate inputs
    assert gcd_list([]) == 0
    assert gcd_list([1, 2]) == 1
    assert gcd_list([4, 6, 8]) == 2

    # the single entry simplifies to 0
    assert gcd_list([x*(y + 42) - x*y - x*42]) == 0

    # with an explicit generator, an empty list gives the zero element
    gcd = gcd_list([], x)
    assert gcd.is_Number and gcd is S.Zero

    gcd = gcd_list([], x, polys=True)
    assert gcd.is_Poly and gcd.is_zero

    # polys=True without a generator cannot build a Poly from nothing
    raises(ComputationFailed, lambda: gcd_list([], polys=True))
def test_lcm_list():
    """lcm_list() over a sequence of exprs/numbers, including empty-input behavior."""
    F = [x**3 - 1, x**2 - 1, x**2 - 3*x + 2]

    assert lcm_list(F) == x**5 - x**4 - 2*x**3 - x**2 + x + 2
    assert lcm_list(F, polys=True) == Poly(x**5 - x**4 - 2*x**3 - x**2 + x + 2)

    # numeric and degenerate inputs
    assert lcm_list([]) == 1
    assert lcm_list([1, 2]) == 2
    assert lcm_list([4, 6, 8]) == 24

    # the single entry simplifies to 0
    assert lcm_list([x*(y + 42) - x*y - x*42]) == 0

    # with an explicit generator, an empty list gives the identity element
    lcm = lcm_list([], x)
    assert lcm.is_Number and lcm is S.One

    lcm = lcm_list([], x, polys=True)
    assert lcm.is_Poly and lcm.is_one

    # polys=True without a generator cannot build a Poly from nothing
    raises(ComputationFailed, lambda: lcm_list([], polys=True))
def test_gcd():
    """gcd/lcm/cofactors over ZZ, floats, integers, and finite fields.

    Note: the original version repeated the float-coefficient block
    (``1.0*x**2 - 1.0`` vs ``1.0*x - 1.0``) twice verbatim; the duplicate
    was removed without losing any coverage.
    """
    f, g = x**3 - 1, x**2 - 1
    s, t = x**2 + x + 1, x + 1
    h, r = x - 1, x**4 + x**3 - x - 1

    F, G, S, T, H, R = [ Poly(u) for u in (f, g, s, t, h, r) ]

    # Poly methods
    assert F.cofactors(G) == (H, S, T)
    assert F.gcd(G) == H
    assert F.lcm(G) == R

    # public functions, with and without explicit generators
    assert cofactors(f, g) == (h, s, t)
    assert gcd(f, g) == h
    assert lcm(f, g) == r

    assert cofactors(f, g, x) == (h, s, t)
    assert gcd(f, g, x) == h
    assert lcm(f, g, x) == r

    assert cofactors(f, g, (x,)) == (h, s, t)
    assert gcd(f, g, (x,)) == h
    assert lcm(f, g, (x,)) == r

    # Poly inputs give Poly outputs; polys= overrides the default
    assert cofactors(F, G) == (H, S, T)
    assert gcd(F, G) == H
    assert lcm(F, G) == R

    assert cofactors(f, g, polys=True) == (H, S, T)
    assert gcd(f, g, polys=True) == H
    assert lcm(f, g, polys=True) == R

    assert cofactors(F, G, polys=False) == (h, s, t)
    assert gcd(F, G, polys=False) == h
    assert lcm(F, G, polys=False) == r

    # float coefficients: gcd is computed up to the field normalization
    f, g = 1.0*x**2 - 1.0, 1.0*x - 1.0
    h, s, t = g, 1.0*x + 1.0, 1.0

    assert cofactors(f, g) == (h, s, t)
    assert gcd(f, g) == h
    assert lcm(f, g) == f

    # plain integers
    assert cofactors(8, 6) == (2, 4, 3)
    assert gcd(8, 6) == 2
    assert lcm(8, 6) == 24

    # finite field GF(11), symmetric representation (default)
    f, g = x**2 - 3*x - 4, x**3 - 4*x**2 + x - 4
    l = x**4 - 3*x**3 - 3*x**2 - 3*x - 4
    h, s, t = x - 4, x + 1, x**2 + 1

    assert cofactors(f, g, modulus=11) == (h, s, t)
    assert gcd(f, g, modulus=11) == h
    assert lcm(f, g, modulus=11) == l

    # finite field GF(11), non-symmetric representation
    f, g = x**2 + 8*x + 7, x**3 + 7*x**2 + x + 7
    l = x**4 + 8*x**3 + 8*x**2 + 8*x + 7
    h, s, t = x + 7, x + 1, x**2 + 1

    assert cofactors(f, g, modulus=11, symmetric=False) == (h, s, t)
    assert gcd(f, g, modulus=11, symmetric=False) == h
    assert lcm(f, g, modulus=11, symmetric=False) == l

    # gcd/lcm need two arguments
    raises(TypeError, lambda: gcd(x))
    raises(TypeError, lambda: lcm(x))
def test_gcd_numbers_vs_polys():
    """gcd() on numbers: result type tracks the domain (ZZ, QQ, RR)."""
    assert isinstance(gcd(3, 9), Integer)
    assert isinstance(gcd(3*x, 9), Integer)

    assert gcd(3, 9) == 3
    assert gcd(3*x, 9) == 3

    # rationals: gcd of fractions; with a symbol the content normalizes to 1
    assert isinstance(gcd(S(3)/2, S(9)/4), Rational)
    assert isinstance(gcd(S(3)/2*x, S(9)/4), Rational)

    assert gcd(S(3)/2, S(9)/4) == S(3)/4
    assert gcd(S(3)/2*x, S(9)/4) == 1

    # floats live in a field, so every nonzero gcd is 1.0
    assert isinstance(gcd(3.0, 9.0), Float)
    assert isinstance(gcd(3.0*x, 9.0), Float)

    assert gcd(3.0, 9.0) == 1.0
    assert gcd(3.0*x, 9.0) == 1.0
def test_terms_gcd():
    """terms_gcd() factors the common content/monomial out of a sum."""
    assert terms_gcd(1) == 1
    assert terms_gcd(1, x) == 1

    assert terms_gcd(x - 1) == x - 1
    assert terms_gcd(-x - 1) == -x - 1

    assert terms_gcd(2*x + 3) == 2*x + 3
    # the extracted content is kept unevaluated
    assert terms_gcd(6*x + 4) == Mul(2, 3*x + 2, evaluate=False)

    assert terms_gcd(x**3*y + x*y**3) == x*y*(x**2 + y**2)
    assert terms_gcd(2*x**3*y + 2*x*y**3) == 2*x*y*(x**2 + y**2)
    assert terms_gcd(x**3*y/2 + x*y**3/2) == x*y/2*(x**2 + y**2)

    assert terms_gcd(x**3*y + 2*x*y**3) == x*y*(x**2 + 2*y**2)
    assert terms_gcd(2*x**3*y + 4*x*y**3) == 2*x*y*(x**2 + 2*y**2)
    assert terms_gcd(2*x**3*y/3 + 4*x*y**3/5) == 2*x*y/15*(5*x**2 + 6*y**2)

    # floats: only the monomial part is extracted, not the numeric content
    assert terms_gcd(2.0*x**3*y + 4.1*x*y**3) == x*y*(2.0*x**2 + 4.1*y**2)
    assert _aresame(terms_gcd(2.0*x + 3), 2.0*x + 3)

    # expand=False leaves the product structure; deep=True recurses into args
    assert terms_gcd((3 + 3*x)*(x + x*y), expand=False) == \
        (3*x + 3)*(x*y + x)
    assert terms_gcd((3 + 3*x)*(x + x*sin(3 + 3*y)), expand=False, deep=True) == \
        3*x*(x + 1)*(sin(Mul(3, y + 1, evaluate=False)) + 1)
    assert terms_gcd(sin(x + x*y), deep=True) == \
        sin(x*(y + 1))

    # relational objects are only rewritten when deep=True
    eq = Eq(2*x, 2*y + 2*z*y)
    assert terms_gcd(eq) == eq
    assert terms_gcd(eq, deep=True) == Eq(2*x, 2*y*(z + 1))
def test_trunc():
    """trunc(f, p) reduces coefficients mod p into the symmetric range."""
    f, g = x**5 + 2*x**4 + 3*x**3 + 4*x**2 + 5*x + 6, x**5 - x**4 + x**2 - x
    F, G = Poly(f), Poly(g)

    assert F.trunc(3) == G
    assert trunc(f, 3) == g
    assert trunc(f, 3, x) == g
    assert trunc(f, 3, (x,)) == g
    assert trunc(F, 3) == G
    assert trunc(f, 3, polys=True) == G
    assert trunc(F, 3, polys=False) == g

    f, g = 6*x**5 + 5*x**4 + 4*x**3 + 3*x**2 + 2*x + 1, -x**4 + x**3 - x + 1
    F, G = Poly(f), Poly(g)

    assert F.trunc(3) == G
    assert trunc(f, 3) == g
    assert trunc(f, 3, x) == g
    assert trunc(f, 3, (x,)) == g
    assert trunc(F, 3) == G
    assert trunc(f, 3, polys=True) == G
    assert trunc(F, 3, polys=False) == g

    # trunc also works on polynomials already over a finite field
    f = Poly(x**2 + 2*x + 3, modulus=5)

    assert f.trunc(2) == Poly(x**2 + 1, modulus=5)
def test_monic():
    """Tests for monic(): dividing a polynomial by its leading coefficient."""
    f, g = 2*x - 1, x - S(1)/2
    F, G = Poly(f, domain='QQ'), Poly(g)

    assert F.monic() == G
    assert monic(f) == g
    assert monic(f, x) == g
    assert monic(f, (x,)) == g
    assert monic(F) == G
    assert monic(f, polys=True) == G
    assert monic(F, polys=False) == g

    raises(ComputationFailed, lambda: monic(4))

    # auto=False: no automatic conversion to a field, so division must be exact
    assert monic(2*x**2 + 6*x + 4, auto=False) == x**2 + 3*x + 2
    raises(ExactQuotientFailed, lambda: monic(2*x + 6*x + 1, auto=False))

    assert monic(2.0*x**2 + 6.0*x + 4.0) == 1.0*x**2 + 3.0*x + 2.0
    assert monic(2*x**2 + 3*x + 4, modulus=5) == x**2 - x + 2
def test_content():
f, F = 4*x + 2, Poly(4*x + 2)
assert F.content() == 2
assert content(f) == 2
raises(ComputationFailed, lambda: content(4))
f = Poly(2*x, modulus=3)
assert f.content() == 1
def test_primitive():
    """Tests for primitive(): splitting a polynomial into content and primitive part."""
    f, g = 4*x + 2, 2*x + 1
    F, G = Poly(f), Poly(g)

    assert F.primitive() == (2, G)
    assert primitive(f) == (2, g)
    assert primitive(f, x) == (2, g)
    assert primitive(f, (x,)) == (2, g)
    assert primitive(F) == (2, G)
    assert primitive(f, polys=True) == (2, G)
    assert primitive(F, polys=False) == (2, g)

    raises(ComputationFailed, lambda: primitive(4))

    # over GF(3) and RR the content is the multiplicative identity
    f = Poly(2*x, modulus=3)
    g = Poly(2.0*x, domain=RR)

    assert f.primitive() == (1, f)
    assert g.primitive() == (1.0, g)

    assert primitive(S('-3*x/4 + y + 11/8')) == \
        S('(1/8, -6*x + 8*y + 11)')
def test_compose():
    """Tests for compose()/decompose(): functional (de)composition of polynomials."""
    f = x**12 + 20*x**10 + 150*x**8 + 500*x**6 + 625*x**4 - 2*x**3 - 10*x + 9
    g = x**4 - 2*x + 9
    h = x**3 + 5*x

    F, G, H = map(Poly, (f, g, h))

    # f == g(h(x)) by construction
    assert G.compose(H) == F
    assert compose(g, h) == f
    assert compose(g, h, x) == f
    assert compose(g, h, (x,)) == f
    assert compose(G, H) == F
    assert compose(g, h, polys=True) == F
    assert compose(G, H, polys=False) == f

    assert F.decompose() == [G, H]
    assert decompose(f) == [g, h]
    assert decompose(f, x) == [g, h]
    assert decompose(f, (x,)) == [g, h]
    assert decompose(F) == [G, H]
    assert decompose(f, polys=True) == [G, H]
    assert decompose(F, polys=False) == [g, h]

    raises(ComputationFailed, lambda: compose(4, 2))
    raises(ComputationFailed, lambda: decompose(4))

    # composition is performed in the first listed generator
    assert compose(x**2 - y**2, x - y, x, y) == x**2 - 2*x*y
    assert compose(x**2 - y**2, x - y, y, x) == -y**2 + 2*x*y
def test_shift():
    """Test for Poly.shift(): the substitution x -> x + a, here with a = 2."""
    assert Poly(x**2 - 2*x + 1, x).shift(2) == Poly(x**2 + 2*x + 1, x)
def test_sturm():
    """Tests for sturm(): Sturm sequences used for real root counting."""
    f, F = x, Poly(x, domain='QQ')
    g, G = 1, Poly(1, x, domain='QQ')

    assert F.sturm() == [F, G]
    assert sturm(f) == [f, g]
    assert sturm(f, x) == [f, g]
    assert sturm(f, (x,)) == [f, g]
    assert sturm(F) == [F, G]
    assert sturm(f, polys=True) == [F, G]
    assert sturm(F, polys=False) == [f, g]

    raises(ComputationFailed, lambda: sturm(4))
    # without auto field conversion the ring domain cannot be used
    raises(DomainError, lambda: sturm(f, auto=False))

    # Sturm sequence over a rational function field ZZ(pi)
    f = Poly(S(1024)/(15625*pi**8)*x**5
           - S(4096)/(625*pi**8)*x**4
           + S(32)/(15625*pi**4)*x**3
           - S(128)/(625*pi**4)*x**2
           + S(1)/62500*x
           - S(1)/625, x, domain='ZZ(pi)')

    assert sturm(f) == \
        [Poly(x**3 - 100*x**2 + pi**4/64*x - 25*pi**4/16, x, domain='ZZ(pi)'),
         Poly(3*x**2 - 200*x + pi**4/64, x, domain='ZZ(pi)'),
         Poly((S(20000)/9 - pi**4/96)*x + 25*pi**4/18, x, domain='ZZ(pi)'),
         Poly((-3686400000000*pi**4 - 11520000*pi**8 - 9*pi**12)/(26214400000000 - 245760000*pi**4 + 576*pi**8), x, domain='ZZ(pi)')]
def test_gff():
    """Tests for gff_list(): greatest factorial factorization."""
    f = x**5 + 2*x**4 - x**3 - 2*x**2

    assert Poly(f).gff_list() == [(Poly(x), 1), (Poly(x + 2), 4)]
    assert gff_list(f) == [(x, 1), (x + 2, 4)]

    # gff() itself is not implemented, only the list form
    raises(NotImplementedError, lambda: gff(f))

    f = x*(x - 1)**3*(x - 2)**2*(x - 4)**2*(x - 5)

    assert Poly(f).gff_list() == [(
        Poly(x**2 - 5*x + 4), 1), (Poly(x**2 - 5*x + 4), 2), (Poly(x), 3)]
    assert gff_list(f) == [(x**2 - 5*x + 4, 1), (x**2 - 5*x + 4, 2), (x, 3)]

    raises(NotImplementedError, lambda: gff(f))
def test_sqf_norm():
    """Tests for sqf_norm(): square-free norms over algebraic number fields."""
    assert sqf_norm(x**2 - 2, extension=sqrt(3)) == \
        (1, x**2 - 2*sqrt(3)*x + 1, x**4 - 10*x**2 + 1)
    assert sqf_norm(x**2 - 3, extension=sqrt(2)) == \
        (1, x**2 - 2*sqrt(2)*x - 1, x**4 - 10*x**2 + 1)

    assert Poly(x**2 - 2, extension=sqrt(3)).sqf_norm() == \
        (1, Poly(x**2 - 2*sqrt(3)*x + 1, x, extension=sqrt(3)),
            Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
    assert Poly(x**2 - 3, extension=sqrt(2)).sqf_norm() == \
        (1, Poly(x**2 - 2*sqrt(2)*x - 1, x, extension=sqrt(2)),
            Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
def test_sqf():
    """Tests for square-free decomposition: sqf_part(), sqf_list(), sqf()."""
    f = x**5 - x**3 - x**2 + 1
    g = x**3 + 2*x**2 + 2*x + 1
    h = x - 1

    p = x**4 + x**3 - x - 1

    F, G, H, P = map(Poly, (f, g, h, p))

    assert F.sqf_part() == P
    assert sqf_part(f) == p
    assert sqf_part(f, x) == p
    assert sqf_part(f, (x,)) == p
    assert sqf_part(F) == P
    assert sqf_part(f, polys=True) == P
    assert sqf_part(F, polys=False) == p

    # f == g * h**2
    assert F.sqf_list() == (1, [(G, 1), (H, 2)])
    assert sqf_list(f) == (1, [(g, 1), (h, 2)])
    assert sqf_list(f, x) == (1, [(g, 1), (h, 2)])
    assert sqf_list(f, (x,)) == (1, [(g, 1), (h, 2)])
    assert sqf_list(F) == (1, [(G, 1), (H, 2)])
    assert sqf_list(f, polys=True) == (1, [(G, 1), (H, 2)])
    assert sqf_list(F, polys=False) == (1, [(g, 1), (h, 2)])

    assert F.sqf_list_include() == [(G, 1), (H, 2)]

    raises(ComputationFailed, lambda: sqf_part(4))

    assert sqf(1) == 1
    assert sqf_list(1) == (1, [])

    assert sqf((2*x**2 + 2)**7) == 128*(x**2 + 1)**7

    assert sqf(f) == g*h**2
    assert sqf(f, x) == g*h**2
    assert sqf(f, (x,)) == g*h**2

    d = x**2 + y**2

    assert sqf(f/d) == (g*h**2)/d
    assert sqf(f/d, x) == (g*h**2)/d
    assert sqf(f/d, (x,)) == (g*h**2)/d

    assert sqf(x - 1) == x - 1
    assert sqf(-x - 1) == -x - 1

    assert sqf(x - 1) == x - 1
    # evaluate=False: the primitive form must stay unevaluated
    assert sqf(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)

    assert sqf((6*x - 10)/(3*x - 6)) == S(2)/3*((3*x - 5)/(x - 2))
    assert sqf(Poly(x**2 - 2*x + 1)) == (x - 1)**2

    f = 3 + x - x*(1 + x) + x**2

    assert sqf(f) == 3

    # huge exponents exercise the fast multiplicity handling
    f = (x**2 + 2*x + 1)**20000000000

    assert sqf(f) == (x + 1)**40000000000
    assert sqf_list(f) == (1, [(x + 1, 40000000000)])
def test_factor():
    """Tests for factor()/factor_list(): polynomial factorization."""
    f = x**5 - x**3 - x**2 + 1

    u = x + 1
    v = x - 1
    w = x**2 + x + 1

    F, U, V, W = map(Poly, (f, u, v, w))

    # f == u * v**2 * w
    assert F.factor_list() == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(f) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(f, x) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(f, (x,)) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(F) == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(f, polys=True) == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(F, polys=False) == (1, [(u, 1), (v, 2), (w, 1)])

    assert F.factor_list_include() == [(U, 1), (V, 2), (W, 1)]

    assert factor_list(1) == (1, [])
    assert factor_list(6) == (6, [])
    assert factor_list(sqrt(3), x) == (1, [(3, S.Half)])
    assert factor_list((-1)**x, x) == (1, [(-1, x)])
    assert factor_list((2*x)**y, x) == (1, [(2, y), (x, y)])
    assert factor_list(sqrt(x*y), x) == (1, [(x*y, S.Half)])

    assert factor(6) == 6 and factor(6).is_Integer

    assert factor_list(3*x) == (3, [(x, 1)])
    assert factor_list(3*x**2) == (3, [(x, 2)])

    assert factor(3*x) == 3*x
    assert factor(3*x**2) == 3*x**2

    assert factor((2*x**2 + 2)**7) == 128*(x**2 + 1)**7

    assert factor(f) == u*v**2*w
    assert factor(f, x) == u*v**2*w
    assert factor(f, (x,)) == u*v**2*w

    g, p, q, r = x**2 - y**2, x - y, x + y, x**2 + 1

    assert factor(f/g) == (u*v**2*w)/(p*q)
    assert factor(f/g, x) == (u*v**2*w)/(p*q)
    assert factor(f/g, (x,)) == (u*v**2*w)/(p*q)

    p = Symbol('p', positive=True)
    i = Symbol('i', integer=True)
    r = Symbol('r', real=True)

    assert factor(sqrt(x*y)).is_Pow is True

    assert factor(sqrt(3*x**2 - 3)) == sqrt(3)*sqrt((x - 1)*(x + 1))
    assert factor(sqrt(3*x**2 + 3)) == sqrt(3)*sqrt(x**2 + 1)

    # integer exponents distribute over the factors
    assert factor((y*x**2 - y)**i) == y**i*(x - 1)**i*(x + 1)**i
    assert factor((y*x**2 + y)**i) == y**i*(x**2 + 1)**i

    # symbolic exponents do not
    assert factor((y*x**2 - y)**t) == (y*(x - 1)*(x + 1))**t
    assert factor((y*x**2 + y)**t) == (y*(x**2 + 1))**t

    f = sqrt(expand((r**2 + 1)*(p + 1)*(p - 1)*(p - 2)**3))
    g = sqrt((p - 2)**3*(p - 1))*sqrt(p + 1)*sqrt(r**2 + 1)

    assert factor(f) == g
    assert factor(g) == g

    g = (x - 1)**5*(r**2 + 1)
    f = sqrt(expand(g))

    assert factor(f) == sqrt(g)

    # EX domain: the polynomial is its own (trivial) factorization
    f = Poly(sin(1)*x + 1, x, domain=EX)

    assert f.factor_list() == (1, [(f, 1)])

    f = x**4 + 1

    assert factor(f) == f
    assert factor(f, extension=I) == (x**2 - I)*(x**2 + I)
    assert factor(f, gaussian=True) == (x**2 - I)*(x**2 + I)
    assert factor(
        f, extension=sqrt(2)) == (x**2 + sqrt(2)*x + 1)*(x**2 - sqrt(2)*x + 1)

    f = x**2 + 2*sqrt(2)*x + 2

    assert factor(f, extension=sqrt(2)) == (x + sqrt(2))**2
    assert factor(f**3, extension=sqrt(2)) == (x + sqrt(2))**6

    assert factor(x**2 - 2*y**2, extension=sqrt(2)) == \
        (x + sqrt(2)*y)*(x - sqrt(2)*y)
    assert factor(2*x**2 - 4*y**2, extension=sqrt(2)) == \
        2*((x + sqrt(2)*y)*(x - sqrt(2)*y))

    assert factor(x - 1) == x - 1
    assert factor(-x - 1) == -x - 1

    assert factor(x - 1) == x - 1

    assert factor(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)

    # symmetric vs. non-symmetric coefficient representation mod 65537
    assert factor(x**11 + x + 1, modulus=65537, symmetric=True) == \
        (x**2 + x + 1)*(x**9 - x**8 + x**6 - x**5 + x**3 - x** 2 + 1)
    assert factor(x**11 + x + 1, modulus=65537, symmetric=False) == \
        (x**2 + x + 1)*(x**9 + 65536*x**8 + x**6 + 65536*x**5 +
            x**3 + 65536*x** 2 + 1)

    f = x/pi + x*sin(x)/pi
    g = y/(pi**2 + 2*pi + 1) + y*sin(x)/(pi**2 + 2*pi + 1)

    assert factor(f) == x*(sin(x) + 1)/pi
    assert factor(g) == y*(sin(x) + 1)/(pi + 1)**2

    assert factor(Eq(
        x**2 + 2*x + 1, x**3 + 1)) == Eq((x + 1)**2, (x + 1)*(x**2 - x + 1))

    f = (x**2 - 1)/(x**2 + 4*x + 4)

    assert factor(f) == (x + 1)*(x - 1)/(x + 2)**2
    assert factor(f, x) == (x + 1)*(x - 1)/(x + 2)**2

    f = 3 + x - x*(1 + x) + x**2

    assert factor(f) == 3
    assert factor(f, x) == 3

    assert factor(1/(x**2 + 2*x + 1/x) - 1) == -((1 - x + 2*x**2 +
                  x**3)/(1 + 2*x**2 + x**3))

    assert factor(f, expand=False) == f
    raises(PolynomialError, lambda: factor(f, x, expand=False))

    raises(FlagError, lambda: factor(x**2 - 1, polys=True))

    assert factor([x, Eq(x**2 - y**2, Tuple(x**2 - z**2, 1/x + 1/y))]) == \
        [x, Eq((x - y)*(x + y), Tuple((x - z)*(x + z), (x + y)/x/y))]

    # factoring must preserve the Poly vs. PurePoly distinction
    assert not isinstance(
        Poly(x**3 + x + 1).factor_list()[1][0][0], PurePoly) is True
    assert isinstance(
        PurePoly(x**3 + x + 1).factor_list()[1][0][0], PurePoly) is True

    assert factor(sqrt(-x)) == sqrt(-x)

    # issue 5917
    e = (-2*x*(-x + 1)*(x - 1)*(-x*(-x + 1)*(x - 1) - x*(x - 1)**2)*(x**2*(x -
         1) - x*(x - 1) - x) - (-2*x**2*(x - 1)**2 - x*(-x + 1)*(-x*(-x + 1) +
         x*(x - 1)))*(x**2*(x - 1)**4 - x*(-x*(-x + 1)*(x - 1) - x*(x - 1)**2)))
    assert factor(e) == 0

    # deep option
    assert factor(sin(x**2 + x) + x, deep=True) == sin(x*(x + 1)) + x

    assert factor(sqrt(x**2)) == sqrt(x**2)
def test_factor_large():
    """Tests for factor() on very large powers (exercises the fast exponent paths)."""
    f = (x**2 + 4*x + 4)**10000000*(x**2 + 1)*(x**2 + 2*x + 1)**1234567
    g = ((x**2 + 2*x + 1)**3000*y**2 + (x**2 + 2*x + 1)**3000*2*y + (
        x**2 + 2*x + 1)**3000)

    assert factor(f) == (x + 2)**20000000*(x**2 + 1)*(x + 1)**2469134
    assert factor(g) == (x + 1)**6000*(y + 1)**2

    assert factor_list(
        f) == (1, [(x + 1, 2469134), (x + 2, 20000000), (x**2 + 1, 1)])
    assert factor_list(g) == (1, [(y + 1, 2), (x + 1, 6000)])

    f = (x**2 - y**2)**200000*(x**7 + 1)
    g = (x**2 + y**2)**200000*(x**7 + 1)

    assert factor(f) == \
        (x + 1)*(x - y)**200000*(x + y)**200000*(x**6 - x**5 +
         x**4 - x**3 + x**2 - x + 1)
    assert factor(g, gaussian=True) == \
        (x + 1)*(x - I*y)**200000*(x + I*y)**200000*(x**6 - x**5 +
         x**4 - x**3 + x**2 - x + 1)

    assert factor_list(f) == \
        (1, [(x + 1, 1), (x - y, 200000), (x + y, 200000), (x**6 -
         x**5 + x**4 - x**3 + x**2 - x + 1, 1)])
    assert factor_list(g, gaussian=True) == \
        (1, [(x + 1, 1), (x - I*y, 200000), (x + I*y, 200000), (
            x**6 - x**5 + x**4 - x**3 + x**2 - x + 1, 1)])
@XFAIL
def test_factor_noeval():
    """Known-failing: factor() should keep these partially factored forms unevaluated."""
    assert factor(6*x - 10) == 2*(3*x - 5)
    assert factor((6*x - 10)/(3*x - 6)) == S(2)/3*((3*x - 5)/(x - 2))
def test_intervals():
    """Tests for intervals(): isolating intervals for real (and complex) roots."""
    assert intervals(0) == []
    assert intervals(1) == []

    assert intervals(x, sqf=True) == [(0, 0)]
    assert intervals(x) == [((0, 0), 1)]

    assert intervals(x**128) == [((0, 0), 128)]
    assert intervals([x**2, x**4]) == [((0, 0), {0: 2, 1: 4})]

    f = Poly((2*x/5 - S(17)/3)*(4*x + S(1)/257))

    assert f.intervals(sqf=True) == [(-1, 0), (14, 15)]
    assert f.intervals() == [((-1, 0), 1), ((14, 15), 1)]

    assert f.intervals(fast=True, sqf=True) == [(-1, 0), (14, 15)]
    assert f.intervals(fast=True) == [((-1, 0), 1), ((14, 15), 1)]

    # eps accepts both exact rationals and floats; results must agree
    assert f.intervals(eps=S(1)/10) == f.intervals(eps=0.1) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/100) == f.intervals(eps=0.01) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/1000) == f.intervals(eps=0.001) == \
        [((-S(1)/1002, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/10000) == f.intervals(eps=0.0001) == \
        [((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]

    f = (2*x/5 - S(17)/3)*(4*x + S(1)/257)

    assert intervals(f, sqf=True) == [(-1, 0), (14, 15)]
    assert intervals(f) == [((-1, 0), 1), ((14, 15), 1)]

    assert intervals(f, eps=S(1)/10) == intervals(f, eps=0.1) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/100) == intervals(f, eps=0.01) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/1000) == intervals(f, eps=0.001) == \
        [((-S(1)/1002, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/10000) == intervals(f, eps=0.0001) == \
        [((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]

    f = Poly((x**2 - 2)*(x**2 - 3)**7*(x + 1)*(7*x + 3)**3)

    assert f.intervals() == \
        [((-2, -S(3)/2), 7), ((-S(3)/2, -1), 1),
         ((-1, -1), 1), ((-1, 0), 3),
         ((1, S(3)/2), 1), ((S(3)/2, 2), 7)]

    assert intervals([x**5 - 200, x**5 - 201]) == \
        [((S(75)/26, S(101)/35), {0: 1}), ((S(309)/107, S(26)/9), {1: 1})]

    assert intervals([x**5 - 200, x**5 - 201], fast=True) == \
        [((S(75)/26, S(101)/35), {0: 1}), ((S(309)/107, S(26)/9), {1: 1})]

    assert intervals([x**2 - 200, x**2 - 201]) == \
        [((-S(71)/5, -S(85)/6), {1: 1}), ((-S(85)/6, -14), {0: 1}),
         ((14, S(85)/6), {0: 1}), ((S(85)/6, S(71)/5), {1: 1})]

    assert intervals([x + 1, x + 2, x - 1, x + 1, 1, x - 1, x - 1, (x - 2)**2]) == \
        [((-2, -2), {1: 1}), ((-1, -1), {0: 1, 3: 1}), ((1, 1), {2:
            1, 5: 1, 6: 1}), ((2, 2), {7: 2})]

    f, g, h = x**2 - 2, x**4 - 4*x**2 + 4, x - 1

    # inf/sup bound the search range for isolating intervals
    assert intervals(f, inf=S(7)/4, sqf=True) == []
    assert intervals(f, inf=S(7)/5, sqf=True) == [(S(7)/5, S(3)/2)]
    assert intervals(f, sup=S(7)/4, sqf=True) == [(-2, -1), (1, S(3)/2)]
    assert intervals(f, sup=S(7)/5, sqf=True) == [(-2, -1)]

    assert intervals(g, inf=S(7)/4) == []
    assert intervals(g, inf=S(7)/5) == [((S(7)/5, S(3)/2), 2)]
    assert intervals(g, sup=S(7)/4) == [((-2, -1), 2), ((1, S(3)/2), 2)]
    assert intervals(g, sup=S(7)/5) == [((-2, -1), 2)]

    assert intervals([g, h], inf=S(7)/4) == []
    assert intervals([g, h], inf=S(7)/5) == [((S(7)/5, S(3)/2), {0: 2})]
    assert intervals([g, h], sup=S(
        7)/4) == [((-2, -1), {0: 2}), ((1, 1), {1: 1}), ((1, S(3)/2), {0: 2})]
    assert intervals(
        [g, h], sup=S(7)/5) == [((-2, -1), {0: 2}), ((1, 1), {1: 1})]

    assert intervals([x + 2, x**2 - 2]) == \
        [((-2, -2), {0: 1}), ((-2, -1), {1: 1}), ((1, 2), {1: 1})]
    assert intervals([x + 2, x**2 - 2], strict=True) == \
        [((-2, -2), {0: 1}), ((-S(3)/2, -1), {1: 1}), ((1, 2), {1: 1})]

    f = 7*z**4 - 19*z**3 + 20*z**2 + 17*z + 20

    # no real roots ...
    assert intervals(f) == []

    real_part, complex_part = intervals(f, all=True, sqf=True)

    assert real_part == []
    # ... and every complex rectangle must enclose its numerical root
    assert all(re(a) < re(r) < re(b) and im(
        a) < im(r) < im(b) for (a, b), r in zip(complex_part, nroots(f)))

    assert complex_part == [(-S(40)/7 - 40*I/7, 0), (-S(40)/7, 40*I/7),
                            (-40*I/7, S(40)/7), (0, S(40)/7 + 40*I/7)]

    real_part, complex_part = intervals(f, all=True, sqf=True, eps=S(1)/10)

    assert real_part == []
    assert all(re(a) < re(r) < re(b) and im(
        a) < im(r) < im(b) for (a, b), r in zip(complex_part, nroots(f)))

    raises(ValueError, lambda: intervals(x**2 - 2, eps=10**-100000))
    raises(ValueError, lambda: Poly(x**2 - 2).intervals(eps=10**-100000))
    raises(
        ValueError, lambda: intervals([x**2 - 2, x**2 - 3], eps=10**-100000))
def test_refine_root():
    """Tests for refine_root(): narrowing an isolating interval of a real root."""
    f = Poly(x**2 - 2)

    # steps=0 leaves the interval unchanged
    assert f.refine_root(1, 2, steps=0) == (1, 2)
    assert f.refine_root(-2, -1, steps=0) == (-2, -1)

    assert f.refine_root(1, 2, steps=None) == (1, S(3)/2)
    assert f.refine_root(-2, -1, steps=None) == (-S(3)/2, -1)

    assert f.refine_root(1, 2, steps=1) == (1, S(3)/2)
    assert f.refine_root(-2, -1, steps=1) == (-S(3)/2, -1)

    assert f.refine_root(1, 2, steps=1, fast=True) == (1, S(3)/2)
    assert f.refine_root(-2, -1, steps=1, fast=True) == (-S(3)/2, -1)

    assert f.refine_root(1, 2, eps=S(1)/100) == (S(24)/17, S(17)/12)
    assert f.refine_root(1, 2, eps=1e-2) == (S(24)/17, S(17)/12)

    # f**2 is not square-free, so refinement must be rejected
    raises(PolynomialError, lambda: (f**2).refine_root(1, 2, check_sqf=True))

    raises(RefinementFailed, lambda: (f**2).refine_root(1, 2))
    raises(RefinementFailed, lambda: (f**2).refine_root(2, 3))

    f = x**2 - 2

    assert refine_root(f, 1, 2, steps=1) == (1, S(3)/2)
    assert refine_root(f, -2, -1, steps=1) == (-S(3)/2, -1)

    assert refine_root(f, 1, 2, steps=1, fast=True) == (1, S(3)/2)
    assert refine_root(f, -2, -1, steps=1, fast=True) == (-S(3)/2, -1)

    assert refine_root(f, 1, 2, eps=S(1)/100) == (S(24)/17, S(17)/12)
    assert refine_root(f, 1, 2, eps=1e-2) == (S(24)/17, S(17)/12)

    raises(PolynomialError, lambda: refine_root(1, 7, 8, eps=S(1)/100))

    raises(ValueError, lambda: Poly(f).refine_root(1, 2, eps=10**-100000))
    raises(ValueError, lambda: refine_root(f, 1, 2, eps=10**-100000))
def test_count_roots():
    """Tests for count_roots(): counting roots within real/complex bounds."""
    assert count_roots(x**2 - 2) == 2

    assert count_roots(x**2 - 2, inf=-oo) == 2
    assert count_roots(x**2 - 2, sup=+oo) == 2
    assert count_roots(x**2 - 2, inf=-oo, sup=+oo) == 2

    assert count_roots(x**2 - 2, inf=-2) == 2
    assert count_roots(x**2 - 2, inf=-1) == 1

    assert count_roots(x**2 - 2, sup=1) == 1
    assert count_roots(x**2 - 2, sup=2) == 2

    # NOTE(review): the original repeated the next two assertions verbatim;
    # the exact duplicates were dropped.
    assert count_roots(x**2 - 2, inf=-1, sup=1) == 0
    assert count_roots(x**2 - 2, inf=-2, sup=2) == 2

    # purely imaginary roots: counted only with complex bounds
    assert count_roots(x**2 + 2) == 0
    assert count_roots(x**2 + 2, inf=-2*I) == 2
    assert count_roots(x**2 + 2, sup=+2*I) == 2
    assert count_roots(x**2 + 2, inf=-2*I, sup=+2*I) == 2

    assert count_roots(x**2 + 2, inf=0) == 0
    assert count_roots(x**2 + 2, sup=0) == 0

    assert count_roots(x**2 + 2, inf=-I) == 1
    assert count_roots(x**2 + 2, sup=+I) == 1

    assert count_roots(x**2 + 2, inf=+I/2, sup=+I) == 0
    assert count_roots(x**2 + 2, inf=-I, sup=-I/2) == 0

    raises(PolynomialError, lambda: count_roots(1))
def test_Poly_root():
    """Tests for Poly.root(): indexed access to a polynomial's roots."""
    f = Poly(2*x**3 - 7*x**2 + 4*x + 4)

    assert f.root(0) == -S(1)/2
    assert f.root(1) == 2
    assert f.root(2) == 2
    # only three roots exist, so index 3 is out of range
    raises(IndexError, lambda: f.root(3))

    assert Poly(x**5 + x + 1).root(0) == RootOf(x**3 - x**2 + 1, 0)
def test_real_roots():
    """Tests for real_roots(): real roots with multiplicities."""
    assert real_roots(x) == [0]
    assert real_roots(x, multiple=False) == [(0, 1)]

    assert real_roots(x**3) == [0, 0, 0]
    assert real_roots(x**3, multiple=False) == [(0, 3)]

    assert real_roots(x*(x**3 + x + 3)) == [RootOf(x**3 + x + 3, 0), 0]
    assert real_roots(x*(x**3 + x + 3), multiple=False) == [(RootOf(
        x**3 + x + 3, 0), 1), (0, 1)]

    assert real_roots(
        x**3*(x**3 + x + 3)) == [RootOf(x**3 + x + 3, 0), 0, 0, 0]
    assert real_roots(x**3*(x**3 + x + 3), multiple=False) == [(RootOf(
        x**3 + x + 3, 0), 1), (0, 3)]

    f = 2*x**3 - 7*x**2 + 4*x + 4
    g = x**3 + x + 1

    assert Poly(f).real_roots() == [-S(1)/2, 2, 2]
    assert Poly(g).real_roots() == [RootOf(g, 0)]
def test_all_roots():
    """Tests for Poly.all_roots(): all real and complex roots, with multiplicity."""
    f = 2*x**3 - 7*x**2 + 4*x + 4
    g = x**3 + x + 1

    assert Poly(f).all_roots() == [-S(1)/2, 2, 2]
    assert Poly(g).all_roots() == [RootOf(g, 0), RootOf(g, 1), RootOf(g, 2)]
def test_nroots():
    """Tests for nroots(): numerical root finding."""
    assert Poly(0, x).nroots() == []
    assert Poly(1, x).nroots() == []

    assert Poly(x**2 - 1, x).nroots() == [-1.0, 1.0]
    assert Poly(x**2 + 1, x).nroots() == [-1.0*I, 1.0*I]

    roots = Poly(x**2 - 1, x).nroots()
    assert roots == [-1.0, 1.0]

    roots = Poly(x**2 + 1, x).nroots()
    assert roots == [-1.0*I, 1.0*I]

    roots = Poly(x**2/3 - S(1)/3, x).nroots()
    assert roots == [-1.0, 1.0]

    roots = Poly(x**2/3 + S(1)/3, x).nroots()
    assert roots == [-1.0*I, 1.0*I]

    assert Poly(x**2 + 2*I, x).nroots() == [-1.0 + 1.0*I, 1.0 - 1.0*I]
    assert Poly(
        x**2 + 2*I, x, extension=I).nroots() == [-1.0 + 1.0*I, 1.0 - 1.0*I]

    assert Poly(0.2*x + 0.1).nroots() == [-0.5]

    roots = nroots(x**5 + x + 1, n=5)
    eps = Float("1e-5")

    # 5-digit approximations agree within a 1e-5 tolerance
    assert re(roots[0]).epsilon_eq(-0.75487, eps) is S.true
    assert im(roots[0]) == 0.0
    assert re(roots[1]) == -0.5
    assert im(roots[1]).epsilon_eq(-0.86602, eps) is S.true
    assert re(roots[2]) == -0.5
    assert im(roots[2]).epsilon_eq(+0.86602, eps) is S.true
    assert re(roots[3]).epsilon_eq(+0.87743, eps) is S.true
    assert im(roots[3]).epsilon_eq(-0.74486, eps) is S.true
    assert re(roots[4]).epsilon_eq(+0.87743, eps) is S.true
    assert im(roots[4]).epsilon_eq(+0.74486, eps) is S.true

    eps = Float("1e-6")

    # ... but not within a tighter 1e-6 tolerance
    assert re(roots[0]).epsilon_eq(-0.75487, eps) is S.false
    assert im(roots[0]) == 0.0
    assert re(roots[1]) == -0.5
    assert im(roots[1]).epsilon_eq(-0.86602, eps) is S.false
    assert re(roots[2]) == -0.5
    assert im(roots[2]).epsilon_eq(+0.86602, eps) is S.false
    assert re(roots[3]).epsilon_eq(+0.87743, eps) is S.false
    assert im(roots[3]).epsilon_eq(-0.74486, eps) is S.false
    assert re(roots[4]).epsilon_eq(+0.87743, eps) is S.false
    assert im(roots[4]).epsilon_eq(+0.74486, eps) is S.false

    raises(DomainError, lambda: Poly(x + y, x).nroots())
    raises(MultivariatePolynomialError, lambda: Poly(x + y).nroots())

    assert nroots(x**2 - 1) == [-1.0, 1.0]

    roots = nroots(x**2 - 1)
    assert roots == [-1.0, 1.0]

    assert nroots(x + I) == [-1.0*I]
    assert nroots(x + 2*I) == [-2.0*I]

    raises(PolynomialError, lambda: nroots(0))

    # issue 8296
    f = Poly(x**4 - 1)
    assert f.nroots(2) == [w.n(2) for w in f.all_roots()]
def test_ground_roots():
f = x**6 - 4*x**4 + 4*x**3 - x**2
assert Poly(f).ground_roots() == {S(1): 2, S(0): 2}
assert ground_roots(f) == {S(1): 2, S(0): 2}
def test_nth_power_roots_poly():
f = x**4 - x**2 + 1
f_2 = (x**2 - x + 1)**2
f_3 = (x**2 + 1)**2
f_4 = (x**2 + x + 1)**2
f_12 = (x - 1)**4
assert nth_power_roots_poly(f, 1) == f
raises(ValueError, lambda: nth_power_roots_poly(f, 0))
raises(ValueError, lambda: nth_power_roots_poly(f, x))
assert factor(nth_power_roots_poly(f, 2)) == f_2
assert factor(nth_power_roots_poly(f, 3)) == f_3
assert factor(nth_power_roots_poly(f, 4)) == f_4
assert factor(nth_power_roots_poly(f, 12)) == f_12
raises(MultivariatePolynomialError, lambda: nth_power_roots_poly(
x + y, 2, x, y))
def test_torational_factor_list():
    """Tests for _torational_factor_list(): factoring via a rationalizing substitution."""
    p = expand(((x**2 - 1)*(x - 2)).subs({x: x*(1 + sqrt(2))}))
    assert _torational_factor_list(p, x) == (-2, [
        (-x*(1 + sqrt(2))/2 + 1, 1),
        (-x*(1 + sqrt(2)) - 1, 1),
        (-x*(1 + sqrt(2)) + 1, 1)])

    # a quartic surd admits no such rationalizing substitution
    p = expand(((x**2 - 1)*(x - 2)).subs({x: x*(1 + 2**Rational(1, 4))}))
    assert _torational_factor_list(p, x) is None
def test_cancel():
    """Tests for cancel(): cancelling common factors of rational functions."""
    assert cancel(0) == 0
    assert cancel(7) == 7
    assert cancel(x) == x

    assert cancel(oo) == oo

    assert cancel((2, 3)) == (1, 2, 3)

    assert cancel((1, 0), x) == (1, 1, 0)
    assert cancel((0, 1), x) == (1, 0, 1)

    f, g, p, q = 4*x**2 - 4, 2*x - 2, 2*x + 2, 1
    F, G, P, Q = [ Poly(u, x) for u in (f, g, p, q) ]

    assert F.cancel(G) == (1, P, Q)
    assert cancel((f, g)) == (1, p, q)
    assert cancel((f, g), x) == (1, p, q)
    assert cancel((f, g), (x,)) == (1, p, q)
    assert cancel((F, G)) == (1, P, Q)
    assert cancel((f, g), polys=True) == (1, P, Q)
    assert cancel((F, G), polys=False) == (1, p, q)

    # algebraic coefficients are only cancelled with greedy=False
    f = (x**2 - 2)/(x + sqrt(2))

    assert cancel(f) == f
    assert cancel(f, greedy=False) == x - sqrt(2)

    f = (x**2 - 2)/(x - sqrt(2))

    assert cancel(f) == f
    assert cancel(f, greedy=False) == x + sqrt(2)

    assert cancel((x**2/4 - 1, x/2 - 1)) == (S(1)/2, x + 2, 1)

    assert cancel((x**2 - y)/(x - y)) == 1/(x - y)*(x**2 - y)

    assert cancel((x**2 - y**2)/(x - y), x) == x + y
    assert cancel((x**2 - y**2)/(x - y), y) == x + y
    assert cancel((x**2 - y**2)/(x - y)) == x + y

    assert cancel((x**3 - 1)/(x**2 - 1)) == (x**2 + x + 1)/(x + 1)
    assert cancel((x**3/2 - S(1)/2)/(x**2 - 1)) == (x**2 + x + 1)/(2*x + 2)

    assert cancel((exp(2*x) + 2*exp(x) + 1)/(exp(x) + 1)) == exp(x) + 1

    f = Poly(x**2 - a**2, x)
    g = Poly(x - a, x)

    F = Poly(x + a, x)
    G = Poly(1, x)

    assert cancel((f, g)) == (1, F, G)

    f = x**3 + (sqrt(2) - 2)*x**2 - (2*sqrt(2) + 3)*x - 3*sqrt(2)
    g = x**2 - 2

    assert cancel((f, g), extension=True) == (1, x**2 - 2*x - 3, x - sqrt(2))

    f = Poly(-2*x + 3, x)
    g = Poly(-x**9 + x**8 + x**6 - x**5 + 2*x**2 - 3*x + 1, x)

    assert cancel((f, g)) == (1, -f, -g)

    # mixed domains unify to the fraction field ZZ(x)
    f = Poly(y, y, domain='ZZ(x)')
    g = Poly(1, y, domain='ZZ[x]')

    assert f.cancel(
        g) == (1, Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
    assert f.cancel(g, include=True) == (
        Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))

    f = Poly(5*x*y + x, y, domain='ZZ(x)')
    g = Poly(2*x**2*y, y, domain='ZZ(x)')

    assert f.cancel(g, include=True) == (
        Poly(5*y + 1, y, domain='ZZ(x)'), Poly(2*x*y, y, domain='ZZ(x)'))

    f = -(-2*x - 4*y + 0.005*(z - y)**2)/((z - y)*(-z + y + 2))

    assert cancel(f).is_Mul == True

    P = tanh(x - 3.0)
    Q = tanh(x + 3.0)
    f = ((-2*P**2 + 2)*(-P**2 + 1)*Q**2/2 + (-2*P**2 + 2)*(-2*Q**2 + 2)*P*Q - (-2*P**2 + 2)*P**2*Q**2 + (-2*Q**2 + 2)*(-Q**2 + 1)*P**2/2 - (-2*Q**2 + 2)*P**2*Q**2)/(2*sqrt(P**2*Q**2 + 0.0001)) \
      + (-(-2*P**2 + 2)*P*Q**2/2 - (-2*Q**2 + 2)*P**2*Q/2)*((-2*P**2 + 2)*P*Q**2/2 + (-2*Q**2 + 2)*P**2*Q/2)/(2*(P**2*Q**2 + 0.0001)**(S(3)/2))

    assert cancel(f).is_Mul == True

    # issue 7022
    A = Symbol('A', commutative=False)
    p1 = Piecewise((A*(x**2 - 1)/(x + 1), x > 1), ((x + 2)/(x**2 + 2*x), True))
    p2 = Piecewise((A*(x - 1), x > 1), (1/x, True))
    assert cancel(p1) == p2
    assert cancel(2*p1) == 2*p2
    assert cancel(1 + p1) == 1 + p2
    assert cancel((x**2 - 1)/(x + 1)*p1) == (x - 1)*p2
    assert cancel((x**2 - 1)/(x + 1) + p1) == (x - 1) + p2
    p3 = Piecewise(((x**2 - 1)/(x + 1), x > 1), ((x + 2)/(x**2 + 2*x), True))
    p4 = Piecewise(((x - 1), x > 1), (1/x, True))
    assert cancel(p3) == p4
    assert cancel(2*p3) == 2*p4
    assert cancel(1 + p3) == 1 + p4
    assert cancel((x**2 - 1)/(x + 1)*p3) == (x - 1)*p4
    assert cancel((x**2 - 1)/(x + 1) + p3) == (x - 1) + p4
def test_reduced():
    """Tests for reduced(): division with remainder modulo a Groebner basis."""
    f = 2*x**4 + y**2 - x**2 + y**3

    G = [x**3 - x, y**3 - y]

    Q = [2*x, 1]
    r = x**2 + y**2 + y

    assert reduced(f, G) == (Q, r)
    assert reduced(f, G, x, y) == (Q, r)

    H = groebner(G)

    assert H.reduce(f) == (Q, r)

    Q = [Poly(2*x, x, y), Poly(1, x, y)]
    r = Poly(x**2 + y**2 + y, x, y)

    assert _strict_eq(reduced(f, G, polys=True), (Q, r))
    assert _strict_eq(reduced(f, G, x, y, polys=True), (Q, r))

    H = groebner(G, polys=True)

    assert _strict_eq(H.reduce(f), (Q, r))

    f = 2*x**3 + y**3 + 3*y

    G = groebner([x**2 + y**2 - 1, x*y - 2])

    Q = [x**2 - x*y**3/2 + x*y/2 + y**6/4 - y**4/2 + y**2/4, -y**5/4 + y**3/2 + 3*y/4]
    r = 0

    assert reduced(f, G) == (Q, r)
    assert G.reduce(f) == (Q, r)

    # without auto field conversion the reduction leaves a nonzero remainder
    assert reduced(f, G, auto=False)[1] != 0
    assert G.reduce(f, auto=False)[1] != 0

    # zero remainder means f lies in the ideal generated by G
    assert G.contains(f) is True
    assert G.contains(f + 1) is False

    assert reduced(1, [1], x) == ([1], 0)
    raises(ComputationFailed, lambda: reduced(1, [1]))
def test_groebner():
    """Tests for groebner(): Groebner basis computation under various orders and domains."""
    assert groebner([], x, y, z) == []

    assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex') == [1 + x**2, -1 + y**4]
    assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z, order='grevlex') == [-1 + y**4, z**3, 1 + x**2]

    assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex', polys=True) == \
        [Poly(1 + x**2, x, y), Poly(-1 + y**4, x, y)]
    assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z, order='grevlex', polys=True) == \
        [Poly(-1 + y**4, x, y, z), Poly(z**3, x, y, z), Poly(1 + x**2, x, y, z)]

    assert groebner([x**3 - 1, x**2 - 1]) == [x - 1]
    # equations are treated as lhs - rhs
    assert groebner([Eq(x**3, 1), Eq(x**2, 1)]) == [x - 1]

    F = [3*x**2 + y*z - 5*x - 1, 2*x + 3*x*y + y**2, x - 3*y + x*z - 2*z**2]
    f = z**9 - x**2*y**3 - 3*x*y**2*z + 11*y*z**2 + x**2*z**2 - 5

    G = groebner(F, x, y, z, modulus=7, symmetric=False)

    assert G == [1 + x + y + 3*z + 2*z**2 + 2*z**3 + 6*z**4 + z**5,
                 1 + 3*y + y**2 + 6*z**2 + 3*z**3 + 3*z**4 + 3*z**5 + 4*z**6,
                 1 + 4*y + 4*z + y*z + 4*z**3 + z**4 + z**6,
                 6 + 6*z + z**2 + 4*z**3 + 3*z**4 + 6*z**5 + 3*z**6 + z**7]

    Q, r = reduced(f, G, x, y, z, modulus=7, symmetric=False, polys=True)

    # division identity: f == sum(q*g) + r  (mod 7)
    assert sum([ q*g for q, g in zip(Q, G.polys)], r) == Poly(f, modulus=7)

    F = [x*y - 2*y, 2*y**2 - x**2]

    assert groebner(F, x, y, order='grevlex') == \
        [y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
    assert groebner(F, y, x, order='grevlex') == \
        [x**3 - 2*x**2, -x**2 + 2*y**2, x*y - 2*y]
    assert groebner(F, order='grevlex', field=True) == \
        [y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]

    assert groebner([1], x) == [1]

    assert groebner([x**2 + 2.0*y], x, y) == [1.0*x**2 + 2.0*y]
    raises(ComputationFailed, lambda: groebner([1]))

    # both algorithms must produce the same reduced basis
    assert groebner([x**2 - 1, x**3 + 1], method='buchberger') == [x + 1]
    assert groebner([x**2 - 1, x**3 + 1], method='f5b') == [x + 1]

    raises(ValueError, lambda: groebner([x, y], method='unknown'))
def test_fglm():
    """Tests for GroebnerBasis.fglm(): converting a basis between term orders."""
    # cyclic-4 system
    F = [a + b + c + d, a*b + a*d + b*c + b*d, a*b*c + a*b*d + a*c*d + b*c*d, a*b*c*d - 1]
    G = groebner(F, a, b, c, d, order=grlex)

    B = [
        4*a + 3*d**9 - 4*d**5 - 3*d,
        4*b + 4*c - 3*d**9 + 4*d**5 + 7*d,
        4*c**2 + 3*d**10 - 4*d**6 - 3*d**2,
        4*c*d**4 + 4*c - d**9 + 4*d**5 + 5*d,
        d**12 - d**8 - d**4 + 1,
    ]

    # fglm conversion must agree with a direct lex computation
    assert groebner(F, a, b, c, d, order=lex) == B
    assert G.fglm(lex) == B

    F = [9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 + 288*x**2 - 108*x + 9,
         -72*t*x**7 - 252*t*x**6 + 192*t*x**5 + 1260*t*x**4 + 312*t*x**3 - 404*t*x**2 - 576*t*x +
         108*t - 72*x**7 - 256*x**6 + 192*x**5 + 1280*x**4 + 312*x**3 - 576*x + 96]
    G = groebner(F, t, x, order=grlex)

    B = [
        203577793572507451707*t + 627982239411707112*x**7 - 666924143779443762*x**6 -
        10874593056632447619*x**5 + 5119998792707079562*x**4 + 72917161949456066376*x**3 +
        20362663855832380362*x**2 - 142079311455258371571*x + 183756699868981873194,
        9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 + 288*x**2 - 108*x + 9,
    ]

    assert groebner(F, t, x, order=lex) == B
    assert G.fglm(lex) == B

    F = [x**2 - x - 3*y + 1, -2*x + y**2 + y - 1]
    G = groebner(F, x, y, order=lex)

    B = [
        x**2 - x - 3*y + 1,
        y**2 - 2*x + y - 1,
    ]

    # conversion works in the other direction (lex -> grlex) as well
    assert groebner(F, x, y, order=grlex) == B
    assert G.fglm(grlex) == B
def test_is_zero_dimensional():
    """Tests for is_zero_dimensional(): finiteness of a polynomial system's solution set."""
    assert is_zero_dimensional([x, y], x, y) is True
    assert is_zero_dimensional([x**3 + y**2], x, y) is False

    assert is_zero_dimensional([x, y, z], x, y, z) is True
    # an extra free generator (t) makes the solution set infinite
    assert is_zero_dimensional([x, y, z], x, y, z, t) is False

    F = [x*y - z, y*z - x, x*y - y]
    assert is_zero_dimensional(F, x, y, z) is True

    F = [x**2 - 2*x*z + 5, x*y**2 + y*z**3, 3*y**2 - 8*z**2]
    assert is_zero_dimensional(F, x, y, z) is True
def test_GroebnerBasis():
F = [x*y - 2*y, 2*y**2 - x**2]
G = groebner(F, x, y, order='grevlex')
H = [y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
P = [ Poly(h, x, y) for h in H ]
assert isinstance(G, GroebnerBasis) is True
assert len(G) == 3
assert G[0] == H[0] and not G[0].is_Poly
assert G[1] == H[1] and not G[1].is_Poly
assert G[2] == H[2] and not G[2].is_Poly
assert G[1:] == H[1:] and not any(g.is_Poly for g in G[1:])
assert G[:2] == H[:2] and not any(g.is_Poly for g in G[1:])
assert G.exprs == H
assert G.polys == P
assert G.gens == (x, y)
assert G.domain == ZZ
assert G.order == grevlex
assert G == H
assert G == tuple(H)
assert G == P
assert G == tuple(P)
assert G != []
G = groebner(F, x, y, order='grevlex', polys=True)
assert G[0] == P[0] and G[0].is_Poly
assert G[1] == P[1] and G[1].is_Poly
assert G[2] == P[2] and G[2].is_Poly
assert G[1:] == P[1:] and all(g.is_Poly for g in G[1:])
assert G[:2] == P[:2] and all(g.is_Poly for g in G[1:])
def test_poly():
    """Tests for poly(): converting expressions to Poly with inferred generators."""
    assert poly(x) == Poly(x, x)
    assert poly(y) == Poly(y, y)

    assert poly(x + y) == Poly(x + y, x, y)
    assert poly(x + sin(x)) == Poly(x + sin(x), x, sin(x))

    # wrt= promotes the given generator to the front
    assert poly(x + y, wrt=y) == Poly(x + y, y, x)
    assert poly(x + sin(x), wrt=sin(x)) == Poly(x + sin(x), sin(x), x)

    assert poly(x*y + 2*x*z**2 + 17) == Poly(x*y + 2*x*z**2 + 17, x, y, z)

    assert poly(2*(y + z)**2 - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - 1, y, z)
    assert poly(
        x*(y + z)**2 - 1) == Poly(x*y**2 + 2*x*y*z + x*z**2 - 1, x, y, z)
    assert poly(2*x*(
        y + z)**2 - 1) == Poly(2*x*y**2 + 4*x*y*z + 2*x*z**2 - 1, x, y, z)

    assert poly(2*(
        y + z)**2 - x - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - x - 1, x, y, z)
    assert poly(x*(
        y + z)**2 - x - 1) == Poly(x*y**2 + 2*x*y*z + x*z**2 - x - 1, x, y, z)
    assert poly(2*x*(y + z)**2 - x - 1) == Poly(2*x*y**2 + 4*x*y*z + 2*
        x*z**2 - x - 1, x, y, z)

    assert poly(x*y + (x + y)**2 + (x + z)**2) == \
        Poly(2*x*z + 3*x*y + y**2 + z**2 + 2*x**2, x, y, z)
    assert poly(x*y*(x + y)*(x + z)**2) == \
        Poly(x**3*y**2 + x*y**2*z**2 + y*x**2*z**2 + 2*z*x**2*
             y**2 + 2*y*z*x**3 + y*x**4, x, y, z)

    # an existing Poly passes through unchanged
    assert poly(Poly(x + y + z, y, x, z)) == Poly(x + y + z, y, x, z)

    assert poly((x + y)**2, x) == Poly(x**2 + 2*x*y + y**2, x, domain=ZZ[y])
    assert poly((x + y)**2, y) == Poly(x**2 + 2*x*y + y**2, y, domain=ZZ[x])

    assert poly(1, x) == Poly(1, x)
    raises(GeneratorsNeeded, lambda: poly(1))

    # issue 6184
    assert poly(x + y, x, y) == Poly(x + y, x, y)
    assert poly(x + y, y, x) == Poly(x + y, y, x)
def test_keep_coeff():
    """Exercise ``_keep_coeff(coeff, factor)`` over unit, numeric and
    symbolic coefficients: unit coefficients are absorbed, numeric
    coefficients distribute over single terms, and a numeric coefficient
    times a sum is kept as an unevaluated Mul."""
    unevaluated = Mul(2, x + 1, evaluate=False)
    cases = [
        (S(1), x, x),
        (S(-1), x, -x),
        (S(1.0), x, 1.0*x),
        (S(-1.0), x, -1.0*x),
        (S(1), 2*x, 2*x),
        (S(2), x/2, x),
        (S(2), sin(x), 2*sin(x)),
        (S(2), x + 1, unevaluated),
        (x, 1/x, 1),
        (x + 1, S(2), unevaluated),
    ]
    for coeff, factor, expected in cases:
        assert _keep_coeff(coeff, factor) == expected
@XFAIL
def test_poly_matching_consistency():
    # Test for this issue:
    # https://github.com/sympy/sympy/issues/5514
    # Multiplying a Poly by I should give the same result regardless of
    # operand order. Marked XFAIL: this is a known, currently-failing case.
    assert I * Poly(x, x) == Poly(I*x, x)
    assert Poly(x, x) * I == Poly(I*x, x)
@XFAIL
def test_issue_5786():
    # Known failure: factoring over the extension [I] and re-expanding
    # should round-trip to the original expanded product.
    assert expand(factor(expand(
        (x - I*y)*(z - I*t)), extension=[I])) == -I*t*x - t*y + x*z - I*y*z
def test_noncommutative():
    """cancel() should simplify inside non-commutative wrappers while
    leaving the wrapper itself intact."""
    class foo(Expr):
        # Non-commutative marker class used only as an opaque wrapper.
        is_commutative=False
    e = x/(x + x*y)
    # e cancels to c = 1/(1 + y).
    c = 1/( 1 + y)
    assert cancel(foo(e)) == foo(c)
    assert cancel(e + foo(e)) == c + foo(c)
    assert cancel(e*foo(c)) == c*foo(c)
def test_to_rational_coeffs():
    """to_rational_coeffs() returns None when the polynomial cannot be
    transformed to one with rational coefficients."""
    # Fixed: compare to the None singleton with ``is`` (PEP 8), not ``==``.
    # The truth value is unchanged, but ``is None`` cannot be fooled by a
    # custom __eq__ and is the idiomatic test.
    assert to_rational_coeffs(
        Poly(x**3 + y*x**2 + sqrt(y), x, domain='EX')) is None
|
Cuuuurzel/KiPyCalc
|
sympy/polys/tests/test_polytools.py
|
Python
|
mit
| 105,808
|
[
"Gaussian"
] |
89f75ce42d56474844e7d340bc6a8a9aa6380559d4b150244459f1ad09de048f
|
# Data sources
database(
    # Overrides RMG's own thermo estimates with the values from these libraries.
    # Available libraries are listed at http://rmg.mit.edu/database/thermo/libraries/
    # If a species exists in multiple libraries, the earlier-listed library
    # takes precedence over the later ones.
    thermoLibraries = ['KlippensteinH2O2','primaryThermoLibrary','DFT_QCI_thermo','CBS_QB3_1dHR'],
    # Overrides RMG kinetics estimation if needed in the core of RMG.
    # List of libraries found at http://rmg.mit.edu/database/kinetics/libraries/
    # Input each library as ('library_name', True/False), where True means that all
    # unused reactions will be automatically added to the chemkin file.
    reactionLibraries = [],
    # Seed mechanisms are reaction libraries that are forced into the initial mechanism
    # in addition to the species listed in this input file.
    # This is helpful for reducing run time for species you know will appear in
    # the mechanism.
    seedMechanisms = ['KlippensteinH2O2','ERC-FoundationFuelv0.9'],
    # Normally not changed in general RMG runs; usually used for testing with
    # outside kinetics databases.
    kineticsDepositories = 'default',
    # Specific families used to generate the model. 'default' uses the list of
    # families from RMG-database/input/families/recommended.py.
    # A visual list of families is available in PDF form at RMG-database/families.
    kineticsFamilies = 'default',
    # Specifies how RMG calculates rates; currently the only option is 'rate rules'.
    kineticsEstimator = 'rate rules',
)
# List of species
# List initial and expected species below to automatically put them into the core mechanism.
# 'structure' can be given as SMILES("put_SMILES_here"),
# adjacencyList("""put_adj_list_here"""), or InChI("put_InChI_here").
# For molecular oxygen, use the SMILES string [O][O] so the triplet form is used.
species(
    label='butane',
    reactive=True,  # this parameter is optional if True
    structure=SMILES("CCCC"),
)
species(
    label='O2',
    structure=SMILES("[O][O]"),
)
species(
    label='N2',
    reactive=False,  # inert bath gas
    structure=adjacencyList("""
1 N u0 p1 c0 {2,T}
2 N u0 p1 c0 {1,T}
"""),
)
# You can list species not initially in the reactor to make sure RMG includes them in the mechanism.
species(
    label='QOOH',
    reactive=True,
    structure=SMILES("OOCC[CH]C")
)
species(
    label='CO2',
    reactive=True,
    structure=SMILES("O=C=O")
)
# Reaction systems
# Currently RMG models only constant-temperature, constant-pressure homogeneous batch reactors.
# The two options are simpleReactor for gas phase and liquidReactor for liquid phase.
# You can use multiple reactors in an input file, one for each condition you want to test.
simpleReactor(
    # Reaction temperature, with units.
    temperature=(700,'K'),
    # Reaction pressure, with units.
    pressure=(10.0,'bar'),
    # Initial mole fractions of compounds, keyed by the 'species' labels above.
    # RMG will normalize them if the sum is not 1.
    initialMoleFractions={
        "N2": 4,
        "O2": 1,
        "butane": 1./6.5,
    },
    # The following two values specify when to determine the final output model;
    # only one must be specified.
    # The first condition to be satisfied terminates the simulation.
    terminationConversion={
        'butane': .99,
    },
    terminationTime=(40,'s'),
    # The next two optional values specify how RMG computes sensitivities of
    # rate coefficients with respect to species concentrations.
    # 'sensitivity' is a list of species labels to conduct sensitivity analysis on;
    # 'sensitivityThreshold' is the minimum sensitivity recorded in the csv output file.
    # sensitivity=['CH4'],
    # sensitivityThreshold=0.0001,
)
# liquidReactor(
#     temperature=(500,'K'),
#     initialConcentrations={
#         "N2": 4,
#         "O2": 1,
#         "CO": 1,
#     },
#     terminationConversion=None,
#     terminationTime=(3600,'s'),
#     sensitivity=None,
#     sensitivityThreshold=1e-3
# )
# Liquid reactors also have solvents; you can specify one solvent.
# List of solvents available at: http://rmg.mit.edu/database/solvation/libraries/solvent/
# solvation('water')
# Absolute and relative tolerances for the ODE solver and sensitivities.
# Normally these don't cause many issues and are modified only after other
# issues are ruled out.
simulator(
    atol=1e-16,
    rtol=1e-8,
    # sens_atol=1e-6,
    # sens_rtol=1e-4,
)
# Used to add species to the model and to reduce memory usage by removing unimportant edge species.
# All relative values are normalized by a characteristic flux at that time point.
model(
    # Relative flux required to move a species into the core.
    # A higher value will result in a larger, more complex model.
    # When running a new model, it is recommended to start with higher values and then decrease to converge on the model.
    toleranceMoveToCore=0.1,
    # Comment out the next three terms to disable pruning.
    # Relative flux needed to keep (not prune) a species in the model.
    # Lower values will keep more species and utilize more memory.
    toleranceKeepInEdge=0.01,
    # Determines when to interrupt an ODE run to add a species.
    # Lower values will improve speed, but if it is too low the run may
    # never reach the end of the simulation, so species are never pruned.
    toleranceInterruptSimulation=1,
    # Number of edge species that must accumulate before pruning occurs.
    # Larger values require more memory and will prune less often.
    maximumEdgeSpecies=100000,
    # Minimum number of core species needed before pruning occurs;
    # this prevents pruning while the kinetic model is far from complete.
    minCoreSizeForPrune=50,
    # Pruned edge species must have existed for this many RMG iterations;
    # the user can increase it from the default value of 2.
    minSpeciesExistIterationsForPrune=2,
    # Filter reactions during the enlarge step to omit species from reacting
    # if their concentrations are deemed too low.
    filterReactions=False,
)
options(
    # The only option is 'si'.
    units='si',
    # How often to save restart files.
    # Saving takes a significant amount of time; leave None if you don't want to save.
    saveRestartPeriod=None,
    # Draw images of species and reactions and save the model output to HTML.
    # May consume extra memory when running large models.
    generateOutputHTML=True,
    # Generate plots of RMG's performance statistics. Not helpful if you just want a model.
    generatePlots=False,
    # Save mole fractions of species in 'solver/' to help you create plots.
    saveSimulationProfiles=False,
    # Output comments in the chemkin file on where kinetics were obtained.
    # Useful for debugging kinetics, but increases the size of the chemkin output file.
    verboseComments=False,
    # Generate edge-species chemkin files. Uses lots of memory in output.
    # Helpful for seeing why some reactions are not appearing in the core model.
    saveEdgeSpecies=False,
    # Sets a time limit in the form DD:HH:MM:SS after which the RMG job will stop.
    # Useful for profiling jobs that do not converge.
    # wallTime = '00:00:00',
)
# Optional module that corrects unimolecular reaction rates at low pressures and/or temperatures.
pressureDependence(
    # Two methods available: 'modified strong collision' is faster and less accurate than 'reservoir state'.
    method='modified strong collision',
    # These two settings determine how finely the energy is discretized;
    # more grains increases accuracy but takes longer.
    maximumGrainSize=(0.5,'kcal/mol'),
    minimumNumberOfGrains=250,
    # The conditions over which the rate is output.
    # Parameter order is: low_value, high_value, units, internal points.
    temperatures=(300,2200,'K',2),
    pressures=(0.01,100,'bar',3),
    # The two options for interpolation are 'PDepArrhenius' (no extra arguments) and
    # 'Chebyshev', which is followed by the number of basis sets in
    # temperature and pressure. These values must be less than the number of
    # internal points specified above.
    interpolation=('Chebyshev', 6, 4),
    # Turns off pressure dependence for molecules with more atoms than the number below,
    # due to the faster internal rate of energy transfer in larger molecules.
    maximumAtoms=15,
)
# Optional block that adds constraints on what species RMG can generate.
# This helps the efficiency of RMG, but wrong inputs can lead to many errors.
generatedSpeciesConstraints(
    # Allows exceptions to the following restrictions.
    allowed=['input species','seed mechanisms','reaction libraries'],
    # Maximum number of each atom type in a molecule.
    maximumCarbonAtoms=4,
    maximumOxygenAtoms=7,
    maximumNitrogenAtoms=0,
    maximumSiliconAtoms=0,
    maximumSulfurAtoms=0,
    # Maximum number of non-hydrogen atoms.
    #maximumHeavyAtoms=20,
    # Maximum number of radical electrons on a molecule.
    maximumRadicalElectrons=1,
    # If this is False or missing, RMG will throw an error if the less-stable singlet
    # form of O2 is entered, since it doesn't react in the RMG system.
    # Normally input O2 as the triplet with SMILES [O][O].
    #allowSingletO2=False,
    # Maximum allowed number of non-normal isotope atoms:
    #maximumIsotopicAtoms=2,
)
#optional block allows thermo to be estimated through quantum mechanics calculations
# quantumMechanics(
#     #the software package for the calculations...can use 'mopac' or 'gaussian' if installed
#     software='mopac',
#     #methods available for calculations: 'pm2', 'pm3' or 'pm7' (the last is for mopac only)
#     method='pm3',
#     #where to store calculations
#     fileStore='QMfiles',
#     #where to store temporary run files
#     scratchDirectory = None,
#     #onlyCyclics restricts the QM calculations to cyclic species; linear molecules
#     #are estimated with Benson group additivity instead....need to verify
#     onlyCyclics = True,
#     #maximum number of radicals allowed in a QM calculation.
#     #If a species has more radicals than this, RMG will use the hydrogen bond increment (HBI) method
#     maxRadicalNumber = 0,
#     )
|
pierrelb/RMG-Py
|
examples/rmg/commented/input.py
|
Python
|
mit
| 9,925
|
[
"Gaussian",
"MOPAC"
] |
dc9d7de958b15c500f648301bb3d0b798d4f68f4cfbd01edc6e146374d885045
|
# Sample module in the public domain. Feel free to use this as a template
# for your modules (and you can remove this header and take complete credit
# and liability)
#
# Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Simple data source-level ingest module for Autopsy.
# Used as part of Python tutorials from Basis Technology - August 2015
#
# Looks for files of a given name, opens then in SQLite, queries the DB,
# and makes artifacts
import jarray
import inspect
import os
from java.lang import Class
from java.lang import System
from java.sql import DriverManager, SQLException
from java.util.logging import Level
from java.io import File
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import ReadContentInputStream
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.ingest import ModuleDataEvent
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.casemodule.services import Services
from org.sleuthkit.autopsy.casemodule.services import FileManager
# This will work in 4.0.1 and beyond
# from org.sleuthkit.autopsy.casemodule.services import Blackboard
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the analysis.
class ContactsDbIngestModuleFactory(IngestModuleFactoryAdapter):
    """Factory registered with Autopsy; it names the module and creates
    ContactsDbIngestModule instances to do the analysis."""

    # Display name; also used to tag the artifacts/attributes this module creates.
    moduleName = "Contacts Db Analyzer"

    def getModuleDisplayName(self):
        return self.moduleName

    def getModuleDescription(self):
        return "Sample module that parses contacts.db"

    def getModuleVersionNumber(self):
        return "1.0"

    def isDataSourceIngestModuleFactory(self):
        # This factory produces data source-level ingest modules.
        return True

    def createDataSourceIngestModule(self, ingestOptions):
        # One module instance per data source.
        return ContactsDbIngestModule()
# Data Source-level ingest module. One gets created per data source.
class ContactsDbIngestModule(DataSourceIngestModule):
    """Data source-level ingest module; one instance is created per data source.

    Finds files named contacts.db, copies each to the case temp folder,
    opens it with the SQLite JDBC driver, reads the ``contacts`` table and
    posts one TSK_CONTACT blackboard artifact per row.
    """

    _logger = Logger.getLogger(ContactsDbIngestModuleFactory.moduleName)

    def log(self, level, msg):
        # Log with the calling method's name (from the stack) for easier tracing.
        self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg)

    def __init__(self):
        self.context = None

    # Where any setup and configuration is done.
    # 'context' is an instance of org.sleuthkit.autopsy.ingest.IngestJobContext.
    # See: http://sleuthkit.org/autopsy/docs/api-docs/3.1/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_ingest_job_context.html
    def startUp(self, context):
        self.context = context
        # Throw an IngestModule.IngestModuleException if there was a problem setting up
        # raise IngestModuleException("Oh No!")

    # Where the analysis is done.
    # 'dataSource' is of type org.sleuthkit.datamodel.Content.
    # See: http://www.sleuthkit.org/sleuthkit/docs/jni-docs/4.3/interfaceorg_1_1sleuthkit_1_1datamodel_1_1_content.html
    # 'progressBar' is of type org.sleuthkit.autopsy.ingest.DataSourceIngestModuleProgress.
    # See: http://sleuthkit.org/autopsy/docs/api-docs/3.1/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_data_source_ingest_module_progress.html
    def process(self, dataSource, progressBar):
        # We don't know how much work there is yet.
        progressBar.switchToIndeterminate()

        # This will work in 4.0.1 and beyond
        # Use blackboard class to index blackboard artifacts for keyword search
        # blackboard = Case.getCurrentCase().getServices().getBlackboard()

        # Find files named contacts.db, regardless of parent path.
        fileManager = Case.getCurrentCase().getServices().getFileManager()
        files = fileManager.findFiles(dataSource, "contacts.db")

        numFiles = len(files)
        progressBar.switchToDeterminate(numFiles)
        fileCount = 0  # fixed: removed stray trailing semicolon
        for file in files:
            # Check if the user pressed cancel while we were busy.
            if self.context.isJobCancelled():
                return IngestModule.ProcessResult.OK

            self.log(Level.INFO, "Processing file: " + file.getName())
            fileCount += 1

            # Save the DB locally in the temp folder; use the file id as the
            # name to reduce collisions.
            lclDbPath = os.path.join(Case.getCurrentCase().getTempDirectory(), str(file.getId()) + ".db")
            ContentUtils.writeToFile(file, File(lclDbPath))

            # Open the DB using JDBC.
            try:
                Class.forName("org.sqlite.JDBC").newInstance()
                dbConn = DriverManager.getConnection("jdbc:sqlite:%s" % lclDbPath)
            except SQLException as e:
                self.log(Level.INFO, "Could not open database file (not SQLite) " + file.getName() + " (" + e.getMessage() + ")")
                return IngestModule.ProcessResult.OK

            # Query the contacts table in the database and get all columns.
            try:
                stmt = dbConn.createStatement()
                resultSet = stmt.executeQuery("SELECT * FROM contacts")
            except SQLException as e:
                self.log(Level.INFO, "Error querying database for contacts table (" + e.getMessage() + ")")
                return IngestModule.ProcessResult.OK

            # Cycle through each row and create artifacts.
            while resultSet.next():
                try:
                    name = resultSet.getString("name")
                    email = resultSet.getString("email")
                    phone = resultSet.getString("phone")
                except SQLException as e:
                    self.log(Level.INFO, "Error getting values from contacts table (" + e.getMessage() + ")")
                    # Bug fix: skip this row. Previously execution fell
                    # through and referenced name/email/phone, which may be
                    # unbound here, raising a NameError.
                    continue

                # Make a TSK_CONTACT artifact on the blackboard with an
                # attribute for each of the fields.
                art = file.newArtifact(BlackboardArtifact.ARTIFACT_TYPE.TSK_CONTACT)
                art.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_NAME_PERSON.getTypeID(),
                                                     ContactsDbIngestModuleFactory.moduleName, name))
                art.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_EMAIL.getTypeID(),
                                                     ContactsDbIngestModuleFactory.moduleName, email))
                art.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PHONE_NUMBER.getTypeID(),
                                                     ContactsDbIngestModuleFactory.moduleName, phone))

                # This will work in 4.0.1 and beyond
                # try:
                #     # index the artifact for keyword search
                #     blackboard.indexArtifact(art)
                # except Blackboard.BlackboardException as e:
                #     self.log(Level.SEVERE, "Error indexing artifact " + art.getDisplayName())

            # Fire an event to notify the UI and others that there are new artifacts.
            IngestServices.getInstance().fireModuleDataEvent(
                ModuleDataEvent(ContactsDbIngestModuleFactory.moduleName,
                                BlackboardArtifact.ARTIFACT_TYPE.TSK_CONTACT, None))

            # Clean up per-file resources.
            stmt.close()
            dbConn.close()
            os.remove(lclDbPath)

        # After all databases, post a message to the ingest messages inbox.
        message = IngestMessage.createMessage(IngestMessage.MessageType.DATA,
            "ContactsDb Analyzer", "Found %d files" % fileCount)
        IngestServices.getInstance().postMessage(message)
        return IngestModule.ProcessResult.OK
|
mhmdfy/autopsy
|
pythonExamples/Aug2015DataSourceTutorial/FindContactsDb.py
|
Python
|
apache-2.0
| 9,361
|
[
"Brian"
] |
5d5b87a74ea749741118a9dd586f0e9efd87c7b6c2740b8627efb1649be62743
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
******************************************************************************************
**LBInitPopWave** - creates initial populations with uniform density and harmonic velocity
******************************************************************************************
This class creates initial populations with uniform density and harmonic velocity:
v_x = 0, v_y = 0, v_z = Amp * sin (2 * \pi * i / N_x)
This may be used to test the system: total moment is zero and the liquid tends to equilibrium,
i.e. relaxes to uniform zero velocity.
"""
from espresso.esutil import cxxinit
from espresso import pmi
from espresso.integrator.LBInit import *
from _espresso import integrator_LBInit_PopWave
class LBInitPopWaveLocal(LBInitLocal, integrator_LBInit_PopWave):
    """The (local) compute of LBInitPopWave."""
    def __init__(self, system, latticeboltzmann):
        # Construct the C++ backend only when no PMI communicator is set up,
        # or on workers whose MPI rank belongs to the active PMI CPU group.
        if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, integrator_LBInit_PopWave, system, latticeboltzmann)
if pmi.isController :
    # On the controller, expose a PMI proxy class that forwards
    # "createDenVel" calls to the LBInitPopWaveLocal objects on the workers.
    class LBInitPopWave(LBInit):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.integrator.LBInitPopWaveLocal',
            pmicall = [
                "createDenVel"]
            )
|
BackupTheBerlios/espressopp
|
src/integrator/LBInitPopWave.py
|
Python
|
gpl-3.0
| 2,164
|
[
"ESPResSo"
] |
965c9985c9266516ebfa93bffc89611e71d5d2a140f90b40c549db8f4729c79d
|
from __future__ import absolute_import
import cython
cython.declare(PyrexTypes=object, Naming=object, ExprNodes=object, Nodes=object,
Options=object, UtilNodes=object, LetNode=object,
LetRefNode=object, TreeFragment=object, EncodedString=object,
error=object, warning=object, copy=object, _unicode=object)
import copy
from . import PyrexTypes
from . import Naming
from . import ExprNodes
from . import Nodes
from . import Options
from . import Builtin
from .Visitor import VisitorTransform, TreeVisitor
from .Visitor import CythonTransform, EnvTransform, ScopeTrackingTransform
from .UtilNodes import LetNode, LetRefNode
from .TreeFragment import TreeFragment
from .StringEncoding import EncodedString, _unicode
from .Errors import error, warning, CompileError, InternalError
from .Code import UtilityCode
class NameNodeCollector(TreeVisitor):
    """Collect all NameNodes of a (sub-)tree in the ``name_nodes``
    attribute.
    """
    def __init__(self):
        super(NameNodeCollector, self).__init__()
        # NameNodes found so far, in visit order.
        self.name_nodes = []

    def visit_NameNode(self, node):
        # Record the node; no recursion into its children here.
        self.name_nodes.append(node)

    def visit_Node(self, node):
        # Generic fallback: descend into all children.
        self._visitchildren(node, None)
class SkipDeclarations(object):
    """
    Variable and function declarations can often have a deep tree structure,
    and yet most transformations don't need to descend to this depth.

    Declaration nodes are removed after AnalyseDeclarationsTransform, so there
    is no need to use this for transformations after that point.
    """
    # Each handler returns the node unchanged without visiting its children,
    # pruning the traversal at declaration boundaries.
    def visit_CTypeDefNode(self, node):
        return node

    def visit_CVarDefNode(self, node):
        return node

    def visit_CDeclaratorNode(self, node):
        return node

    def visit_CBaseTypeNode(self, node):
        return node

    def visit_CEnumDefNode(self, node):
        return node

    def visit_CStructOrUnionDefNode(self, node):
        return node
class NormalizeTree(CythonTransform):
    """
    This transform fixes up a few things after parsing
    in order to make the parse tree more suitable for
    transforms.

    a) After parsing, blocks with only one statement will
    be represented by that statement, not by a StatListNode.
    When doing transforms this is annoying and inconsistent,
    as one cannot in general remove a statement in a consistent
    way and so on. This transform wraps any single statements
    in a StatListNode containing a single statement.

    b) The PassStatNode is a noop and serves no purpose beyond
    plugging such one-statement blocks; i.e., once parsed a
    "pass" can just as well be represented using an empty
    StatListNode. This means less special cases to worry about
    in subsequent transforms (one always checks to see if a
    StatListNode has no children to see if the block is empty).
    """
    def __init__(self, context):
        super(NormalizeTree, self).__init__(context)
        # Track whether the current node sits directly inside a statement
        # list or inside an expression, to decide whether wrapping is needed.
        self.is_in_statlist = False
        self.is_in_expr = False

    def visit_ExprNode(self, node):
        # Save and restore the flag so statements nested inside expressions
        # know not to wrap themselves.
        stacktmp = self.is_in_expr
        self.is_in_expr = True
        self.visitchildren(node)
        self.is_in_expr = stacktmp
        return node

    def visit_StatNode(self, node, is_listcontainer=False):
        stacktmp = self.is_in_statlist
        self.is_in_statlist = is_listcontainer
        self.visitchildren(node)
        self.is_in_statlist = stacktmp
        # Wrap bare single statements in a StatListNode (see docstring, a).
        if not self.is_in_statlist and not self.is_in_expr:
            return Nodes.StatListNode(pos=node.pos, stats=[node])
        else:
            return node

    def visit_StatListNode(self, node):
        self.is_in_statlist = True
        self.visitchildren(node)
        self.is_in_statlist = False
        return node

    # The following nodes are handled as list containers, so their children
    # are not re-wrapped.
    def visit_ParallelAssignmentNode(self, node):
        return self.visit_StatNode(node, True)

    def visit_CEnumDefNode(self, node):
        return self.visit_StatNode(node, True)

    def visit_CStructOrUnionDefNode(self, node):
        return self.visit_StatNode(node, True)

    def visit_PassStatNode(self, node):
        """Eliminate PassStatNode"""
        if not self.is_in_statlist:
            # Standalone pass becomes an empty statement list.
            return Nodes.StatListNode(pos=node.pos, stats=[])
        else:
            # Inside a list, simply drop the node.
            return []

    def visit_ExprStatNode(self, node):
        """Eliminate useless string literals"""
        if node.expr.is_string_literal:
            # Bare string statements (non-docstring) behave like pass.
            return self.visit_PassStatNode(node)
        else:
            return self.visit_StatNode(node)

    def visit_CDeclaratorNode(self, node):
        # Do not descend into declarators.
        return node
class PostParseError(CompileError):
    # Structural error detected during the PostParse transform.
    pass

# error strings checked by unit tests, so define them
ERR_CDEF_INCLASS = 'Cannot assign default value to fields in cdef classes, structs or unions'
ERR_BUF_DEFAULTS = 'Invalid buffer defaults specification (see docs)'
ERR_INVALID_SPECIALATTR_TYPE = 'Special attributes must not have a type declared'
class PostParse(ScopeTrackingTransform):
    """
    Basic interpretation of the parse tree, as well as validity
    checking that can be done on a very basic level on the parse
    tree (while still not being a problem with the basic syntax,
    as such).

    Specifically:
    - Default values to cdef assignments are turned into single
      assignments following the declaration (everywhere but in class
      bodies, where they raise a compile error)

    - Interpret some node structures into Python runtime values.
      Some nodes take compile-time arguments (currently:
      TemplatedTypeNode[args] and __cythonbufferdefaults__ = {args}),
      which should be interpreted. This happens in a general way
      and other steps should be taken to ensure validity.

      Type arguments cannot be interpreted in this way.

    - For __cythonbufferdefaults__ the arguments are checked for
      validity.

      TemplatedTypeNode has its directives interpreted:
      Any first positional argument goes into the "dtype" attribute,
      any "ndim" keyword argument goes into the "ndim" attribute and
      so on. Also it is checked that the directive combination is valid.
    - __cythonbufferdefaults__ attributes are parsed and put into the
      type information.

    Note: Currently Parsing.py does a lot of interpretation and
    reorganization that can be refactored into this transform
    if a more pure Abstract Syntax Tree is wanted.
    """
    def __init__(self, context):
        super(PostParse, self).__init__(context)
        # Handlers for special class attributes that are interpreted at
        # compile time rather than assigned at runtime.
        self.specialattribute_handlers = {
            '__cythonbufferdefaults__' : self.handle_bufferdefaults
        }

    def visit_LambdaNode(self, node):
        # unpack a lambda expression into the corresponding DefNode
        collector = YieldNodeCollector()
        collector.visitchildren(node.result_expr)
        if collector.yields or collector.awaits or isinstance(node.result_expr, ExprNodes.YieldExprNode):
            # Generator lambda: the result expression is kept as a statement.
            body = Nodes.ExprStatNode(
                node.result_expr.pos, expr=node.result_expr)
        else:
            # Plain lambda: the result expression becomes the return value.
            body = Nodes.ReturnStatNode(
                node.result_expr.pos, value=node.result_expr)
        node.def_node = Nodes.DefNode(
            node.pos, name=node.name,
            args=node.args, star_arg=node.star_arg,
            starstar_arg=node.starstar_arg,
            body=body, doc=None)
        self.visitchildren(node)
        return node

    def visit_GeneratorExpressionNode(self, node):
        # unpack a generator expression into the corresponding DefNode
        node.def_node = Nodes.DefNode(node.pos, name=node.name,
                                      doc=None,
                                      args=[], star_arg=None,
                                      starstar_arg=None,
                                      body=node.loop)
        self.visitchildren(node)
        return node

    # cdef variables
    def handle_bufferdefaults(self, decl):
        # __cythonbufferdefaults__ must be assigned a dict literal; the dict
        # node is stashed on the scope node for later interpretation.
        if not isinstance(decl.default, ExprNodes.DictNode):
            raise PostParseError(decl.pos, ERR_BUF_DEFAULTS)
        self.scope_node.buffer_defaults_node = decl.default
        self.scope_node.buffer_defaults_pos = decl.pos

    def visit_CVarDefNode(self, node):
        # This assumes only plain names and pointers are assignable on
        # declaration. Also, it makes use of the fact that a cdef decl
        # must appear before the first use, so we don't have to deal with
        # "i = 3; cdef int i = i" and can simply move the nodes around.
        try:
            self.visitchildren(node)
            stats = [node]
            newdecls = []
            for decl in node.declarators:
                declbase = decl
                # Strip pointer declarators to reach the underlying name.
                while isinstance(declbase, Nodes.CPtrDeclaratorNode):
                    declbase = declbase.base
                if isinstance(declbase, Nodes.CNameDeclaratorNode):
                    if declbase.default is not None:
                        if self.scope_type in ('cclass', 'pyclass', 'struct'):
                            if isinstance(self.scope_node, Nodes.CClassDefNode):
                                handler = self.specialattribute_handlers.get(decl.name)
                                if handler:
                                    if decl is not declbase:
                                        # Special attributes must be plain names.
                                        raise PostParseError(decl.pos, ERR_INVALID_SPECIALATTR_TYPE)
                                    handler(decl)
                                    continue # Remove declaration
                            raise PostParseError(decl.pos, ERR_CDEF_INCLASS)
                        # Move the default value into a separate assignment
                        # statement following the declaration.
                        first_assignment = self.scope_type != 'module'
                        stats.append(Nodes.SingleAssignmentNode(node.pos,
                            lhs=ExprNodes.NameNode(node.pos, name=declbase.name),
                            rhs=declbase.default, first=first_assignment))
                        declbase.default = None
                newdecls.append(decl)
            node.declarators = newdecls
            return stats
        except PostParseError as e:
            # An error in a cdef clause is ok, simply remove the declaration
            # and try to move on to report more errors
            self.context.nonfatal_error(e)
            return None

    # Split parallel assignments (a,b = b,a) into separate partial
    # assignments that are executed rhs-first using temps. This
    # restructuring must be applied before type analysis so that known
    # types on rhs and lhs can be matched directly. It is required in
    # the case that the types cannot be coerced to a Python type in
    # order to assign from a tuple.
    def visit_SingleAssignmentNode(self, node):
        self.visitchildren(node)
        return self._visit_assignment_node(node, [node.lhs, node.rhs])

    def visit_CascadedAssignmentNode(self, node):
        self.visitchildren(node)
        return self._visit_assignment_node(node, node.lhs_list + [node.rhs])

    def _visit_assignment_node(self, node, expr_list):
        """Flatten parallel assignments into separate single
        assignments or cascaded assignments.
        """
        if sum([ 1 for expr in expr_list
                 if expr.is_sequence_constructor or expr.is_string_literal ]) < 2:
            # no parallel assignments => nothing to do
            return node

        expr_list_list = []
        flatten_parallel_assignments(expr_list, expr_list_list)
        temp_refs = []
        eliminate_rhs_duplicates(expr_list_list, temp_refs)

        # Rebuild each flattened group as a single or cascaded assignment.
        nodes = []
        for expr_list in expr_list_list:
            lhs_list = expr_list[:-1]
            rhs = expr_list[-1]
            if len(lhs_list) == 1:
                node = Nodes.SingleAssignmentNode(rhs.pos,
                    lhs = lhs_list[0], rhs = rhs)
            else:
                node = Nodes.CascadedAssignmentNode(rhs.pos,
                    lhs_list = lhs_list, rhs = rhs)
            nodes.append(node)

        if len(nodes) == 1:
            assign_node = nodes[0]
        else:
            assign_node = Nodes.ParallelAssignmentNode(nodes[0].pos, stats = nodes)

        if temp_refs:
            # Wrap the assignment in LetNodes (innermost last) so each temp
            # is evaluated before the assignments that use it.
            duplicates_and_temps = [ (temp.expression, temp)
                                     for temp in temp_refs ]
            sort_common_subsequences(duplicates_and_temps)
            for _, temp_ref in duplicates_and_temps[::-1]:
                assign_node = LetNode(temp_ref, assign_node)

        return assign_node

    def _flatten_sequence(self, seq, result):
        # Recursively flatten nested sequence constructors into ``result``.
        for arg in seq.args:
            if arg.is_sequence_constructor:
                self._flatten_sequence(arg, result)
            else:
                result.append(arg)
        return result

    def visit_DelStatNode(self, node):
        self.visitchildren(node)
        # "del (a, (b, c))" is equivalent to "del a, b, c".
        node.args = self._flatten_sequence(node, [])
        return node

    def visit_ExceptClauseNode(self, node):
        if node.is_except_as:
            # except-as must delete NameNode target at the end
            del_target = Nodes.DelStatNode(
                node.pos,
                args=[ExprNodes.NameNode(
                    node.target.pos, name=node.target.name)],
                ignore_nonexisting=True)
            # Wrap the clause body in try/finally so the target is deleted
            # on every exit path.
            node.body = Nodes.StatListNode(
                node.pos,
                stats=[Nodes.TryFinallyStatNode(
                    node.pos,
                    body=node.body,
                    finally_clause=Nodes.StatListNode(
                        node.pos,
                        stats=[del_target]))])
        self.visitchildren(node)
        return node
def eliminate_rhs_duplicates(expr_list_list, ref_node_sequence):
    """Replace rhs items by LetRefNodes if they appear more than once.
    Creates a sequence of LetRefNodes that set up the required temps
    and appends them to ref_node_sequence. The input list is modified
    in-place.

    NOTE: duplicate detection relies on the nodes' own hashing/equality
    behaviour via set/dict membership ('node in seen_nodes').
    """
    seen_nodes = set()
    ref_nodes = {}
    def find_duplicates(node):
        if node.is_literal or node.is_name:
            # no need to replace those; can't include attributes here
            # as their access is not necessarily side-effect free
            return
        if node in seen_nodes:
            # second (or later) occurrence: introduce a temp for it,
            # but only once per distinct node
            if node not in ref_nodes:
                ref_node = LetRefNode(node)
                ref_nodes[node] = ref_node
                ref_node_sequence.append(ref_node)
        else:
            # first occurrence: remember it and recurse into sequence
            # items to find duplicates of the contained expressions
            seen_nodes.add(node)
            if node.is_sequence_constructor:
                for item in node.args:
                    find_duplicates(item)
    for expr_list in expr_list_list:
        rhs = expr_list[-1]
        find_duplicates(rhs)
    if not ref_nodes:
        # no duplicates found => nothing to substitute
        return
    def substitute_nodes(node):
        # replace a duplicated node by its temp reference; otherwise
        # recurse into sequence constructors (in-place)
        if node in ref_nodes:
            return ref_nodes[node]
        elif node.is_sequence_constructor:
            node.args = list(map(substitute_nodes, node.args))
        return node
    # replace nodes inside of the common subexpressions
    for node in ref_nodes:
        if node.is_sequence_constructor:
            node.args = list(map(substitute_nodes, node.args))
    # replace common subexpressions on all rhs items
    for expr_list in expr_list_list:
        expr_list[-1] = substitute_nodes(expr_list[-1])
def sort_common_subsequences(items):
    """Sort items/subsequences so that all items and subsequences that
    an item contains appear before the item itself. This is needed
    because each rhs item must only be evaluated once, so its value
    must be evaluated first and then reused when packing sequences
    that contain it.

    This implies a partial order, and the sort must be stable to
    preserve the original order as much as possible, so we use a
    simple insertion sort (which is very fast for short sequences, the
    normal case in practice).

    'items' is a list of (expression, ResultRefNode) pairs, modified
    in place.
    """
    def contains(seq, x):
        # True if x occurs (by identity) anywhere inside seq,
        # recursing through nested sequence constructors
        for item in seq:
            if item is x:
                return True
            elif item.is_sequence_constructor and contains(item.args, x):
                return True
        return False
    def lower_than(a,b):
        # a must be ordered before b if b is a sequence containing a
        return b.is_sequence_constructor and contains(b.args, a)
    for pos, item in enumerate(items):
        key = item[1] # the ResultRefNode which has already been injected into the sequences
        new_pos = pos
        # scan backwards for the lowest position whose expression
        # contains 'key'; the item must move before all of those
        for i in range(pos-1, -1, -1):
            if lower_than(key, items[i][0]):
                new_pos = i
        if new_pos != pos:
            # shift the intervening items up by one and insert
            for i in range(pos, new_pos, -1):
                items[i] = items[i-1]
            items[new_pos] = item
def unpack_string_to_character_literals(literal):
    """Build a list of single-character literal nodes, one per character
    of *literal*'s value, each of the same node class and source position
    as the original literal, with 'constant_result' set to the character.
    """
    node_class = literal.__class__
    pos = literal.pos
    value = literal.value
    # keep the character values in the same string type as the original
    char_type = value.__class__
    return [
        node_class(pos, value=char_value, constant_result=char_value)
        for char_value in map(char_type, value)
    ]
def flatten_parallel_assignments(input, output):
    # The input is a list of expression nodes, representing the LHSs
    # and RHS of one (possibly cascaded) assignment statement.  For
    # sequence constructors, rearranges the matching parts of both
    # sides into a list of equivalent assignments between the
    # individual elements.  This transformation is applied
    # recursively, so that nested structures get matched as well.
    #
    # Each entry appended to 'output' is itself a [lhs, ..., rhs] list.
    # Unpacking errors are reported via error() and the offending pair
    # is passed through unflattened so later stages can still run.
    rhs = input[-1]
    if (not (rhs.is_sequence_constructor or isinstance(rhs, ExprNodes.UnicodeNode))
            or not sum([lhs.is_sequence_constructor for lhs in input[:-1]])):
        # nothing to unpack: no sequence RHS, or no sequence LHS target
        output.append(input)
        return
    complete_assignments = []
    if rhs.is_sequence_constructor:
        rhs_args = rhs.args
    elif rhs.is_string_literal:
        # unpack "abc" into individual character literals
        rhs_args = unpack_string_to_character_literals(rhs)
    rhs_size = len(rhs_args)
    # one target list per RHS element; filled in from each sequence LHS
    lhs_targets = [[] for _ in range(rhs_size)]
    starred_assignments = []
    for lhs in input[:-1]:
        if not lhs.is_sequence_constructor:
            if lhs.is_starred:
                error(lhs.pos, "starred assignment target must be in a list or tuple")
            # non-sequence target: assign the entire RHS to it as-is
            complete_assignments.append(lhs)
            continue
        lhs_size = len(lhs.args)
        starred_targets = sum([1 for expr in lhs.args if expr.is_starred])
        if starred_targets > 1:
            error(lhs.pos, "more than 1 starred expression in assignment")
            output.append([lhs,rhs])
            continue
        elif lhs_size - starred_targets > rhs_size:
            error(lhs.pos, "need more than %d value%s to unpack"
                  % (rhs_size, (rhs_size != 1) and 's' or ''))
            output.append([lhs,rhs])
            continue
        elif starred_targets:
            map_starred_assignment(lhs_targets, starred_assignments,
                                   lhs.args, rhs_args)
        elif lhs_size < rhs_size:
            error(lhs.pos, "too many values to unpack (expected %d, got %d)"
                  % (lhs_size, rhs_size))
            output.append([lhs,rhs])
            continue
        else:
            # exact 1:1 match: pair each target with its RHS element
            for targets, expr in zip(lhs_targets, lhs.args):
                targets.append(expr)
    if complete_assignments:
        complete_assignments.append(rhs)
        output.append(complete_assignments)
    # recursively flatten partial assignments
    for cascade, rhs in zip(lhs_targets, rhs_args):
        if cascade:
            cascade.append(rhs)
            flatten_parallel_assignments(cascade, output)
    # recursively flatten starred assignments
    for cascade in starred_assignments:
        if cascade[0].is_sequence_constructor:
            flatten_parallel_assignments(cascade, output)
        else:
            output.append(cascade)
def map_starred_assignment(lhs_targets, starred_assignments, lhs_args, rhs_args):
    # Appends the fixed-position LHS targets to the target list that
    # appear left and right of the starred argument.
    #
    # The starred_assignments list receives a new tuple
    # (lhs_target, rhs_values_list) that maps the remaining arguments
    # (those that match the starred target) to a list.
    # left side of the starred target
    for i, (targets, expr) in enumerate(zip(lhs_targets, lhs_args)):
        if expr.is_starred:
            starred = i
            # number of fixed targets to the right of the star
            lhs_remaining = len(lhs_args) - i - 1
            break
        targets.append(expr)
    else:
        # caller guarantees exactly one starred target is present
        raise InternalError("no starred arg found when splitting starred assignment")
    # right side of the starred target: the last 'lhs_remaining'
    # targets match the last RHS elements one-to-one
    for i, (targets, expr) in enumerate(zip(lhs_targets[-lhs_remaining:],
                                            lhs_args[starred + 1:])):
        targets.append(expr)
    # the starred target itself, must be assigned a (potentially empty) list
    target = lhs_args[starred].target # unpack starred node
    starred_rhs = rhs_args[starred:]
    if lhs_remaining:
        # drop the RHS elements consumed by the right-side targets
        starred_rhs = starred_rhs[:-lhs_remaining]
    if starred_rhs:
        pos = starred_rhs[0].pos
    else:
        pos = target.pos
    starred_assignments.append([
        target, ExprNodes.ListNode(pos=pos, args=starred_rhs)])
class PxdPostParse(CythonTransform, SkipDeclarations):
    """
    Basic interpretation/validity checking that should only be
    done on pxd trees.

    A lot of this checking currently happens in the parser; but
    what is listed below happens here.

    - "def" functions are let through only if they fill the
      getbuffer/releasebuffer slots

    - cdef functions are let through only if they are on the
      top level and are declared "inline"
    """
    ERR_INLINE_ONLY = "function definition in pxd file must be declared 'cdef inline'"
    ERR_NOGO_WITH_INLINE = "inline function definition in pxd file cannot be '%s'"

    def __call__(self, node):
        # track whether we are at pxd top level or inside a cdef class
        self.scope_type = 'pxd'
        return super(PxdPostParse, self).__call__(node)

    def visit_CClassDefNode(self, node):
        outer_scope_type = self.scope_type
        self.scope_type = 'cclass'
        self.visitchildren(node)
        self.scope_type = outer_scope_type
        return node

    def visit_FuncDefNode(self, node):
        # FuncDefNode instances always come with an implementation
        # (declarations without one are CVarDefNodes instead).
        err = self._pxd_function_error(node)
        if err is None:
            return node
        self.context.nonfatal_error(PostParseError(node.pos, err))
        return None

    def _pxd_function_error(self, node):
        """Return the error message for a function definition that is
        not allowed in a pxd file, or None if it is acceptable.  As a
        side effect, marks acceptable-looking cdef inline functions
        with 'inline_in_pxd'.
        """
        if isinstance(node, Nodes.CFuncDefNode):
            if (u'inline' not in node.modifiers
                    or self.scope_type not in ('pxd', 'cclass')):
                return self.ERR_INLINE_ONLY
            node.inline_in_pxd = True
            if node.visibility != 'private':
                return self.ERR_NOGO_WITH_INLINE % node.visibility
            if node.api:
                return self.ERR_NOGO_WITH_INLINE % 'api'
            return None  # allow inline function
        if (isinstance(node, Nodes.DefNode) and self.scope_type == 'cclass'
                and node.name in ('__getbuffer__', '__releasebuffer__')):
            return None  # allow the buffer protocol slots as 'def'
        return self.ERR_INLINE_ONLY
class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
    """
    After parsing, directives can be stored in a number of places:
    - #cython-comments at the top of the file (stored in ModuleNode)
    - Command-line arguments overriding these
    - @cython.directivename decorators
    - with cython.directivename: statements
    This transform is responsible for interpreting these various sources
    and store the directive in two ways:
    - Set the directives attribute of the ModuleNode for global directives.
    - Use a CompilerDirectivesNode to override directives for a subtree.
    (The first one is primarily to not have to modify with the tree
    structure, so that ModuleNode stay on top.)
    The directives are stored in dictionaries from name to value in effect.
    Each such dictionary is always filled in for all possible directives,
    using default values where no value is given by the user.
    The available directives are controlled in Options.py.
    Note that we have to run this prior to analysis, and so some minor
    duplication of functionality has to occur: We manually track cimports
    and which names the "cython" module may have been imported to.
    """
    # Maps special "cython.*" names to the unary operator expression
    # node classes they construct.
    unop_method_nodes = {
        'typeof': ExprNodes.TypeofNode,
        'operator.address': ExprNodes.AmpersandNode,
        'operator.dereference': ExprNodes.DereferenceNode,
        'operator.preincrement' : ExprNodes.inc_dec_constructor(True, '++'),
        'operator.predecrement' : ExprNodes.inc_dec_constructor(True, '--'),
        'operator.postincrement': ExprNodes.inc_dec_constructor(False, '++'),
        'operator.postdecrement': ExprNodes.inc_dec_constructor(False, '--'),
        'operator.typeid' : ExprNodes.TypeidNode,
        # For backwards compatibility.
        'address': ExprNodes.AmpersandNode,
    }
    # Same for binary operators.
    binop_method_nodes = {
        'operator.comma' : ExprNodes.c_binop_constructor(','),
    }
    # "cython.*" names that are handled specially elsewhere in the
    # compiler rather than being plain directives.
    special_methods = set(['declare', 'union', 'struct', 'typedef',
                           'sizeof', 'cast', 'pointer', 'compiled',
                           'NULL', 'fused_type', 'parallel'])
    special_methods.update(unop_method_nodes)
    # Names that may be used from the cython.parallel module.
    valid_parallel_directives = set([
        "parallel",
        "prange",
        "threadid",
        #"threadsavailable",
    ])
    def __init__(self, context, compilation_directive_defaults):
        super(InterpretCompilerDirectives, self).__init__(context)
        # names the "cython" module has been (c)imported as
        self.cython_module_names = set()
        # maps local names to the fully qualified directive they refer to
        self.directive_names = {'staticmethod': 'staticmethod'}
        # maps local names to cython.parallel directives
        self.parallel_directives = {}
        # start from the global defaults, overridden by any
        # compilation-wide defaults passed in (e.g. from the command line)
        directives = copy.deepcopy(Options.get_directive_defaults())
        for key, value in compilation_directive_defaults.items():
            directives[_unicode(key)] = copy.deepcopy(value)
        self.directives = directives
    def check_directive_scope(self, pos, directive, scope):
        """Report a nonfatal error if 'directive' is not allowed in
        'scope'; returns False in that case, True otherwise.  Unknown
        directives are reported but still return True.
        """
        legal_scopes = Options.directive_scopes.get(directive, None)
        if legal_scopes and scope not in legal_scopes:
            self.context.nonfatal_error(PostParseError(pos, 'The %s compiler directive '
                                        'is not allowed in %s scope' % (directive, scope)))
            return False
        else:
            if directive not in Options.directive_types:
                error(pos, "Invalid directive: '%s'." % (directive,))
            return True
    # Set up processing and handle the cython: comments.
    def visit_ModuleNode(self, node):
        for key in sorted(node.directive_comments):
            if not self.check_directive_scope(node.pos, key, 'module'):
                # NOTE(review): 'wrong_scope_error' does not appear to be
                # defined on this class or anywhere in this file, so this
                # branch would raise AttributeError if ever taken --
                # verify against the base classes.
                self.wrong_scope_error(node.pos, key, 'module')
                del node.directive_comments[key]
        self.module_scope = node.scope
        self.directives.update(node.directive_comments)
        node.directives = self.directives
        node.parallel_directives = self.parallel_directives
        self.visitchildren(node)
        node.cython_module_names = self.cython_module_names
        return node
    # The following four functions track imports and cimports that
    # begin with "cython"
    def is_cython_directive(self, name):
        # a directive proper, a special method, or a basic C type name
        return (name in Options.directive_types or
                name in self.special_methods or
                PyrexTypes.parse_basic_type(name))
    def is_parallel_directive(self, full_name, pos):
        """
        Checks to see if fullname (e.g. cython.parallel.prange) is a valid
        parallel directive. If it is a star import it also updates the
        parallel_directives.
        """
        result = (full_name + ".").startswith("cython.parallel.")
        if result:
            directive = full_name.split('.')
            if full_name == u"cython.parallel":
                self.parallel_directives[u"parallel"] = u"cython.parallel"
            elif full_name == u"cython.parallel.*":
                for name in self.valid_parallel_directives:
                    self.parallel_directives[name] = u"cython.parallel.%s" % name
            elif (len(directive) != 3 or
                  directive[-1] not in self.valid_parallel_directives):
                error(pos, "No such directive: %s" % full_name)
            # any use of cython.parallel requires thread initialisation
            self.module_scope.use_utility_code(
                UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c"))
        return result
    def visit_CImportStatNode(self, node):
        """Track 'cimport cython' / 'cimport cython.xyz' statements and
        drop them from the tree when they only set up directives.
        """
        if node.module_name == u"cython":
            self.cython_module_names.add(node.as_name or u"cython")
        elif node.module_name.startswith(u"cython."):
            if node.module_name.startswith(u"cython.parallel."):
                error(node.pos, node.module_name + " is not a module")
            if node.module_name == u"cython.parallel":
                if node.as_name and node.as_name != u"cython":
                    self.parallel_directives[node.as_name] = node.module_name
                else:
                    self.cython_module_names.add(u"cython")
                    self.parallel_directives[
                        u"cython.parallel"] = node.module_name
                self.module_scope.use_utility_code(
                    UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c"))
            elif node.as_name:
                # map the alias to the directive name after "cython."
                self.directive_names[node.as_name] = node.module_name[7:]
            else:
                self.cython_module_names.add(u"cython")
            # if this cimport was a compiler directive, we don't
            # want to leave the cimport node sitting in the tree
            return None
        return node
    def visit_FromCImportStatNode(self, node):
        """Handle 'from cython[...] cimport ...', filtering out names
        that are compiler directives.  Drops the node entirely if all
        imported names were directives.
        """
        if not node.relative_level and (
                node.module_name == u"cython" or node.module_name.startswith(u"cython.")):
            submodule = (node.module_name + u".")[7:]
            newimp = []
            for pos, name, as_name, kind in node.imported_names:
                full_name = submodule + name
                qualified_name = u"cython." + full_name
                if self.is_parallel_directive(qualified_name, node.pos):
                    # from cython cimport parallel, or
                    # from cython.parallel cimport parallel, prange, ...
                    self.parallel_directives[as_name or name] = qualified_name
                elif self.is_cython_directive(full_name):
                    self.directive_names[as_name or name] = full_name
                    if kind is not None:
                        self.context.nonfatal_error(PostParseError(pos,
                            "Compiler directive imports must be plain imports"))
                else:
                    newimp.append((pos, name, as_name, kind))
            if not newimp:
                return None
            node.imported_names = newimp
        return node
    def visit_FromImportStatNode(self, node):
        """Same as visit_FromCImportStatNode, for plain Python
        'from cython[...] import ...' statements.
        """
        if (node.module.module_name.value == u"cython") or \
               node.module.module_name.value.startswith(u"cython."):
            submodule = (node.module.module_name.value + u".")[7:]
            newimp = []
            for name, name_node in node.items:
                full_name = submodule + name
                qualified_name = u"cython." + full_name
                if self.is_parallel_directive(qualified_name, node.pos):
                    self.parallel_directives[name_node.name] = qualified_name
                elif self.is_cython_directive(full_name):
                    self.directive_names[name_node.name] = full_name
                else:
                    newimp.append((name, name_node))
            if not newimp:
                return None
            node.items = newimp
        return node
    def visit_SingleAssignmentNode(self, node):
        """Rewrite 'x = __import__-style cython import' assignments
        ('import cython as x' compiles to an assignment) into cimport
        statements so the directive tracking above applies.
        """
        if isinstance(node.rhs, ExprNodes.ImportNode):
            module_name = node.rhs.module_name.value
            is_parallel = (module_name + u".").startswith(u"cython.parallel.")
            if module_name != u"cython" and not is_parallel:
                return node
            module_name = node.rhs.module_name.value
            # NOTE(review): this assumes the assignment target is a
            # NameNode ('node.lhs.name'); presumably guaranteed by the
            # parser for import statements -- verify.
            as_name = node.lhs.name
            node = Nodes.CImportStatNode(node.pos,
                                         module_name = module_name,
                                         as_name = as_name)
            node = self.visit_CImportStatNode(node)
        else:
            self.visitchildren(node)
        return node
    def visit_NameNode(self, node):
        # annotate names that refer to the cython module or to a directive
        if node.name in self.cython_module_names:
            node.is_cython_module = True
        else:
            node.cython_attribute = self.directive_names.get(node.name)
        return node
    def try_to_parse_directives(self, node):
        # If node is the contents of an directive (in a with statement or
        # decorator), returns a list of (directivename, value) pairs.
        # Otherwise, returns None
        if isinstance(node, ExprNodes.CallNode):
            self.visit(node.function)
            optname = node.function.as_cython_attribute()
            if optname:
                directivetype = Options.directive_types.get(optname)
                if directivetype:
                    args, kwds = node.explicit_args_kwds()
                    directives = []
                    key_value_pairs = []
                    if kwds is not None and directivetype is not dict:
                        # keyword arguments may themselves be
                        # sub-directives, e.g. warn(undeclared=True)
                        for keyvalue in kwds.key_value_pairs:
                            key, value = keyvalue
                            sub_optname = "%s.%s" % (optname, key.value)
                            if Options.directive_types.get(sub_optname):
                                directives.append(self.try_to_parse_directive(sub_optname, [value], None, keyvalue.pos))
                            else:
                                key_value_pairs.append(keyvalue)
                        if not key_value_pairs:
                            kwds = None
                        else:
                            kwds.key_value_pairs = key_value_pairs
                        if directives and not kwds and not args:
                            return directives
                    directives.append(self.try_to_parse_directive(optname, args, kwds, node.function.pos))
                    return directives
        elif isinstance(node, (ExprNodes.AttributeNode, ExprNodes.NameNode)):
            self.visit(node)
            optname = node.as_cython_attribute()
            if optname:
                directivetype = Options.directive_types.get(optname)
                if directivetype is bool:
                    # bare name implies True, e.g. @cython.boundscheck
                    return [(optname, True)]
                elif directivetype is None:
                    return [(optname, None)]
                else:
                    raise PostParseError(
                        node.pos, "The '%s' directive should be used as a function call." % optname)
        return None
    def try_to_parse_directive(self, optname, args, kwds, pos):
        """Parse and type-check the arguments of a single directive
        call; returns an (optname, value) pair or raises PostParseError.
        A single None argument resets the directive to its default.
        """
        directivetype = Options.directive_types.get(optname)
        if len(args) == 1 and isinstance(args[0], ExprNodes.NoneNode):
            return optname, Options.get_directive_defaults()[optname]
        elif directivetype is bool:
            if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.BoolNode):
                raise PostParseError(pos,
                    'The %s directive takes one compile-time boolean argument' % optname)
            return (optname, args[0].value)
        elif directivetype is int:
            if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.IntNode):
                raise PostParseError(pos,
                    'The %s directive takes one compile-time integer argument' % optname)
            return (optname, int(args[0].value))
        elif directivetype is str:
            if kwds is not None or len(args) != 1 or not isinstance(
                    args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)):
                raise PostParseError(pos,
                    'The %s directive takes one compile-time string argument' % optname)
            return (optname, str(args[0].value))
        elif directivetype is type:
            if kwds is not None or len(args) != 1:
                raise PostParseError(pos,
                    'The %s directive takes one type argument' % optname)
            return (optname, args[0])
        elif directivetype is dict:
            if len(args) != 0:
                raise PostParseError(pos,
                    'The %s directive takes no prepositional arguments' % optname)
            return optname, dict([(key.value, value) for key, value in kwds.key_value_pairs])
        elif directivetype is list:
            if kwds and len(kwds) != 0:
                raise PostParseError(pos,
                    'The %s directive takes no keyword arguments' % optname)
            return optname, [ str(arg.value) for arg in args ]
        elif callable(directivetype):
            # directive type is a parser/validator function
            if kwds is not None or len(args) != 1 or not isinstance(
                    args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)):
                raise PostParseError(pos,
                    'The %s directive takes one compile-time string argument' % optname)
            return (optname, directivetype(optname, str(args[0].value)))
        else:
            assert False
    def visit_with_directives(self, body, directives):
        """Visit 'body' with 'directives' merged into the currently
        active set, and wrap the result in a CompilerDirectivesNode.
        """
        olddirectives = self.directives
        newdirectives = copy.copy(olddirectives)
        newdirectives.update(directives)
        self.directives = newdirectives
        assert isinstance(body, Nodes.StatListNode), body
        retbody = self.visit_Node(body)
        directive = Nodes.CompilerDirectivesNode(pos=retbody.pos, body=retbody,
                                                 directives=newdirectives)
        self.directives = olddirectives
        return directive
    # Handle decorators
    def visit_FuncDefNode(self, node):
        directives = self._extract_directives(node, 'function')
        if not directives:
            return self.visit_Node(node)
        body = Nodes.StatListNode(node.pos, stats=[node])
        return self.visit_with_directives(body, directives)
    def visit_CVarDefNode(self, node):
        # only a restricted set of decorators makes sense on cdef functions
        directives = self._extract_directives(node, 'function')
        if not directives:
            return node
        for name, value in directives.items():
            if name == 'locals':
                node.directive_locals = value
            elif name not in ('final', 'staticmethod'):
                self.context.nonfatal_error(PostParseError(
                    node.pos,
                    "Cdef functions can only take cython.locals(), "
                    "staticmethod, or final decorators, got %s." % name))
        body = Nodes.StatListNode(node.pos, stats=[node])
        return self.visit_with_directives(body, directives)
    def visit_CClassDefNode(self, node):
        directives = self._extract_directives(node, 'cclass')
        if not directives:
            return self.visit_Node(node)
        body = Nodes.StatListNode(node.pos, stats=[node])
        return self.visit_with_directives(body, directives)
    def visit_CppClassNode(self, node):
        directives = self._extract_directives(node, 'cppclass')
        if not directives:
            return self.visit_Node(node)
        body = Nodes.StatListNode(node.pos, stats=[node])
        return self.visit_with_directives(body, directives)
    def visit_PyClassDefNode(self, node):
        directives = self._extract_directives(node, 'class')
        if not directives:
            return self.visit_Node(node)
        body = Nodes.StatListNode(node.pos, stats=[node])
        return self.visit_with_directives(body, directives)
    def _extract_directives(self, node, scope_name):
        """Split node.decorators into compiler directives (returned as a
        merged name->value dict) and real decorators (left on the node).
        """
        if not node.decorators:
            return {}
        # Split the decorators into two lists -- real decorators and directives
        directives = []
        realdecs = []
        both = []
        for dec in node.decorators:
            new_directives = self.try_to_parse_directives(dec.decorator)
            if new_directives is not None:
                for directive in new_directives:
                    if self.check_directive_scope(node.pos, directive[0], scope_name):
                        name, value = directive
                        if self.directives.get(name, object()) != value:
                            directives.append(directive)
                        if directive[0] == 'staticmethod':
                            # staticmethod is both a directive and a real decorator
                            both.append(dec)
            else:
                realdecs.append(dec)
        if realdecs and isinstance(node, (Nodes.CFuncDefNode, Nodes.CClassDefNode, Nodes.CVarDefNode)):
            raise PostParseError(realdecs[0].pos, "Cdef functions/classes cannot take arbitrary decorators.")
        else:
            node.decorators = realdecs + both
        # merge or override repeated directives
        optdict = {}
        directives.reverse() # Decorators coming first take precedence
        for directive in directives:
            name, value = directive
            if name in optdict:
                old_value = optdict[name]
                # keywords and arg lists can be merged, everything
                # else overrides completely
                if isinstance(old_value, dict):
                    old_value.update(value)
                elif isinstance(old_value, list):
                    old_value.extend(value)
                else:
                    optdict[name] = value
            else:
                optdict[name] = value
        return optdict
    # Handle with statements
    def visit_WithStatNode(self, node):
        directive_dict = {}
        for directive in self.try_to_parse_directives(node.manager) or []:
            if directive is not None:
                if node.target is not None:
                    self.context.nonfatal_error(
                        PostParseError(node.pos, "Compiler directive with statements cannot contain 'as'"))
                else:
                    name, value = directive
                    if name in ('nogil', 'gil'):
                        # special case: in pure mode, "with nogil" spells "with cython.nogil"
                        node = Nodes.GILStatNode(node.pos, state = name, body = node.body)
                        return self.visit_Node(node)
                    if self.check_directive_scope(node.pos, name, 'with statement'):
                        directive_dict[name] = value
        if directive_dict:
            return self.visit_with_directives(node.body, directive_dict)
        return self.visit_Node(node)
class ParallelRangeTransform(CythonTransform, SkipDeclarations):
    """
    Transform cython.parallel stuff. The parallel_directives come from the
    module node, set there by InterpretCompilerDirectives.
    x = cython.parallel.threadavailable()   -> ParallelThreadAvailableNode
    with nogil, cython.parallel.parallel(): -> ParallelWithBlockNode
    print cython.parallel.threadid()        -> ParallelThreadIdNode
    for i in cython.parallel.prange(...):   -> ParallelRangeNode
        ...
    """
    # a list of names, maps 'cython.parallel.prange' in the code to
    # ['cython', 'parallel', 'prange']
    parallel_directive = None
    # Indicates whether a namenode in an expression is the cython module
    namenode_is_cython_module = False
    # Keep track of whether we are the context manager of a 'with' statement
    in_context_manager_section = False
    # One of 'prange' or 'with parallel'. This is used to disallow closely
    # nested 'with parallel:' blocks
    state = None
    # maps the fully qualified directive to the node class it becomes
    directive_to_node = {
        u"cython.parallel.parallel": Nodes.ParallelWithBlockNode,
        # u"cython.parallel.threadsavailable": ExprNodes.ParallelThreadsAvailableNode,
        u"cython.parallel.threadid": ExprNodes.ParallelThreadIdNode,
        u"cython.parallel.prange": Nodes.ParallelRangeNode,
    }
    def node_is_parallel_directive(self, node):
        # either a direct parallel name or an access via the cython module
        return node.name in self.parallel_directives or node.is_cython_module
    def get_directive_class_node(self, node):
        """
        Figure out which parallel directive was used and return the associated
        Node class.
        E.g. for a cython.parallel.prange() call we return ParallelRangeNode
        """
        if self.namenode_is_cython_module:
            # e.g. cython.parallel.prange -> join the dotted path as-is
            directive = '.'.join(self.parallel_directive)
        else:
            # e.g. 'prange' imported under some alias: resolve the alias
            # first, then append any further attribute accesses
            directive = self.parallel_directives[self.parallel_directive[0]]
            directive = '%s.%s' % (directive,
                                   '.'.join(self.parallel_directive[1:]))
            directive = directive.rstrip('.')
        cls = self.directive_to_node.get(directive)
        if cls is None and not (self.namenode_is_cython_module and
                                self.parallel_directive[0] != 'parallel'):
            error(node.pos, "Invalid directive: %s" % directive)
        # reset collection state for the next expression
        self.namenode_is_cython_module = False
        self.parallel_directive = None
        return cls
    def visit_ModuleNode(self, node):
        """
        If any parallel directives were imported, copy them over and visit
        the AST
        """
        if node.parallel_directives:
            self.parallel_directives = node.parallel_directives
            return self.visit_Node(node)
        # No parallel directives were imported, so they can't be used :)
        return node
    def visit_NameNode(self, node):
        # start collecting a dotted parallel directive path
        if self.node_is_parallel_directive(node):
            self.parallel_directive = [node.name]
            self.namenode_is_cython_module = node.is_cython_module
        return node
    def visit_AttributeNode(self, node):
        # extend the dotted path collected by visit_NameNode
        self.visitchildren(node)
        if self.parallel_directive:
            self.parallel_directive.append(node.attribute)
        return node
    def visit_CallNode(self, node):
        self.visit(node.function)
        if not self.parallel_directive:
            return node
        # We are a parallel directive, replace this node with the
        # corresponding ParallelSomethingSomething node
        if isinstance(node, ExprNodes.GeneralCallNode):
            args = node.positional_args.args
            kwargs = node.keyword_args
        else:
            args = node.args
            kwargs = {}
        parallel_directive_class = self.get_directive_class_node(node)
        if parallel_directive_class:
            # Note: in case of a parallel() the body is set by
            # visit_WithStatNode
            node = parallel_directive_class(node.pos, args=args, kwargs=kwargs)
        return node
    def visit_WithStatNode(self, node):
        "Rewrite with cython.parallel.parallel() blocks"
        newnode = self.visit(node.manager)
        if isinstance(newnode, Nodes.ParallelWithBlockNode):
            if self.state == 'parallel with':
                error(node.manager.pos,
                      "Nested parallel with blocks are disallowed")
            self.state = 'parallel with'
            body = self.visit(node.body)
            self.state = None
            newnode.body = body
            return newnode
        elif self.parallel_directive:
            parallel_directive_class = self.get_directive_class_node(node)
            if not parallel_directive_class:
                # There was an error, stop here and now
                return None
            if parallel_directive_class is Nodes.ParallelWithBlockNode:
                error(node.pos, "The parallel directive must be called")
                return None
        node.body = self.visit(node.body)
        return node
    def visit_ForInStatNode(self, node):
        "Rewrite 'for i in cython.parallel.prange(...):'"
        self.visit(node.iterator)
        self.visit(node.target)
        in_prange = isinstance(node.iterator.sequence,
                               Nodes.ParallelRangeNode)
        previous_state = self.state
        if in_prange:
            # This will replace the entire ForInStatNode, so copy the
            # attributes
            parallel_range_node = node.iterator.sequence
            parallel_range_node.target = node.target
            parallel_range_node.body = node.body
            parallel_range_node.else_clause = node.else_clause
            node = parallel_range_node
            if not isinstance(node.target, ExprNodes.NameNode):
                error(node.target.pos,
                      "Can only iterate over an iteration variable")
            self.state = 'prange'
        self.visit(node.body)
        self.state = previous_state
        self.visit(node.else_clause)
        return node
    def visit(self, node):
        "Visit a node that may be None"
        if node is not None:
            return super(ParallelRangeTransform, self).visit(node)
class WithTransform(CythonTransform, SkipDeclarations):
    """Expand 'with' (and 'async with') statements into the explicit
    __enter__/__exit__ (or __aenter__/__aexit__) protocol using
    try/except/finally node structures.
    """
    # NOTE(review): the 'await=...' keyword arguments below are only
    # valid syntax on Python versions where 'await' is not a reserved
    # keyword (< 3.7); this module targets such versions -- verify
    # before running under a newer interpreter.
    def visit_WithStatNode(self, node):
        self.visitchildren(node, 'body')
        pos = node.pos
        is_async = node.is_async
        body, target, manager = node.body, node.target, node.manager
        # MGR.__enter__() / await MGR.__aenter__()
        node.enter_call = ExprNodes.SimpleCallNode(
            pos, function=ExprNodes.AttributeNode(
                pos, obj=ExprNodes.CloneNode(manager),
                attribute=EncodedString('__aenter__' if is_async else '__enter__'),
                is_special_lookup=True),
            args=[],
            is_temp=True)
        if is_async:
            node.enter_call = ExprNodes.AwaitExprNode(pos, arg=node.enter_call)
        if target is not None:
            # prepend 'target = <enter result>' to the body
            body = Nodes.StatListNode(
                pos, stats=[
                    Nodes.WithTargetAssignmentStatNode(
                        pos, lhs=target, with_node=node),
                    body])
        # exc_type, exc_value, traceback passed to __exit__ on error
        excinfo_target = ExprNodes.TupleNode(pos, slow=True, args=[
            ExprNodes.ExcValueNode(pos) for _ in range(3)])
        # except clause: re-raise unless __exit__ returned a true value
        except_clause = Nodes.ExceptClauseNode(
            pos, body=Nodes.IfStatNode(
                pos, if_clauses=[
                    Nodes.IfClauseNode(
                        pos, condition=ExprNodes.NotNode(
                            pos, operand=ExprNodes.WithExitCallNode(
                                pos, with_stat=node,
                                test_if_run=False,
                                args=excinfo_target,
                                await=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
                        body=Nodes.ReraiseStatNode(pos),
                        ),
                    ],
                else_clause=None),
            pattern=None,
            target=None,
            excinfo_target=excinfo_target,
            )
        # try: body / except: see above / finally: __exit__(None, None, None)
        # (the finally call only runs if __exit__ was not already called)
        node.body = Nodes.TryFinallyStatNode(
            pos, body=Nodes.TryExceptStatNode(
                pos, body=body,
                except_clauses=[except_clause],
                else_clause=None,
                ),
            finally_clause=Nodes.ExprStatNode(
                pos, expr=ExprNodes.WithExitCallNode(
                    pos, with_stat=node,
                    test_if_run=True,
                    args=ExprNodes.TupleNode(
                        pos, args=[ExprNodes.NoneNode(pos) for _ in range(3)]),
                    await=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
            handle_error_case=False,
            )
        return node
    def visit_ExprNode(self, node):
        # With statements are never inside expressions.
        return node
class DecoratorTransform(ScopeTrackingTransform, SkipDeclarations):
    """
    Transforms method decorators in cdef classes into nested calls or properties.
    Python-style decorator properties are transformed into a PropertyNode
    with up to the three getter, setter and deleter DefNodes.
    The functional style isn't supported yet.
    """
    # stack of per-cclass dicts mapping property name -> PropertyNode
    _properties = None
    # maps '@x.getter' / '@x.setter' / '@x.deleter' to the slot name
    _map_property_attribute = {
        'getter': '__get__',
        'setter': '__set__',
        'deleter': '__del__',
    }.get
    def visit_CClassDefNode(self, node):
        # push a fresh property namespace for this class
        if self._properties is None:
            self._properties = []
        self._properties.append({})
        super(DecoratorTransform, self).visit_CClassDefNode(node)
        self._properties.pop()
        return node
    def visit_PropertyNode(self, node):
        # Low-level warning for other code until we can convert all our uses over.
        level = 2 if isinstance(node.pos[0], str) else 0
        warning(node.pos, "'property %s:' syntax is deprecated, use '@property'" % node.name, level)
        return node
    def visit_DefNode(self, node):
        """Convert '@property'-decorated methods in cdef classes into
        PropertyNodes; chain all other decorators as nested calls.
        """
        scope_type = self.scope_type
        node = self.visit_FuncDefNode(node)
        if scope_type != 'cclass' or not node.decorators:
            return node
        # transform @property decorators
        properties = self._properties[-1]
        for decorator_node in node.decorators[::-1]:
            decorator = decorator_node.decorator
            if decorator.is_name and decorator.name == 'property':
                if len(node.decorators) > 1:
                    return self._reject_decorated_property(node, decorator_node)
                name = node.name
                node.name = EncodedString('__get__')
                node.decorators.remove(decorator_node)
                stat_list = [node]
                if name in properties:
                    # redefinition of an existing property's getter:
                    # replace its body in place, emit no new statement
                    prop = properties[name]
                    prop.pos = node.pos
                    prop.doc = node.doc
                    prop.body.stats = stat_list
                    return []
                prop = Nodes.PropertyNode(node.pos, name=name)
                prop.doc = node.doc
                prop.body = Nodes.StatListNode(node.pos, stats=stat_list)
                properties[name] = prop
                return [prop]
            elif decorator.is_attribute and decorator.obj.name in properties:
                # '@propname.setter' style on a known property
                handler_name = self._map_property_attribute(decorator.attribute)
                if handler_name:
                    assert decorator.obj.name == node.name
                    if len(node.decorators) > 1:
                        return self._reject_decorated_property(node, decorator_node)
                    return self._add_to_property(properties, node, handler_name, decorator_node)
        # transform normal decorators
        return self.chain_decorators(node, node.decorators, node.name)
    @staticmethod
    def _reject_decorated_property(node, decorator_node):
        # restrict transformation to outermost decorator as wrapped properties will probably not work
        for deco in node.decorators:
            if deco != decorator_node:
                error(deco.pos, "Property methods with additional decorators are not supported")
        return node
    @staticmethod
    def _add_to_property(properties, node, name, decorator):
        """Install *node* as the getter/setter/deleter (slot 'name') of
        an existing PropertyNode, replacing any previous handler.
        """
        prop = properties[node.name]
        node.name = name
        node.decorators.remove(decorator)
        stats = prop.body.stats
        for i, stat in enumerate(stats):
            if stat.name == name:
                stats[i] = node
                break
        else:
            stats.append(node)
        return []
    @staticmethod
    def chain_decorators(node, decorators, name):
        """
        Decorators are applied directly in DefNode and PyClassDefNode to avoid
        reassignments to the function/class name - except for cdef class methods.
        For those, the reassignment is required as methods are originally
        defined in the PyMethodDef struct.
        The IndirectionNode allows DefNode to override the decorator.
        """
        # build 'name = deco1(deco2(...(name)))', innermost decorator first
        decorator_result = ExprNodes.NameNode(node.pos, name=name)
        for decorator in decorators[::-1]:
            decorator_result = ExprNodes.SimpleCallNode(
                decorator.pos,
                function=decorator.decorator,
                args=[decorator_result])
        name_node = ExprNodes.NameNode(node.pos, name=name)
        reassignment = Nodes.SingleAssignmentNode(
            node.pos,
            lhs=name_node,
            rhs=decorator_result)
        reassignment = Nodes.IndirectionNode([reassignment])
        node.decorator_indirection = reassignment
        return [node, reassignment]
class CnameDirectivesTransform(CythonTransform, SkipDeclarations):
    """
    Only part of the CythonUtilityCode pipeline. Must be run before
    DecoratorTransform in case this is a decorator for a cdef class.
    It filters out @cname('my_cname') decorators and rewrites them to
    CnameDecoratorNodes.
    """

    def handle_function(self, node):
        # nothing to do for undecorated nodes
        if not getattr(node, 'decorators', None):
            return self.visit_Node(node)

        for i, decorator in enumerate(node.decorators):
            decorator = decorator.decorator

            if (isinstance(decorator, ExprNodes.CallNode) and
                    decorator.function.is_name and
                    decorator.function.name == 'cname'):
                args, kwargs = decorator.explicit_args_kwds()

                # validate the decorator call: exactly one string literal
                if kwargs:
                    raise AssertionError(
                            "cname decorator does not take keyword arguments")

                if len(args) != 1:
                    raise AssertionError(
                            "cname decorator takes exactly one argument")

                if not (args[0].is_literal and
                        args[0].type == Builtin.str_type):
                    raise AssertionError(
                            "argument to cname decorator must be a string literal")

                cname = args[0].compile_time_value(None)
                # drop the decorator and wrap the node instead; safe to
                # mutate during iteration because we break right after
                del node.decorators[i]
                node = Nodes.CnameDecoratorNode(pos=node.pos, node=node,
                                                cname=cname)
                break

        return self.visit_Node(node)

    # the @cname directive applies to functions, classes, enums and structs
    visit_FuncDefNode = handle_function
    visit_CClassDefNode = handle_function
    visit_CEnumDefNode = handle_function
    visit_CStructOrUnionDefNode = handle_function
class ForwardDeclareTypes(CythonTransform):
    """Pre-declare module-level type names (enums, structs/unions, extension
    types) in the module scope so later declarations can refer to them."""

    def visit_ModuleNode(self, node):
        scope = node.scope
        scope.directives = node.directives
        self.module_scope = scope
        self.visitchildren(node)
        return node

    def visit_CompilerDirectivesNode(self, node):
        # temporarily switch the active directives while visiting children
        env = self.module_scope
        saved_directives = env.directives
        env.directives = node.directives
        self.visitchildren(node)
        env.directives = saved_directives
        return node

    def visit_CDefExternNode(self, node):
        # flag the scope as being inside a 'cdef extern' block for the subtree
        env = self.module_scope
        saved_flag = env.in_cinclude
        env.in_cinclude = 1
        self.visitchildren(node)
        env.in_cinclude = saved_flag
        return node

    def visit_CEnumDefNode(self, node):
        node.declare(self.module_scope)
        return node

    def visit_CStructOrUnionDefNode(self, node):
        if node.name not in self.module_scope.entries:
            node.declare(self.module_scope)
        return node

    def visit_CClassDefNode(self, node):
        if node.class_name not in self.module_scope.entries:
            node.declare(self.module_scope)
        return node
class AnalyseDeclarationsTransform(EnvTransform):
    """Declaration analysis phase.

    Declares names in their scopes, expands functions with fused arguments
    into FusedCFuncDefNodes, synthesizes property accessors for public and
    readonly cdef class attributes, and builds Python wrapper classes for
    visible structs/unions.
    """

    # NOTE: the TreeFragment template strings below are compiled code, not
    # documentation - do not edit them casually.
    basic_property = TreeFragment(u"""
property NAME:
    def __get__(self):
        return ATTR
    def __set__(self, value):
        ATTR = value
    """, level='c_class', pipeline=[NormalizeTree(None)])
    basic_pyobject_property = TreeFragment(u"""
property NAME:
    def __get__(self):
        return ATTR
    def __set__(self, value):
        ATTR = value
    def __del__(self):
        ATTR = None
    """, level='c_class', pipeline=[NormalizeTree(None)])
    basic_property_ro = TreeFragment(u"""
property NAME:
    def __get__(self):
        return ATTR
    """, level='c_class', pipeline=[NormalizeTree(None)])

    struct_or_union_wrapper = TreeFragment(u"""
cdef class NAME:
    cdef TYPE value
    def __init__(self, MEMBER=None):
        cdef int count
        count = 0
        INIT_ASSIGNMENTS
        if IS_UNION and count > 1:
            raise ValueError, "At most one union member should be specified."
    def __str__(self):
        return STR_FORMAT % MEMBER_TUPLE
    def __repr__(self):
        return REPR_FORMAT % MEMBER_TUPLE
    """, pipeline=[NormalizeTree(None)])

    init_assignment = TreeFragment(u"""
if VALUE is not None:
    ATTR = VALUE
    count += 1
    """, pipeline=[NormalizeTree(None)])

    # currently processed FusedCFuncDefNode, used to detect illegal nesting
    fused_function = None
    # lambda nesting depth; fused lambdas are rejected
    in_lambda = 0

    def __call__(self, root):
        # needed to determine if a cdef var is declared after it's used.
        self.seen_vars_stack = []
        self.fused_error_funcs = set()
        super_class = super(AnalyseDeclarationsTransform, self)
        self._super_visit_FuncDefNode = super_class.visit_FuncDefNode
        return super_class.__call__(root)

    def visit_NameNode(self, node):
        # record the name as "used" in the current scope
        self.seen_vars_stack[-1].add(node.name)
        return node

    def visit_ModuleNode(self, node):
        self.seen_vars_stack.append(set())
        node.analyse_declarations(self.current_env())
        self.visitchildren(node)
        self.seen_vars_stack.pop()
        return node

    def visit_LambdaNode(self, node):
        self.in_lambda += 1
        node.analyse_declarations(self.current_env())
        self.visitchildren(node)
        self.in_lambda -= 1
        return node

    def visit_CClassDefNode(self, node):
        node = self.visit_ClassDefNode(node)
        if node.scope and node.scope.implemented and node.body:
            # synthesize property accessors for attributes that need them
            stats = []
            for entry in node.scope.var_entries:
                if entry.needs_property:
                    property = self.create_Property(entry)
                    property.analyse_declarations(node.scope)
                    self.visit(property)
                    stats.append(property)
            if stats:
                node.body.stats += stats
        return node

    def _handle_fused_def_decorators(self, old_decorators, env, node):
        """
        Create function calls to the decorators and reassignments to
        the function.
        """
        # Delete staticmethod and classmethod decorators, this is
        # handled directly by the fused function object.
        decorators = []
        for decorator in old_decorators:
            func = decorator.decorator
            if (not func.is_name or
                    func.name not in ('staticmethod', 'classmethod') or
                    env.lookup_here(func.name)):
                # not a static or classmethod
                decorators.append(decorator)

        if decorators:
            transform = DecoratorTransform(self.context)
            def_node = node.node
            _, reassignments = transform.chain_decorators(
                def_node, decorators, def_node.name)
            reassignments.analyse_declarations(env)
            node = [node, reassignments]

        return node

    def _handle_def(self, decorators, env, node):
        "Handle def or cpdef fused functions"
        # Create PyCFunction nodes for each specialization
        node.stats.insert(0, node.py_func)
        node.py_func = self.visit(node.py_func)
        node.update_fused_defnode_entry(env)
        pycfunc = ExprNodes.PyCFunctionNode.from_defnode(node.py_func, binding=True)
        pycfunc = ExprNodes.ProxyNode(pycfunc.coerce_to_temp(env))
        node.resulting_fused_function = pycfunc
        # Create assignment node for our def function
        node.fused_func_assignment = self._create_assignment(
            node.py_func, ExprNodes.CloneNode(pycfunc), env)

        if decorators:
            node = self._handle_fused_def_decorators(decorators, env, node)

        return node

    def _create_fused_function(self, env, node):
        "Create a fused function for a DefNode with fused arguments"
        from . import FusedNode

        if self.fused_function or self.in_lambda:
            # nesting of fused functions is not supported; report once per
            # offending function and neutralize the body
            if self.fused_function not in self.fused_error_funcs:
                if self.in_lambda:
                    error(node.pos, "Fused lambdas not allowed")
                else:
                    error(node.pos, "Cannot nest fused functions")

            self.fused_error_funcs.add(self.fused_function)

            node.body = Nodes.PassStatNode(node.pos)
            for arg in node.args:
                if arg.type.is_fused:
                    arg.type = arg.type.get_fused_types()[0]

            return node

        decorators = getattr(node, 'decorators', None)
        node = FusedNode.FusedCFuncDefNode(node, env)
        self.fused_function = node
        self.visitchildren(node)
        self.fused_function = None
        if node.py_func:
            node = self._handle_def(decorators, env, node)

        return node

    def _handle_nogil_cleanup(self, lenv, node):
        "Handle cleanup for 'with gil' blocks in nogil functions."
        if lenv.nogil and lenv.has_with_gil_block:
            # Acquire the GIL for cleanup in 'nogil' functions, by wrapping
            # the entire function body in try/finally.
            # The corresponding release will be taken care of by
            # Nodes.FuncDefNode.generate_function_definitions()
            node.body = Nodes.NogilTryFinallyStatNode(
                node.body.pos,
                body=node.body,
                finally_clause=Nodes.EnsureGILNode(node.body.pos),
                finally_except_clause=Nodes.EnsureGILNode(node.body.pos))

    def _handle_fused(self, node):
        # Fused generators are rejected; clear the flag and substitute an
        # empty generator body so processing can continue.
        if node.is_generator and node.has_fused_arguments:
            node.has_fused_arguments = False
            error(node.pos, "Fused generators not supported")
            node.gbody = Nodes.StatListNode(node.pos,
                                            stats=[],
                                            body=Nodes.PassStatNode(node.pos))

        return node.has_fused_arguments

    def visit_FuncDefNode(self, node):
        """
        Analyse a function and its body, as that hasn't happened yet. Also
        analyse the directive_locals set by @cython.locals().

        Then, if we are a function with fused arguments, replace the function
        (after it has declared itself in the symbol table!) with a
        FusedCFuncDefNode, and analyse its children (which are in turn normal
        functions). If we're a normal function, just analyse the body of the
        function.
        """
        env = self.current_env()

        self.seen_vars_stack.append(set())
        lenv = node.local_scope
        node.declare_arguments(lenv)

        # @cython.locals(...)
        for var, type_node in node.directive_locals.items():
            if not lenv.lookup_here(var):   # don't redeclare args
                type = type_node.analyse_as_type(lenv)
                if type:
                    lenv.declare_var(var, type, type_node.pos)
                else:
                    error(type_node.pos, "Not a type")

        if self._handle_fused(node):
            node = self._create_fused_function(env, node)
        else:
            node.body.analyse_declarations(lenv)
            self._handle_nogil_cleanup(lenv, node)
            self._super_visit_FuncDefNode(node)

        self.seen_vars_stack.pop()
        return node

    def visit_DefNode(self, node):
        node = self.visit_FuncDefNode(node)
        env = self.current_env()
        # wrapper functions are assigned in the parent scope
        if isinstance(node, Nodes.DefNode) and node.is_wrapper:
            env = env.parent_scope
        if (not isinstance(node, Nodes.DefNode) or
                node.fused_py_func or node.is_generator_body or
                not node.needs_assignment_synthesis(env)):
            return node
        return [node, self._synthesize_assignment(node, env)]

    def visit_GeneratorBodyDefNode(self, node):
        return self.visit_FuncDefNode(node)

    def _synthesize_assignment(self, node, env):
        # Synthesize assignment node and put it right after defnode
        genv = env
        while genv.is_py_class_scope or genv.is_c_class_scope:
            genv = genv.outer_scope

        if genv.is_closure_scope:
            rhs = node.py_cfunc_node = ExprNodes.InnerFunctionNode(
                node.pos, def_node=node,
                pymethdef_cname=node.entry.pymethdef_cname,
                code_object=ExprNodes.CodeObjectNode(node))
        else:
            binding = self.current_directives.get('binding')
            rhs = ExprNodes.PyCFunctionNode.from_defnode(node, binding)
            node.code_object = rhs.code_object

        if env.is_py_class_scope:
            # methods in Python classes always bind
            rhs.binding = True

        node.is_cyfunction = rhs.binding
        return self._create_assignment(node, rhs, env)

    def _create_assignment(self, def_node, rhs, env):
        # apply any decorators as nested calls around the rhs
        if def_node.decorators:
            for decorator in def_node.decorators[::-1]:
                rhs = ExprNodes.SimpleCallNode(
                    decorator.pos,
                    function = decorator.decorator,
                    args = [rhs])
            def_node.decorators = None

        assmt = Nodes.SingleAssignmentNode(
            def_node.pos,
            lhs=ExprNodes.NameNode(def_node.pos, name=def_node.name),
            rhs=rhs)
        assmt.analyse_declarations(env)
        return assmt

    def visit_ScopedExprNode(self, node):
        env = self.current_env()
        node.analyse_declarations(env)
        # the node may or may not have a local scope
        if node.has_local_scope:
            self.seen_vars_stack.append(set(self.seen_vars_stack[-1]))
            self.enter_scope(node, node.expr_scope)
            node.analyse_scoped_declarations(node.expr_scope)
            self.visitchildren(node)
            self.exit_scope()
            self.seen_vars_stack.pop()
        else:
            node.analyse_scoped_declarations(env)
            self.visitchildren(node)
        return node

    def visit_TempResultFromStatNode(self, node):
        self.visitchildren(node)
        node.analyse_declarations(self.current_env())
        return node

    def visit_CppClassNode(self, node):
        if node.visibility == 'extern':
            return None
        else:
            return self.visit_ClassDefNode(node)

    def visit_CStructOrUnionDefNode(self, node):
        # Create a wrapper node if needed.
        # We want to use the struct type information (so it can't happen
        # before this phase) but also create new objects to be declared
        # (so it can't happen later).
        # Note that we don't return the original node, as it is
        # never used after this phase.
        # NOTE(review): the guard below makes everything after 'return None'
        # unreachable - wrapper generation appears to be disabled; confirm
        # whether this is intentional before relying on the code below.
        if True: # private (default)
            return None

        self_value = ExprNodes.AttributeNode(
            pos = node.pos,
            obj = ExprNodes.NameNode(pos=node.pos, name=u"self"),
            attribute = EncodedString(u"value"))
        var_entries = node.entry.type.scope.var_entries
        attributes = []
        for entry in var_entries:
            attributes.append(ExprNodes.AttributeNode(pos = entry.pos,
                                                      obj = self_value,
                                                      attribute = entry.name))
        # __init__ assignments
        init_assignments = []
        for entry, attr in zip(var_entries, attributes):
            # TODO: branch on visibility
            init_assignments.append(self.init_assignment.substitute({
                    u"VALUE": ExprNodes.NameNode(entry.pos, name = entry.name),
                    u"ATTR": attr,
                }, pos = entry.pos))

        # create the class
        str_format = u"%s(%s)" % (node.entry.type.name, ("%s, " * len(attributes))[:-2])
        wrapper_class = self.struct_or_union_wrapper.substitute({
            u"INIT_ASSIGNMENTS": Nodes.StatListNode(node.pos, stats = init_assignments),
            u"IS_UNION": ExprNodes.BoolNode(node.pos, value = not node.entry.type.is_struct),
            u"MEMBER_TUPLE": ExprNodes.TupleNode(node.pos, args=attributes),
            u"STR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format)),
            u"REPR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format.replace("%s", "%r"))),
        }, pos = node.pos).stats[0]
        wrapper_class.class_name = node.name
        wrapper_class.shadow = True
        class_body = wrapper_class.body.stats

        # fix value type
        assert isinstance(class_body[0].base_type, Nodes.CSimpleBaseTypeNode)
        class_body[0].base_type.name = node.name

        # fix __init__ arguments
        init_method = class_body[1]
        assert isinstance(init_method, Nodes.DefNode) and init_method.name == '__init__'
        arg_template = init_method.args[1]
        if not node.entry.type.is_struct:
            arg_template.kw_only = True
        del init_method.args[1]
        for entry, attr in zip(var_entries, attributes):
            arg = copy.deepcopy(arg_template)
            arg.declarator.name = entry.name
            init_method.args.append(arg)

        # setters/getters
        for entry, attr in zip(var_entries, attributes):
            # TODO: branch on visibility
            if entry.type.is_pyobject:
                template = self.basic_pyobject_property
            else:
                template = self.basic_property
            property = template.substitute({
                    u"ATTR": attr,
                }, pos = entry.pos).stats[0]
            property.name = entry.name
            wrapper_class.body.stats.append(property)

        wrapper_class.analyse_declarations(self.current_env())
        return self.visit_CClassDefNode(wrapper_class)

    # Some nodes are no longer needed after declaration
    # analysis and can be dropped. The analysis was performed
    # on these nodes in a separate recursive process from the
    # enclosing function or module, so we can simply drop them.
    def visit_CDeclaratorNode(self, node):
        # necessary to ensure that all CNameDeclaratorNodes are visited.
        self.visitchildren(node)
        return node

    def visit_CTypeDefNode(self, node):
        return node

    def visit_CBaseTypeNode(self, node):
        return None

    def visit_CEnumDefNode(self, node):
        if node.visibility == 'public':
            return node
        else:
            return None

    def visit_CNameDeclaratorNode(self, node):
        if node.name in self.seen_vars_stack[-1]:
            entry = self.current_env().lookup(node.name)
            # precedence note: this reads as
            # entry is None or (visibility != 'extern' and not c_class_scope)
            if (entry is None or entry.visibility != 'extern'
                    and not entry.scope.is_c_class_scope):
                warning(node.pos, "cdef variable '%s' declared after it is used" % node.name, 2)
        self.visitchildren(node)
        return node

    def visit_CVarDefNode(self, node):
        # to ensure all CNameDeclaratorNodes are visited.
        self.visitchildren(node)
        return None

    def visit_CnameDecoratorNode(self, node):
        child_node = self.visit(node.node)
        if not child_node:
            return None
        if type(child_node) is list: # Assignment synthesized
            node.child_node = child_node[0]
            return [node] + child_node[1:]
        node.node = child_node
        return node

    def create_Property(self, entry):
        # NOTE(review): assumes entry.visibility is 'public' or 'readonly'
        # (guaranteed by entry.needs_property upstream); otherwise 'template'
        # would be unbound here.
        if entry.visibility == 'public':
            if entry.type.is_pyobject:
                template = self.basic_pyobject_property
            else:
                template = self.basic_property
        elif entry.visibility == 'readonly':
            template = self.basic_property_ro
        property = template.substitute({
                u"ATTR": ExprNodes.AttributeNode(pos=entry.pos,
                                                 obj=ExprNodes.NameNode(pos=entry.pos, name="self"),
                                                 attribute=entry.name),
            }, pos=entry.pos).stats[0]
        property.name = entry.name
        property.doc = entry.doc
        return property
class CalculateQualifiedNamesTransform(EnvTransform):
    """
    Calculate and store the '__qualname__' and the global
    module name on some nodes.
    """
    def visit_ModuleNode(self, node):
        self.module_name = self.global_scope().qualified_name
        self.qualified_name = []
        _super = super(CalculateQualifiedNamesTransform, self)
        self._super_visit_FuncDefNode = _super.visit_FuncDefNode
        self._super_visit_ClassDefNode = _super.visit_ClassDefNode
        self.visitchildren(node)
        return node

    def _set_qualname(self, node, name=None):
        # store the dotted qualname (optionally extended by *name*) on node
        if name:
            qualname = self.qualified_name[:]
            qualname.append(name)
        else:
            qualname = self.qualified_name
        node.qualname = EncodedString('.'.join(qualname))
        node.module_name = self.module_name

    def _append_entry(self, entry):
        # module-level names restart the qualname path
        if entry.is_pyglobal and not entry.is_pyclass_attr:
            self.qualified_name = [entry.name]
        else:
            self.qualified_name.append(entry.name)

    def visit_ClassNode(self, node):
        self._set_qualname(node, node.name)
        self.visitchildren(node)
        return node

    def visit_PyClassNamespaceNode(self, node):
        # class name was already added by parent node
        self._set_qualname(node)
        self.visitchildren(node)
        return node

    def visit_PyCFunctionNode(self, node):
        orig_qualified_name = self.qualified_name[:]
        # wrapper functions drop the trailing '<locals>' from their qualname
        if node.def_node.is_wrapper and self.qualified_name and self.qualified_name[-1] == '<locals>':
            self.qualified_name.pop()
            self._set_qualname(node)
        else:
            self._set_qualname(node, node.def_node.name)
        self.visitchildren(node)
        self.qualified_name = orig_qualified_name
        return node

    def visit_DefNode(self, node):
        if node.is_wrapper and self.qualified_name:
            assert self.qualified_name[-1] == '<locals>', self.qualified_name
            orig_qualified_name = self.qualified_name[:]
            self.qualified_name.pop()
            self._set_qualname(node)
            self._super_visit_FuncDefNode(node)
            self.qualified_name = orig_qualified_name
        else:
            self._set_qualname(node, node.name)
            self.visit_FuncDefNode(node)
        return node

    def visit_FuncDefNode(self, node):
        orig_qualified_name = self.qualified_name[:]
        if getattr(node, 'name', None) == '<lambda>':
            self.qualified_name.append('<lambda>')
        else:
            self._append_entry(node.entry)
        # names defined inside this function live under '<locals>'
        self.qualified_name.append('<locals>')
        self._super_visit_FuncDefNode(node)
        self.qualified_name = orig_qualified_name
        return node

    def visit_ClassDefNode(self, node):
        orig_qualified_name = self.qualified_name[:]
        entry = (getattr(node, 'entry', None) or             # PyClass
                 self.current_env().lookup_here(node.name))  # CClass
        self._append_entry(entry)
        self._super_visit_ClassDefNode(node)
        self.qualified_name = orig_qualified_name
        return node
class AnalyseExpressionsTransform(CythonTransform):
    """Run type inference and expression analysis over module, function
    and scoped-expression bodies."""

    def visit_ModuleNode(self, node):
        node.scope.infer_types()
        node.body = node.body.analyse_expressions(node.scope)
        self.visitchildren(node)
        return node

    def visit_FuncDefNode(self, node):
        node.local_scope.infer_types()
        node.body = node.body.analyse_expressions(node.local_scope)
        self.visitchildren(node)
        return node

    def visit_ScopedExprNode(self, node):
        if node.has_local_scope:
            node.expr_scope.infer_types()
            node = node.analyse_scoped_expressions(node.expr_scope)
        self.visitchildren(node)
        return node

    def visit_IndexNode(self, node):
        """
        Replace index nodes used to specialize cdef functions with fused
        argument types with the Attribute- or NameNode referring to the
        function. We then need to copy over the specialization properties to
        the attribute or name node.

        Because the indexing might be a Python indexing operation on a fused
        function, or (usually) a Cython indexing operation, we need to
        re-analyse the types.
        """
        self.visit_Node(node)
        if node.is_fused_index and not node.type.is_error:
            node = node.base
        return node
class FindInvalidUseOfFusedTypes(CythonTransform):
    """Report uses of fused types outside functions with fused arguments."""

    def visit_FuncDefNode(self, node):
        if node.has_fused_arguments:
            # Errors related to use in functions with fused args will already
            # have been detected
            return node
        fused_return = (not node.is_generator_body
                        and node.return_type.is_fused)
        if fused_return:
            error(node.pos, "Return type is not specified as argument type")
        else:
            self.visitchildren(node)
        return node

    def visit_ExprNode(self, node):
        expr_type = node.type
        if expr_type and expr_type.is_fused:
            error(node.pos, "Invalid use of fused types, type cannot be specialized")
        else:
            self.visitchildren(node)
        return node
class ExpandInplaceOperators(EnvTransform):
    """Rewrite in-place assignments (``x += y``) into plain assignments with
    an explicit binop, using LetRefNodes so subexpressions with side effects
    are evaluated only once."""

    def visit_InPlaceAssignmentNode(self, node):
        lhs = node.lhs
        rhs = node.rhs
        if lhs.type.is_cpp_class:
            # No getting around this exact operator here.
            return node
        if isinstance(lhs, ExprNodes.BufferIndexNode):
            # There is code to handle this case in InPlaceAssignmentNode
            return node

        env = self.current_env()

        def side_effect_free_reference(node, setting=False):
            # Return a node that can safely be evaluated twice, plus the
            # list of LetRefNode temps that were introduced to achieve that.
            if node.is_name:
                return node, []
            elif node.type.is_pyobject and not setting:
                node = LetRefNode(node)
                return node, [node]
            elif node.is_subscript:
                base, temps = side_effect_free_reference(node.base)
                index = LetRefNode(node.index)
                return ExprNodes.IndexNode(node.pos, base=base, index=index), temps + [index]
            elif node.is_attribute:
                obj, temps = side_effect_free_reference(node.obj)
                return ExprNodes.AttributeNode(node.pos, obj=obj, attribute=node.attribute), temps
            elif isinstance(node, ExprNodes.BufferIndexNode):
                raise ValueError("Don't allow things like attributes of buffer indexing operations")
            else:
                node = LetRefNode(node)
                return node, [node]

        try:
            lhs, let_ref_nodes = side_effect_free_reference(lhs, setting=True)
        except ValueError:
            # cannot expand safely - leave the in-place node as-is
            return node
        dup = lhs.__class__(**lhs.__dict__)
        binop = ExprNodes.binop_node(node.pos,
                                     operator = node.operator,
                                     operand1 = dup,
                                     operand2 = rhs,
                                     inplace=True)
        # Manually analyse types for new node.
        lhs.analyse_target_types(env)
        dup.analyse_types(env)
        binop.analyse_operation(env)
        node = Nodes.SingleAssignmentNode(
            node.pos,
            lhs = lhs,
            rhs=binop.coerce_to(lhs.type, env))
        # Use LetRefNode to avoid side effects.
        let_ref_nodes.reverse()
        for t in let_ref_nodes:
            node = LetNode(t, node)
        return node

    def visit_ExprNode(self, node):
        # In-place assignments can't happen within an expression.
        return node
class AdjustDefByDirectives(CythonTransform, SkipDeclarations):
    """
    Adjust function and class definitions by the decorator directives:

    @cython.cfunc
    @cython.cclass
    @cython.ccall
    @cython.inline
    """

    def visit_ModuleNode(self, node):
        self.directives = node.directives
        self.in_py_class = False
        self.visitchildren(node)
        return node

    def visit_CompilerDirectivesNode(self, node):
        # save/restore the directives in effect for this subtree
        old_directives = self.directives
        self.directives = node.directives
        self.visitchildren(node)
        self.directives = old_directives
        return node

    def visit_DefNode(self, node):
        modifiers = []
        if 'inline' in self.directives:
            modifiers.append('inline')
        if 'ccall' in self.directives:
            # @cython.ccall -> overridable (cpdef-like) C function
            node = node.as_cfunction(
                overridable=True, returns=self.directives.get('returns'), modifiers=modifiers)
            return self.visit(node)
        if 'cfunc' in self.directives:
            # @cython.cfunc -> plain cdef function; not valid inside py classes
            if self.in_py_class:
                error(node.pos, "cfunc directive is not allowed here")
            else:
                node = node.as_cfunction(
                    overridable=False, returns=self.directives.get('returns'), modifiers=modifiers)
                return self.visit(node)
        if 'inline' in modifiers:
            error(node.pos, "Python functions cannot be declared 'inline'")
        self.visitchildren(node)
        return node

    def visit_PyClassDefNode(self, node):
        if 'cclass' in self.directives:
            node = node.as_cclass()
            return self.visit(node)
        else:
            old_in_pyclass = self.in_py_class
            self.in_py_class = True
            self.visitchildren(node)
            self.in_py_class = old_in_pyclass
            return node

    def visit_CClassDefNode(self, node):
        old_in_pyclass = self.in_py_class
        self.in_py_class = False
        self.visitchildren(node)
        self.in_py_class = old_in_pyclass
        return node
class AlignFunctionDefinitions(CythonTransform):
    """
    This class takes the signatures from a .pxd file and applies them to
    the def methods in a .py file.
    """

    def visit_ModuleNode(self, node):
        self.scope = node.scope
        self.directives = node.directives
        self.imported_names = set()  # hack, see visit_FromImportStatNode()
        self.visitchildren(node)
        return node

    def visit_PyClassDefNode(self, node):
        # Convert a Python class that the .pxd declared as cdef class;
        # any other pre-existing (non-builtin) declaration is an error.
        pxd_def = self.scope.lookup(node.name)
        if pxd_def:
            if pxd_def.is_cclass:
                return self.visit_CClassDefNode(node.as_cclass(), pxd_def)
            elif not pxd_def.scope or not pxd_def.scope.is_builtin_scope:
                error(node.pos, "'%s' redeclared" % node.name)
                if pxd_def.pos:
                    error(pxd_def.pos, "previous declaration here")
                return None
        return node

    def visit_CClassDefNode(self, node, pxd_def=None):
        # Align the class body against the pxd class scope, if any.
        if pxd_def is None:
            pxd_def = self.scope.lookup(node.class_name)
        if pxd_def:
            if not pxd_def.defined_in_pxd:
                return node
            outer_scope = self.scope
            self.scope = pxd_def.type.scope
        self.visitchildren(node)
        if pxd_def:
            self.scope = outer_scope
        return node

    def visit_DefNode(self, node):
        # Turn the def function into a cdef/cpdef function if the .pxd file
        # declared it, or auto-cpdef it when the directive requests that.
        pxd_def = self.scope.lookup(node.name)
        if pxd_def and (not pxd_def.scope or not pxd_def.scope.is_builtin_scope):
            if not pxd_def.is_cfunction:
                error(node.pos, "'%s' redeclared" % node.name)
                if pxd_def.pos:
                    error(pxd_def.pos, "previous declaration here")
                return None
            node = node.as_cfunction(pxd_def)
        elif (self.scope.is_module_scope and self.directives['auto_cpdef']
              and node.name not in self.imported_names
              and node.is_cdef_func_compatible()):
            # FIXME: cpdef-ing should be done in analyse_declarations()
            node = node.as_cfunction(scope=self.scope)
        # Enable this when nested cdef functions are allowed.
        # self.visitchildren(node)
        return node

    def visit_FromImportStatNode(self, node):
        # hack to prevent conditional import fallback functions from
        # being cpdef-ed (global Python variables currently conflict
        # with imports)
        if self.scope.is_module_scope:
            for name, _ in node.items:
                self.imported_names.add(name)
        return node

    def visit_ExprNode(self, node):
        # ignore lambdas and everything else that appears in expressions
        return node
class RemoveUnreachableCode(CythonTransform):
    """Drop statements that follow a terminator (return/raise/break/...)
    inside a statement list and propagate ``is_terminator`` upwards through
    control structures."""

    def visit_StatListNode(self, node):
        if not self.current_directives['remove_unreachable']:
            return node
        self.visitchildren(node)
        # enumerate from 1 so 'idx' is the number of statements to keep
        # when truncating after a terminator
        for idx, stat in enumerate(node.stats, 1):
            if stat.is_terminator:
                if idx < len(node.stats):
                    if self.current_directives['warn.unreachable']:
                        warning(node.stats[idx].pos, "Unreachable code", 2)
                    node.stats = node.stats[:idx]
                node.is_terminator = True
                break
        return node

    def visit_IfClauseNode(self, node):
        self.visitchildren(node)
        if node.body.is_terminator:
            node.is_terminator = True
        return node

    def visit_IfStatNode(self, node):
        self.visitchildren(node)
        # an if statement terminates only if the else clause and all
        # if clauses terminate
        if node.else_clause and node.else_clause.is_terminator:
            for clause in node.if_clauses:
                if not clause.is_terminator:
                    break
            else:
                node.is_terminator = True
        return node

    def visit_TryExceptStatNode(self, node):
        self.visitchildren(node)
        # the else clause can never run if the try body always terminates
        if node.body.is_terminator and node.else_clause:
            if self.current_directives['warn.unreachable']:
                warning(node.else_clause.pos, "Unreachable code", 2)
            node.else_clause = None
        return node
class YieldNodeCollector(TreeVisitor):
    """Collect the 'yield', 'await' and 'return' nodes of a single function
    body, without descending into nested functions, classes or lambdas."""

    def __init__(self):
        super(YieldNodeCollector, self).__init__()
        self.yields = []
        self.awaits = []
        self.returns = []
        self.has_return_value = False

    def visit_Node(self, node):
        self.visitchildren(node)

    def visit_YieldExprNode(self, node):
        self.yields.append(node)
        self.visitchildren(node)

    def visit_AwaitExprNode(self, node):
        self.awaits.append(node)
        self.visitchildren(node)

    def visit_ReturnStatNode(self, node):
        self.visitchildren(node)
        if node.value:
            self.has_return_value = True
        self.returns.append(node)

    # do not descend into nested scopes - their yields belong to them
    def visit_ClassDefNode(self, node):
        pass

    def visit_FuncDefNode(self, node):
        pass

    def visit_LambdaNode(self, node):
        pass

    def visit_GeneratorExpressionNode(self, node):
        pass

    def visit_CArgDeclNode(self, node):
        # do not look into annotations
        # FIXME: support (yield) in default arguments (currently crashes)
        pass
class MarkClosureVisitor(CythonTransform):
    """Mark functions that need a closure and replace generator/coroutine
    DefNodes by GeneratorDefNode/AsyncDefNode wrappers."""

    def visit_ModuleNode(self, node):
        self.needs_closure = False
        self.visitchildren(node)
        return node

    def visit_FuncDefNode(self, node):
        self.needs_closure = False
        self.visitchildren(node)
        node.needs_closure = self.needs_closure
        self.needs_closure = True

        collector = YieldNodeCollector()
        collector.visitchildren(node)

        if node.is_async_def:
            # coroutines may 'await' but not 'yield'
            if collector.yields:
                error(collector.yields[0].pos, "'yield' not allowed in async coroutines (use 'await')")
            yields = collector.awaits
        elif collector.yields:
            # generators may 'yield' but not 'await'
            if collector.awaits:
                # bug fix: report at the offending 'await' expression, not
                # at the first 'yield' of the generator
                error(collector.awaits[0].pos, "'await' not allowed in generators (use 'yield')")
            yields = collector.yields
        else:
            # plain function - nothing more to do
            return node

        # number the yield points (label 0 is the function entry point)
        for i, yield_expr in enumerate(yields, 1):
            yield_expr.label_num = i
        for retnode in collector.returns:
            retnode.in_generator = True

        gbody = Nodes.GeneratorBodyDefNode(
            pos=node.pos, name=node.name, body=node.body)
        coroutine = (Nodes.AsyncDefNode if node.is_async_def else Nodes.GeneratorDefNode)(
            pos=node.pos, name=node.name, args=node.args,
            star_arg=node.star_arg, starstar_arg=node.starstar_arg,
            doc=node.doc, decorators=node.decorators,
            gbody=gbody, lambda_name=node.lambda_name)
        return coroutine

    def visit_CFuncDefNode(self, node):
        self.needs_closure = False
        self.visitchildren(node)
        node.needs_closure = self.needs_closure
        self.needs_closure = True
        if node.needs_closure and node.overridable:
            error(node.pos, "closures inside cpdef functions not yet supported")
        return node

    def visit_LambdaNode(self, node):
        self.needs_closure = False
        self.visitchildren(node)
        node.needs_closure = self.needs_closure
        self.needs_closure = True
        return node

    def visit_ClassDefNode(self, node):
        self.visitchildren(node)
        self.needs_closure = True
        return node
class CreateClosureClasses(CythonTransform):
# Output closure classes in module scope for all functions
# that really need it.
def __init__(self, context):
super(CreateClosureClasses, self).__init__(context)
self.path = []
self.in_lambda = False
def visit_ModuleNode(self, node):
self.module_scope = node.scope
self.visitchildren(node)
return node
def find_entries_used_in_closures(self, node):
from_closure = []
in_closure = []
for name, entry in node.local_scope.entries.items():
if entry.from_closure:
from_closure.append((name, entry))
elif entry.in_closure:
in_closure.append((name, entry))
return from_closure, in_closure
def create_class_from_scope(self, node, target_module_scope, inner_node=None):
# move local variables into closure
if node.is_generator:
for entry in node.local_scope.entries.values():
if not entry.from_closure:
entry.in_closure = True
from_closure, in_closure = self.find_entries_used_in_closures(node)
in_closure.sort()
# Now from the begining
node.needs_closure = False
node.needs_outer_scope = False
func_scope = node.local_scope
cscope = node.entry.scope
while cscope.is_py_class_scope or cscope.is_c_class_scope:
cscope = cscope.outer_scope
if not from_closure and (self.path or inner_node):
if not inner_node:
if not node.py_cfunc_node:
raise InternalError("DefNode does not have assignment node")
inner_node = node.py_cfunc_node
inner_node.needs_self_code = False
node.needs_outer_scope = False
if node.is_generator:
pass
elif not in_closure and not from_closure:
return
elif not in_closure:
func_scope.is_passthrough = True
func_scope.scope_class = cscope.scope_class
node.needs_outer_scope = True
return
as_name = '%s_%s' % (
target_module_scope.next_id(Naming.closure_class_prefix),
node.entry.cname)
entry = target_module_scope.declare_c_class(
name=as_name, pos=node.pos, defining=True,
implementing=True)
entry.type.is_final_type = True
func_scope.scope_class = entry
class_scope = entry.type.scope
class_scope.is_internal = True
if Options.closure_freelist_size:
class_scope.directives['freelist'] = Options.closure_freelist_size
if from_closure:
assert cscope.is_closure_scope
class_scope.declare_var(pos=node.pos,
name=Naming.outer_scope_cname,
cname=Naming.outer_scope_cname,
type=cscope.scope_class.type,
is_cdef=True)
node.needs_outer_scope = True
for name, entry in in_closure:
closure_entry = class_scope.declare_var(pos=entry.pos,
name=entry.name,
cname=entry.cname,
type=entry.type,
is_cdef=True)
if entry.is_declared_generic:
closure_entry.is_declared_generic = 1
node.needs_closure = True
# Do it here because other classes are already checked
target_module_scope.check_c_class(func_scope.scope_class)
def visit_LambdaNode(self, node):
if not isinstance(node.def_node, Nodes.DefNode):
# fused function, an error has been previously issued
return node
was_in_lambda = self.in_lambda
self.in_lambda = True
self.create_class_from_scope(node.def_node, self.module_scope, node)
self.visitchildren(node)
self.in_lambda = was_in_lambda
return node
def visit_FuncDefNode(self, node):
    """Build a closure class for this function when one is required."""
    # Inside a lambda body the closure class was already created by
    # visit_LambdaNode; only recurse into the children here.
    if self.in_lambda:
        self.visitchildren(node)
        return node
    needs_class = node.needs_closure or bool(self.path)
    if needs_class:
        self.create_class_from_scope(node, self.module_scope)
        self.path.append(node)
        self.visitchildren(node)
        self.path.pop()
    return node
def visit_GeneratorBodyDefNode(self, node):
    """Generator bodies need no closure class of their own; just recurse."""
    self.visitchildren(node)
    return node
def visit_CFuncDefNode(self, node):
    """Dispatch cdef/cpdef functions for closure handling."""
    # Overridable (cpdef) functions only get their children visited;
    # plain cdef functions go through the normal FuncDefNode path.
    if node.overridable:
        self.visitchildren(node)
        return node
    return self.visit_FuncDefNode(node)
class GilCheck(VisitorTransform):
    """
    Call `node.gil_check(env)` on each node to make sure we hold the
    GIL when we need it. Raise an error when on Python operations
    inside a `nogil` environment.
    Additionally, raise exceptions for closely nested with gil or with nogil
    statements. The latter would abort Python.
    """

    def __call__(self, root):
        # Stack of scopes; the innermost one is passed to nogil_check().
        self.env_stack = [root.scope]
        # Whether the GIL is released at the current point of the tree walk.
        self.nogil = False
        # True for 'cdef func() nogil:' functions, as the GIL may be held while
        # calling this function (thus contained 'nogil' blocks may be valid).
        self.nogil_declarator_only = False
        return super(GilCheck, self).__call__(root)

    def visit_FuncDefNode(self, node):
        self.env_stack.append(node.local_scope)
        # Save/restore the nogil state around the function body.
        was_nogil = self.nogil
        self.nogil = node.local_scope.nogil
        if self.nogil:
            self.nogil_declarator_only = True
        if self.nogil and node.nogil_check:
            node.nogil_check(node.local_scope)
        self.visitchildren(node)
        # This cannot be nested, so it doesn't need backup/restore
        self.nogil_declarator_only = False
        self.env_stack.pop()
        self.nogil = was_nogil
        return node

    def visit_GILStatNode(self, node):
        # A 'with gil' / 'with nogil' block.
        if self.nogil and node.nogil_check:
            node.nogil_check()
        was_nogil = self.nogil
        self.nogil = (node.state == 'nogil')
        # Redundantly re-entering the same GIL state is an error, except when
        # the outer nogil state only comes from a 'nogil' declarator.
        if was_nogil == self.nogil and not self.nogil_declarator_only:
            if not was_nogil:
                error(node.pos, "Trying to acquire the GIL while it is "
                                "already held.")
            else:
                error(node.pos, "Trying to release the GIL while it was "
                                "previously released.")
        if isinstance(node.finally_clause, Nodes.StatListNode):
            # The finally clause of the GILStatNode is a GILExitNode,
            # which is wrapped in a StatListNode. Just unpack that.
            node.finally_clause, = node.finally_clause.stats
        self.visitchildren(node)
        self.nogil = was_nogil
        return node

    def visit_ParallelRangeNode(self, node):
        if node.nogil:
            # prange(..., nogil=True): rewrite as 'with nogil: prange(...)'.
            node.nogil = False
            node = Nodes.GILStatNode(node.pos, state='nogil', body=node)
            return self.visit_GILStatNode(node)
        if not self.nogil:
            error(node.pos, "prange() can only be used without the GIL")
            # Forget about any GIL-related errors that may occur in the body
            return None
        node.nogil_check(self.env_stack[-1])
        self.visitchildren(node)
        return node

    def visit_ParallelWithBlockNode(self, node):
        if not self.nogil:
            error(node.pos, "The parallel section may only be used without "
                            "the GIL")
            return None
        if node.nogil_check:
            # It does not currently implement this, but test for it anyway to
            # avoid potential future surprises
            node.nogil_check(self.env_stack[-1])
        self.visitchildren(node)
        return node

    def visit_TryFinallyStatNode(self, node):
        """
        Take care of try/finally statements in nogil code sections.
        """
        if not self.nogil or isinstance(node, Nodes.GILStatNode):
            return self.visit_Node(node)
        node.nogil_check = None
        node.is_try_finally_in_nogil = True
        self.visitchildren(node)
        return node

    def visit_Node(self, node):
        # Generic fallback: run the node's own GIL check and record the
        # context for later code generation.
        if self.env_stack and self.nogil and node.nogil_check:
            node.nogil_check(self.env_stack[-1])
        self.visitchildren(node)
        node.in_nogil_context = self.nogil
        return node
class TransformBuiltinMethods(EnvTransform):
    """
    Replace Cython's own cython.* builtins by the corresponding tree nodes.
    """

    def visit_SingleAssignmentNode(self, node):
        # Pure declarations (declaration_only assignments) generate no
        # runtime code and are dropped from the tree.
        if node.declaration_only:
            return None
        else:
            self.visitchildren(node)
            return node

    def visit_AttributeNode(self, node):
        self.visitchildren(node)
        return self.visit_cython_attribute(node)

    def visit_NameNode(self, node):
        return self.visit_cython_attribute(node)

    def visit_cython_attribute(self, node):
        """Replace a reference to a 'cython.*' attribute by a concrete node."""
        attribute = node.as_cython_attribute()
        if attribute:
            if attribute == u'compiled':
                node = ExprNodes.BoolNode(node.pos, value=True)
            elif attribute == u'__version__':
                from .. import __version__ as version
                node = ExprNodes.StringNode(node.pos, value=EncodedString(version))
            elif attribute == u'NULL':
                node = ExprNodes.NullNode(node.pos)
            elif attribute in (u'set', u'frozenset', u'staticmethod'):
                node = ExprNodes.NameNode(node.pos, name=EncodedString(attribute),
                                          entry=self.current_env().builtin_scope().lookup_here(attribute))
            elif PyrexTypes.parse_basic_type(attribute):
                # a plain C type name; handled during type analysis
                pass
            elif self.context.cython_scope.lookup_qualified_name(attribute):
                # a real name in the cython scope; leave it alone
                pass
            else:
                error(node.pos, u"'%s' not a valid cython attribute or is being used incorrectly" % attribute)
        return node

    def visit_ExecStatNode(self, node):
        # exec with a single argument implicitly gets globals() appended,
        # plus locals() when not at module level.
        lenv = self.current_env()
        self.visitchildren(node)
        if len(node.args) == 1:
            node.args.append(ExprNodes.GlobalsExprNode(node.pos))
            if not lenv.is_module_scope:
                node.args.append(
                    ExprNodes.LocalsExprNode(
                        node.pos, self.current_scope_node(), lenv))
        return node

    def _inject_locals(self, node, func_name):
        # locals()/dir()/vars() builtins
        lenv = self.current_env()
        entry = lenv.lookup_here(func_name)
        if entry:
            # not the builtin
            return node
        pos = node.pos
        if func_name in ('locals', 'vars'):
            if func_name == 'locals' and len(node.args) > 0:
                # Fix: was error(self.pos, ...) — this transform defines no
                # 'pos' attribute, so reporting the error raised an
                # AttributeError instead; use the call node's position.
                error(node.pos, "Builtin 'locals()' called with wrong number of args, expected 0, got %d"
                      % len(node.args))
                return node
            elif func_name == 'vars':
                if len(node.args) > 1:
                    # Fix: node.pos instead of undefined self.pos (see above).
                    error(node.pos, "Builtin 'vars()' called with wrong number of args, expected 0-1, got %d"
                          % len(node.args))
                if len(node.args) > 0:
                    return node  # nothing to do
            return ExprNodes.LocalsExprNode(pos, self.current_scope_node(), lenv)
        else:  # dir()
            if len(node.args) > 1:
                # Fix: node.pos instead of undefined self.pos (see above).
                error(node.pos, "Builtin 'dir()' called with wrong number of args, expected 0-1, got %d"
                      % len(node.args))
            if len(node.args) > 0:
                # optimised in Builtin.py
                return node
            if lenv.is_py_class_scope or lenv.is_module_scope:
                if lenv.is_py_class_scope:
                    pyclass = self.current_scope_node()
                    locals_dict = ExprNodes.CloneNode(pyclass.dict)
                else:
                    locals_dict = ExprNodes.GlobalsExprNode(pos)
                return ExprNodes.SortedDictKeysNode(locals_dict)
            # Function scope: the local names are known at compile time.
            local_names = sorted(var.name for var in lenv.entries.values() if var.name)
            items = [ExprNodes.IdentifierStringNode(pos, value=var)
                     for var in local_names]
            return ExprNodes.ListNode(pos, args=items)

    def visit_PrimaryCmpNode(self, node):
        # special case: for in/not-in test, we do not need to sort locals()
        self.visitchildren(node)
        if node.operator in 'not_in':  # in/not_in
            if isinstance(node.operand2, ExprNodes.SortedDictKeysNode):
                arg = node.operand2.arg
                if isinstance(arg, ExprNodes.NoneCheckNode):
                    arg = arg.arg
                node.operand2 = arg
        return node

    def visit_CascadedCmpNode(self, node):
        return self.visit_PrimaryCmpNode(node)

    def _inject_eval(self, node, func_name):
        # eval(expr) gets globals() appended (plus locals() when not at
        # module level), unless 'eval' was redefined in the current scope.
        lenv = self.current_env()
        entry = lenv.lookup_here(func_name)
        if entry or len(node.args) != 1:
            return node
        # Inject globals and locals
        node.args.append(ExprNodes.GlobalsExprNode(node.pos))
        if not lenv.is_module_scope:
            node.args.append(
                ExprNodes.LocalsExprNode(
                    node.pos, self.current_scope_node(), lenv))
        return node

    def _inject_super(self, node, func_name):
        # Turn an argument-less super() call inside a method into the
        # explicit two-argument form.
        lenv = self.current_env()
        entry = lenv.lookup_here(func_name)
        if entry or node.args:
            return node
        # Inject no-args super
        def_node = self.current_scope_node()
        if (not isinstance(def_node, Nodes.DefNode) or not def_node.args or
                len(self.env_stack) < 2):
            return node
        class_node, class_scope = self.env_stack[-2]
        if class_scope.is_py_class_scope:
            def_node.requires_classobj = True
            class_node.class_cell.is_active = True
            node.args = [
                ExprNodes.ClassCellNode(
                    node.pos, is_generator=def_node.is_generator),
                ExprNodes.NameNode(node.pos, name=def_node.args[0].name)
            ]
        elif class_scope.is_c_class_scope:
            node.args = [
                ExprNodes.NameNode(
                    node.pos, name=class_node.scope.name,
                    entry=class_node.entry),
                ExprNodes.NameNode(node.pos, name=def_node.args[0].name)
            ]
        return node

    def visit_SimpleCallNode(self, node):
        # cython.foo
        function = node.function.as_cython_attribute()
        if function:
            if function in InterpretCompilerDirectives.unop_method_nodes:
                if len(node.args) != 1:
                    error(node.function.pos, u"%s() takes exactly one argument" % function)
                else:
                    node = InterpretCompilerDirectives.unop_method_nodes[function](
                        node.function.pos, operand=node.args[0])
            elif function in InterpretCompilerDirectives.binop_method_nodes:
                if len(node.args) != 2:
                    error(node.function.pos, u"%s() takes exactly two arguments" % function)
                else:
                    node = InterpretCompilerDirectives.binop_method_nodes[function](
                        node.function.pos, operand1=node.args[0], operand2=node.args[1])
            elif function == u'cast':
                if len(node.args) != 2:
                    error(node.function.pos,
                          u"cast() takes exactly two arguments and an optional typecheck keyword")
                else:
                    type = node.args[0].analyse_as_type(self.current_env())
                    if type:
                        node = ExprNodes.TypecastNode(
                            node.function.pos, type=type, operand=node.args[1], typecheck=False)
                    else:
                        error(node.args[0].pos, "Not a type")
            elif function == u'sizeof':
                if len(node.args) != 1:
                    error(node.function.pos, u"sizeof() takes exactly one argument")
                else:
                    type = node.args[0].analyse_as_type(self.current_env())
                    if type:
                        node = ExprNodes.SizeofTypeNode(node.function.pos, arg_type=type)
                    else:
                        node = ExprNodes.SizeofVarNode(node.function.pos, operand=node.args[0])
            elif function == 'cmod':
                if len(node.args) != 2:
                    error(node.function.pos, u"cmod() takes exactly two arguments")
                else:
                    node = ExprNodes.binop_node(node.function.pos, '%', node.args[0], node.args[1])
                    node.cdivision = True
            elif function == 'cdiv':
                if len(node.args) != 2:
                    error(node.function.pos, u"cdiv() takes exactly two arguments")
                else:
                    node = ExprNodes.binop_node(node.function.pos, '/', node.args[0], node.args[1])
                    node.cdivision = True
            elif function == u'set':
                node.function = ExprNodes.NameNode(node.pos, name=EncodedString('set'))
            elif function == u'staticmethod':
                node.function = ExprNodes.NameNode(node.pos, name=EncodedString('staticmethod'))
            elif self.context.cython_scope.lookup_qualified_name(function):
                pass
            else:
                error(node.function.pos,
                      u"'%s' not a valid cython language construct" % function)
        self.visitchildren(node)
        # Handle plain builtin calls that need compiler support.
        if isinstance(node, ExprNodes.SimpleCallNode) and node.function.is_name:
            func_name = node.function.name
            if func_name in ('dir', 'locals', 'vars'):
                return self._inject_locals(node, func_name)
            if func_name == 'eval':
                return self._inject_eval(node, func_name)
            if func_name == 'super':
                return self._inject_super(node, func_name)
        return node

    def visit_GeneralCallNode(self, node):
        # cython.cast(..., typecheck=...) with keyword arguments.
        function = node.function.as_cython_attribute()
        if function:
            args = node.positional_args.args
            kwargs = node.keyword_args.compile_time_value(None)
            if function == u'cast':
                if (len(args) != 2 or len(kwargs) > 1 or
                        (len(kwargs) == 1 and 'typecheck' not in kwargs)):
                    error(node.function.pos,
                          u"cast() takes exactly two arguments and an optional typecheck keyword")
                else:
                    type = args[0].analyse_as_type(self.current_env())
                    if type:
                        typecheck = kwargs.get('typecheck', False)
                        node = ExprNodes.TypecastNode(
                            node.function.pos, type=type, operand=args[1], typecheck=typecheck)
                    else:
                        error(args[0].pos, "Not a type")
        self.visitchildren(node)
        return node
class ReplaceFusedTypeChecks(VisitorTransform):
    """
    This is not a transform in the pipeline. It is invoked on the specific
    versions of a cdef function with fused argument types. It filters out any
    type branches that don't match. e.g.
        if fused_t is mytype:
            ...
        elif fused_t in other_fused_type:
            ...
    """

    def __init__(self, local_scope):
        super(ReplaceFusedTypeChecks, self).__init__()
        # Scope carrying the fused -> specific type mapping for this version.
        self.local_scope = local_scope
        # defer the import until now to avoid circular import time dependencies
        from .Optimize import ConstantFolding
        self.transform = ConstantFolding(reevaluate=True)

    def visit_IfStatNode(self, node):
        """
        Filters out any if clauses with false compile time type check
        expression.
        """
        self.visitchildren(node)
        # Constant-fold so that branches whose condition was replaced by a
        # BoolNode below get pruned from the statement.
        return self.transform(node)

    def visit_PrimaryCmpNode(self, node):
        # Only comparisons where both operands denote types are compile-time
        # type checks; anything else is returned unchanged.
        type1 = node.operand1.analyse_as_type(self.local_scope)
        type2 = node.operand2.analyse_as_type(self.local_scope)
        if type1 and type2:
            false_node = ExprNodes.BoolNode(node.pos, value=False)
            true_node = ExprNodes.BoolNode(node.pos, value=True)
            type1 = self.specialize_type(type1, node.operand1.pos)
            op = node.operator
            if op in ('is', 'is_not', '==', '!='):
                type2 = self.specialize_type(type2, node.operand2.pos)
                is_same = type1.same_as(type2)
                eq = op in ('is', '==')
                if (is_same and eq) or (not is_same and not eq):
                    return true_node
                # NOTE(review): when the check evaluates false this falls
                # through to `return node` rather than returning false_node —
                # confirm this is intended and not a missing else branch.
            elif op in ('in', 'not_in'):
                # We have to do an instance check directly, as operand2
                # needs to be a fused type and not a type with a subtype
                # that is fused. First unpack the typedef
                if isinstance(type2, PyrexTypes.CTypedefType):
                    type2 = type2.typedef_base_type
                if type1.is_fused:
                    error(node.operand1.pos, "Type is fused")
                elif not type2.is_fused:
                    error(node.operand2.pos,
                          "Can only use 'in' or 'not in' on a fused type")
                else:
                    types = PyrexTypes.get_specialized_types(type2)
                    for specialized_type in types:
                        if type1.same_as(specialized_type):
                            if op == 'in':
                                return true_node
                            else:
                                return false_node
                    # No specialization matched.
                    if op == 'not_in':
                        return true_node
                    return false_node
        return node

    def specialize_type(self, type, pos):
        # Map a (possibly fused) type to its specific version in this scope.
        try:
            return type.specialize(self.local_scope.fused_to_specific)
        except KeyError:
            error(pos, "Type is not specific")
            return type

    def visit_Node(self, node):
        self.visitchildren(node)
        return node
class DebugTransform(CythonTransform):
    """
    Write debug information for this Cython module.
    """

    def __init__(self, context, options, result):
        super(DebugTransform, self).__init__(context)
        # Qualified names that were already serialized (avoids duplicates).
        self.visited = set()
        # our treebuilder and debug output writer
        # (see Cython.Debugger.debug_output.CythonDebugWriter)
        self.tb = self.context.gdb_debug_outputwriter
        #self.c_output_file = options.output_file
        self.c_output_file = result.c_file
        # Closure support, basically treat nested functions as if the AST were
        # never nested
        self.nested_funcdefs = []
        # tells visit_NameNode whether it should register step-into functions
        self.register_stepinto = False

    def visit_ModuleNode(self, node):
        self.tb.module_name = node.full_module_name
        attrs = dict(
            module_name=node.full_module_name,
            filename=node.pos[0].filename,
            c_filename=self.c_output_file)
        self.tb.start('Module', attrs)
        # serialize functions
        self.tb.start('Functions')
        # First, serialize functions normally...
        self.visitchildren(node)
        # ... then, serialize nested functions
        for nested_funcdef in self.nested_funcdefs:
            self.visit_FuncDefNode(nested_funcdef)
        self.register_stepinto = True
        self.serialize_modulenode_as_function(node)
        self.register_stepinto = False
        self.tb.end('Functions')
        # 2.3 compatibility. Serialize global variables
        self.tb.start('Globals')
        entries = {}
        for k, v in node.scope.entries.items():
            # Skip compiler-internal names, functions and extension types.
            if (v.qualified_name not in self.visited and not
                    v.name.startswith('__pyx_') and not
                    v.type.is_cfunction and not
                    v.type.is_extension_type):
                entries[k]= v
        self.serialize_local_variables(entries)
        self.tb.end('Globals')
        # self.tb.end('Module') # end Module after the line number mapping in
        # Cython.Compiler.ModuleNode.ModuleNode._serialize_lineno_map
        return node

    def visit_FuncDefNode(self, node):
        self.visited.add(node.local_scope.qualified_name)
        if getattr(node, 'is_wrapper', False):
            return node
        if self.register_stepinto:
            # Nested function encountered while serializing its parent:
            # defer it so that the output stays flat (see __init__).
            self.nested_funcdefs.append(node)
            return node
        # node.entry.visibility = 'extern'
        if node.py_func is None:
            pf_cname = ''
        else:
            pf_cname = node.py_func.entry.func_cname
        attrs = dict(
            name=node.entry.name or getattr(node, 'name', '<unknown>'),
            cname=node.entry.func_cname,
            pf_cname=pf_cname,
            qualified_name=node.local_scope.qualified_name,
            lineno=str(node.pos[1]))
        self.tb.start('Function', attrs=attrs)
        self.tb.start('Locals')
        self.serialize_local_variables(node.local_scope.entries)
        self.tb.end('Locals')
        self.tb.start('Arguments')
        for arg in node.local_scope.arg_entries:
            self.tb.start(arg.name)
            self.tb.end(arg.name)
        self.tb.end('Arguments')
        self.tb.start('StepIntoFunctions')
        self.register_stepinto = True
        self.visitchildren(node)
        self.register_stepinto = False
        self.tb.end('StepIntoFunctions')
        self.tb.end('Function')
        return node

    def visit_NameNode(self, node):
        # Record called C functions as "step into" targets for the debugger.
        if (self.register_stepinto and
                node.type is not None and
                node.type.is_cfunction and
                getattr(node, 'is_called', False) and
                node.entry.func_cname is not None):
            # don't check node.entry.in_cinclude, as 'cdef extern: ...'
            # declared functions are not 'in_cinclude'.
            # This means we will list called 'cdef' functions as
            # "step into functions", but this is not an issue as they will be
            # recognized as Cython functions anyway.
            attrs = dict(name=node.entry.func_cname)
            self.tb.start('StepIntoFunction', attrs=attrs)
            self.tb.end('StepIntoFunction')
        self.visitchildren(node)
        return node

    def serialize_modulenode_as_function(self, node):
        """
        Serialize the module-level code as a function so the debugger will know
        it's a "relevant frame" and it will know where to set the breakpoint
        for 'break modulename'.
        """
        name = node.full_module_name.rpartition('.')[-1]
        # Module init functions differ between Python 2 and 3; emit both.
        cname_py2 = 'init' + name
        cname_py3 = 'PyInit_' + name
        py2_attrs = dict(
            name=name,
            cname=cname_py2,
            pf_cname='',
            # Ignore the qualified_name, breakpoints should be set using
            # `cy break modulename:lineno` for module-level breakpoints.
            qualified_name='',
            lineno='1',
            is_initmodule_function="True",
        )
        py3_attrs = dict(py2_attrs, cname=cname_py3)
        self._serialize_modulenode_as_function(node, py2_attrs)
        self._serialize_modulenode_as_function(node, py3_attrs)

    def _serialize_modulenode_as_function(self, node, attrs):
        # Same element structure as visit_FuncDefNode, with no arguments.
        self.tb.start('Function', attrs=attrs)
        self.tb.start('Locals')
        self.serialize_local_variables(node.scope.entries)
        self.tb.end('Locals')
        self.tb.start('Arguments')
        self.tb.end('Arguments')
        self.tb.start('StepIntoFunctions')
        self.register_stepinto = True
        self.visitchildren(node)
        self.register_stepinto = False
        self.tb.end('StepIntoFunctions')
        self.tb.end('Function')

    def serialize_local_variables(self, entries):
        # Emit one 'LocalVar' element per named entry.
        for entry in entries.values():
            if not entry.cname:
                # not a local variable
                continue
            if entry.type.is_pyobject:
                vartype = 'PythonObject'
            else:
                vartype = 'CObject'
            if entry.from_closure:
                # We're dealing with a closure where a variable from an outer
                # scope is accessed, get it from the scope object.
                cname = '%s->%s' % (Naming.cur_scope_cname,
                                    entry.outer_entry.cname)
                qname = '%s.%s.%s' % (entry.scope.outer_scope.qualified_name,
                                      entry.scope.name,
                                      entry.name)
            elif entry.in_closure:
                cname = '%s->%s' % (Naming.cur_scope_cname,
                                    entry.cname)
                qname = entry.qualified_name
            else:
                cname = entry.cname
                qname = entry.qualified_name
            if not entry.pos:
                # this happens for variables that are not in the user's code,
                # e.g. for the global __builtins__, __doc__, etc. We can just
                # set the lineno to 0 for those.
                lineno = '0'
            else:
                lineno = str(entry.pos[1])
            attrs = dict(
                name=entry.name,
                cname=cname,
                qualified_name=qname,
                type=vartype,
                lineno=lineno)
            self.tb.start('LocalVar', attrs)
            self.tb.end('LocalVar')
|
fabianrost84/cython
|
Cython/Compiler/ParseTreeTransforms.py
|
Python
|
apache-2.0
| 122,793
|
[
"VisIt"
] |
64dc568c732fbbe6c329711aa399df8b83b797fea968a7d6cbeb9b941a820bf6
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-service
# Author : Adria Casajus
########################################################################
"""Entry-point script that configures and runs a single DIRAC service.

The service to run is named by the first positional command-line argument.
Exits with status 1 on any configuration or startup error.
"""
__RCSID__ = "$Id$"
import sys
import DIRAC
from DIRAC.ConfigurationSystem.Client.LocalConfiguration import LocalConfiguration
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Core.DISET.ServiceReactor import ServiceReactor
from DIRAC.Core.Utilities.DErrno import includeExtensionErrors
# Parse the command line; the positional argument names the service to run.
localCfg = LocalConfiguration()
positionalArgs = localCfg.getPositionalArguments()
if len( positionalArgs ) == 0:
  gLogger.fatal( "You must specify which server to run!" )
  sys.exit( 1 )
serverName = positionalArgs[0]
localCfg.setConfigurationForServer( serverName )
# A listening port and a DIRAC setup are mandatory configuration entries.
localCfg.addMandatoryEntry( "Port" )
#localCfg.addMandatoryEntry( "HandlerPath" )
localCfg.addMandatoryEntry( "/DIRAC/Setup" )
# Services authenticate with the server certificate by default.
localCfg.addDefaultEntry( "/DIRAC/Security/UseServerCertificate", "yes" )
localCfg.addDefaultEntry( "LogLevel", "INFO" )
localCfg.addDefaultEntry( "LogColor", True )
resultDict = localCfg.loadUserData()
if not resultDict[ 'OK' ]:
  gLogger.initialize( serverName, "/" )
  gLogger.error( "There were errors when loading configuration", resultDict[ 'Message' ] )
  sys.exit( 1 )
# Register extension-specific error codes before starting the reactor.
includeExtensionErrors()
serverToLaunch = ServiceReactor()
result = serverToLaunch.initialize( positionalArgs )
if not result[ 'OK' ]:
  gLogger.error( result[ 'Message' ] )
  sys.exit( 1 )
# serve() blocks handling requests; a returned error means it stopped.
result = serverToLaunch.serve()
if not result[ 'OK' ]:
  gLogger.error( result[ 'Message' ] )
  sys.exit( 1 )
|
vmendez/DIRAC
|
Core/scripts/dirac-service.py
|
Python
|
gpl-3.0
| 1,614
|
[
"DIRAC"
] |
04a115589565e4f89a916392b109796df0691781fa3cad34b72d1bca590d1821
|
"""pure-Python sugar wrappers for core 0MQ objects."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from zmq.sugar import (
constants, context, frame, poll, socket, tracker, version
)
from zmq import error
__all__ = ['constants']
for submod in (
constants, context, error, frame, poll, socket, tracker, version
):
__all__.extend(submod.__all__)
from zmq.error import *
from zmq.sugar.context import *
from zmq.sugar.tracker import *
from zmq.sugar.socket import *
from zmq.sugar.constants import *
from zmq.sugar.frame import *
from zmq.sugar.poll import *
# from zmq.sugar.stopwatch import *
# from zmq.sugar._device import *
from zmq.sugar.version import *
|
IsCoolEntertainment/debpkg_python-pyzmq
|
zmq/sugar/__init__.py
|
Python
|
lgpl-3.0
| 1,187
|
[
"Brian"
] |
148ca83686df6f3223524a70a2d73000e76521605cbdcc937781052b96619f15
|
# -*- coding: utf-8 -*-
"""
Generate centreline and write it out as .vtk legacy format.

Python 2 script: configures the shared CentrelineGenerator module for the
960-core mesh and runs it.
"""
import os
import sys
# Run in current directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Import path for the CentrelineGenerator script.
importPath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../util'))
if not importPath in sys.path:
    sys.path.insert(1, importPath)
del importPath
import CentrelineGenerator
# A centreline for a mesh with 960 cores.
CentrelineGenerator.segmentList = [6.5, None, None]
CentrelineGenerator.radiusBase = 1.2732395447351628
CentrelineGenerator.outputFileName = "c960Centreline.vtk"
# NOTE(review): presumably None disables the spherical end caps — confirm
# against CentrelineGenerator.
CentrelineGenerator.sphereRadius = None
def main():
    # Alternative radii scheme kept for reference:
    # CentrelineGenerator.GenerateCentreline(CentrelineGenerator.BuildDecreasingRadiiScalars)
    CentrelineGenerator.GenerateCentreline()
if __name__ == '__main__':
    print "Starting", os.path.basename(__file__)
    main()
    print "Exiting", os.path.basename(__file__)
|
BlueFern/DBiharMesher
|
meshes/c960/Generate960Centreline.py
|
Python
|
gpl-2.0
| 975
|
[
"VTK"
] |
74b5b9ccf24a8a197080e87bda0cf7c9cbf46f9f63cd85cb10532fa3ce7eb6f3
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.