| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12–1.05M | stringlengths 5–86 | stringlengths 4–191 | stringclasses 1 value | stringclasses 15 values | int32 12–1.05M | listlengths 1–23 | stringlengths 64–64 |
import os
from pathlib import Path
import numpy as np
import pytest
from pysisyphus.helpers_pure import results_to_json, json_to_results
from pysisyphus.run import run_from_dict
from pysisyphus.testing import using
def test_results_to_json():
size = 12
results = {
"energy": -1000.0,
"forces": np.random.rand(size),
"hessian": np.random.rand(size, size),
}
json_ = results_to_json(results)
results_ = json_to_results(json_)
for key, val in results_.items():
ref_val = results[key]
np.testing.assert_allclose(val, ref_val)
@using("pyscf")
@pytest.mark.parametrize(
"run_func",
(
None,
"get_energy",
"get_forces",
"get_hessian",
),
)
def test_calculator_dump(run_func):
out_dir = Path(".")
out_fn = "calculator_000.000.results"
try:
os.remove(out_dir / out_fn)
except FileNotFoundError:
pass
run_dict = {
"geom": {
"fn": "lib:h2o.xyz",
},
"calc": {
"type": "pyscf",
"basis": "sto3g",
"run_func": run_func,
"out_dir": out_dir,
},
}
_ = run_from_dict(run_dict)
assert (out_dir / out_fn).exists()
| eljost/pysisyphus | tests/test_calculator/test_calculator.py | Python | gpl-3.0 | 1,247 | ["PySCF"] | 3b2aab8307ad2461ad0e1847840a67a4e6f4bb2647643464da6a8668468f6f8d |
from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, tddft, scf
from pyscf.nao import gw
from pyscf.data.nist import HARTREE2EV
class KnowValues(unittest.TestCase):
def test_0168_cn_rohf(self):
""" Interacting case """
spin = 1
mol=gto.M(verbose=0,atom='C 0 0 -0.6;N 0 0 0.52',basis='cc-pvdz',spin=spin)
gto_mf = scf.ROHF(mol)
gto_mf.kernel()
ss_2sp1 = gto_mf.spin_square()
nao_gw = gw(gto=mol, mf=gto_mf, verbosity=0, nocc=4)
nao_gw.kernel_gw()
#nao_gw.report()
#print(nao_gw.spin_square())
if __name__ == "__main__": unittest.main()
| gkc1000/pyscf | pyscf/nao/test/test_0169_cn_uks_b3lyp.py | Python | apache-2.0 | 634 | ["PySCF"] | 869ff7e9b52086ae8d4bfdc840258f708f6c7a4ba1adbc1fa83f854401281517 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Batch Uploader core functions. Uploading metadata and documents.
"""
import os
import pwd
import grp
import sys
import time
import tempfile
import cgi
import re
from invenio.dbquery import run_sql, Error
from invenio.access_control_engine import acc_authorize_action
from invenio.webuser import collect_user_info, page_not_authorized
from invenio.config import CFG_BINDIR, CFG_TMPSHAREDDIR, CFG_LOGDIR, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_OAI_ID_FIELD, CFG_BATCHUPLOADER_DAEMON_DIR, \
CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS, \
CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS, \
CFG_PREFIX, CFG_SITE_LANG
from invenio.textutils import encode_for_xml
from invenio.bibtask import task_low_level_submission
from invenio.messages import gettext_set_language
from invenio.textmarc2xmlmarc import transform_file
from invenio.shellutils import run_shell_command
from invenio.bibupload import xml_marc_to_records, bibupload
from invenio.access_control_firerole import _ip_matcher_builder, _ipmatch
from invenio.webinterface_handler_config import HTTP_BAD_REQUEST, HTTP_FORBIDDEN
import invenio.bibupload as bibupload_module
from invenio.bibrecord import create_records, \
record_strip_empty_volatile_subfields, \
record_strip_empty_fields
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PERMITTED_MODES = ['-i', '-r', '-c', '-a', '-ir',
'--insert', '--replace', '--correct', '--append', '--holdingpen']
_CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS_RE = re.compile(CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS)
_CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS = []
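# Pre-compile the robot-rights map at import time: bare IP entries (no '/prefix')
# are treated as /32 networks before building the IP matchers.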
for _network, _collection in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS.items():
if '/' not in _network:
_network += '/32'
_CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS.append((_ip_matcher_builder(_network), _collection))
del _network
del _collection
def cli_allocate_record(req):
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _get_client_authorized_collections(_get_client_ip(req)):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = '[ERROR] Sorry, the "%s" useragent cannot use the service.' % _get_useragent(req)
_log(msg)
return _write(req, msg)
recid = run_sql("insert into bibrec (creation_date,modification_date,earliest_date) values(NOW(),NOW(),NOW())")
return recid
def cli_upload(req, file_content=None, mode=None, callback_url=None, nonce=None, special_treatment=None, priority=0):
""" Robot interface for uploading MARC files
"""
req.content_type = "text/plain"
# check IP and useragent:
if not _get_client_authorized_collections(_get_client_ip(req)):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
req.status = HTTP_FORBIDDEN
return _write(req, msg)
if not _check_client_useragent(req):
msg = "[ERROR] Sorry, the %s useragent cannot use the service." % _get_useragent(req)
_log(msg)
req.status = HTTP_FORBIDDEN
return _write(req, msg)
arg_mode = mode
if not arg_mode:
msg = "[ERROR] Please specify upload mode to use."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
if arg_mode == '--insertorreplace':
arg_mode = '-ir'
if not arg_mode in PERMITTED_MODES:
msg = "[ERROR] Invalid upload mode."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
arg_file = file_content
if hasattr(arg_file, 'read'):
## We've been passed a readable file, e.g. req
arg_file = arg_file.read()
if not arg_file:
msg = "[ERROR] Please provide a body to your request."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
else:
if not arg_file:
msg = "[ERROR] Please specify file body to input."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
if hasattr(arg_file, "filename"):
arg_file = arg_file.value
else:
msg = "[ERROR] 'file' parameter must be a (single) file"
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
# write temporary file:
(fd, filename) = tempfile.mkstemp(prefix="batchupload_" + \
time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_",
dir=CFG_TMPSHAREDDIR)
filedesc = os.fdopen(fd, 'w')
filedesc.write(arg_file)
filedesc.close()
# check if this client can run this file:
client_ip = _get_client_ip(req)
permitted_dbcollids = _get_client_authorized_collections(client_ip)
if '*' not in permitted_dbcollids: # wildcard
allow = _check_client_can_submit_file(client_ip, filename, req, 0)
if not allow:
msg = "[ERROR] Cannot submit such a file from this IP. (Wrong collection.)"
_log(msg)
req.status = HTTP_FORBIDDEN
return _write(req, msg)
# check validity of marcxml
xmlmarclint_path = CFG_BINDIR + '/xmlmarclint'
xmlmarclint_output, dummy1, dummy2 = run_shell_command('%s %s' % (xmlmarclint_path, filename))
if xmlmarclint_output != 0:
msg = "[ERROR] MARCXML is not valid."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
args = ['bibupload', "batchupload", arg_mode, filename, '-P', str(priority)]
# run upload command
if callback_url:
args += ["--callback-url", callback_url]
if nonce:
args += ["--nonce", nonce]
if special_treatment:
args += ["--special-treatment", special_treatment]
task_low_level_submission(*args)
msg = "[INFO] %s" % ' '.join(args)
_log(msg)
return _write(req, msg)
def metadata_upload(req, metafile=None, filetype=None, mode=None, exec_date=None,
exec_time=None, metafilename=None, ln=CFG_SITE_LANG,
priority="1", email_logs_to=None):
"""
Metadata web upload service. Get upload parameters and exec bibupload for the given file.
Finally, write upload history.
@return: tuple (error code, message)
error code: code that indicates if an error occurred
message: message describing the error
"""
# start output:
req.content_type = "text/html"
req.send_http_header()
error_codes = {'not_authorized': 1}
user_info = collect_user_info(req)
(fd, filename) = tempfile.mkstemp(prefix="batchupload_" + \
user_info['nickname'] + "_" + time.strftime("%Y%m%d%H%M%S",
time.localtime()) + "_", dir=CFG_TMPSHAREDDIR)
filedesc = os.fdopen(fd, 'w')
filedesc.write(metafile)
filedesc.close()
# check if this client can run this file:
if req is not None:
allow = _check_client_can_submit_file(req=req, metafile=metafile, webupload=1, ln=ln)
if allow[0] != 0:
return (error_codes['not_authorized'], allow[1])
# run upload command:
task_arguments = ('bibupload', user_info['nickname'], mode,
"--priority=" + priority, "-N", "batchupload")
if exec_date:
date = exec_date
if exec_time:
date += ' ' + exec_time
task_arguments += ("-t", date)
if email_logs_to:
task_arguments += ('--email-logs-to', email_logs_to)
task_arguments += (filename, )
jobid = task_low_level_submission(*task_arguments)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "metadata")""",
(user_info['nickname'], metafilename,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid), ))
return (0, "Task %s queued" % str(jobid))
def document_upload(req=None, folder="", matching="", mode="", exec_date="", exec_time="", ln=CFG_SITE_LANG, priority="1", email_logs_to=None):
""" Take files from the given directory and upload them with the appropiate mode.
@parameters:
+ folder: Folder where the files to upload are stored
+ matching: How to match file names with record fields (report number, barcode,...)
+ mode: Upload mode (append, revise, replace)
@return: tuple (file, error code)
file: file name causing the error to notify the user
error code:
1 - More than one possible recID, ambiguous behaviour
2 - No records match that file name
3 - File already exists
"""
import sys
if sys.hexversion < 0x2060000:
from md5 import md5
else:
from hashlib import md5
from invenio.bibdocfile import BibRecDocs, file_strip_ext
import shutil
from invenio.search_engine import perform_request_search, \
search_pattern, \
guess_collection_of_a_record
_ = gettext_set_language(ln)
errors = []
info = [0, []] # Number of files read, name of the files
try:
files = os.listdir(folder)
except OSError, error:
errors.append(("", error))
return errors, info
err_desc = {1: _("More than one possible recID, ambiguous behaviour"), 2: _("No records match that file name"),
3: _("File already exists"), 4: _("A file with the same name and format already exists"),
5: _("No rights to upload to collection '%s'")}
# Create directory DONE/ if it doesn't exist
folder = (folder[-1] == "/") and folder or (folder + "/")
files_done_dir = folder + "DONE/"
try:
os.mkdir(files_done_dir)
except OSError:
# Directory exists or no write permission
pass
for docfile in files:
if os.path.isfile(os.path.join(folder, docfile)):
info[0] += 1
identifier = file_strip_ext(docfile)
extension = docfile[len(identifier):]
rec_id = None
if identifier:
rec_id = search_pattern(p=identifier, f=matching, m='e')
if not rec_id:
errors.append((docfile, err_desc[2]))
continue
elif len(rec_id) > 1:
errors.append((docfile, err_desc[1]))
continue
else:
rec_id = str(list(rec_id)[0])
rec_info = BibRecDocs(rec_id)
if rec_info.bibdocs:
for bibdoc in rec_info.bibdocs:
attached_files = bibdoc.list_all_files()
file_md5 = md5(open(os.path.join(folder, docfile), "rb").read()).hexdigest()
num_errors = len(errors)
for attached_file in attached_files:
if attached_file.checksum == file_md5:
errors.append((docfile, err_desc[3]))
break
elif attached_file.get_full_name() == docfile:
errors.append((docfile, err_desc[4]))
break
if len(errors) > num_errors:
continue
# Check if user has rights to upload file
if req is not None:
file_collection = guess_collection_of_a_record(int(rec_id))
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=file_collection)
if auth_code != 0:
error_msg = err_desc[5] % file_collection
errors.append((docfile, error_msg))
continue
# Move document to be uploaded to temporary folder
(fd, tmp_file) = tempfile.mkstemp(prefix=identifier + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_", suffix=extension, dir=CFG_TMPSHAREDDIR)
shutil.copy(os.path.join(folder, docfile), tmp_file)
# Create MARC temporary file with FFT tag and call bibupload
(fd, filename) = tempfile.mkstemp(prefix=identifier + '_', dir=CFG_TMPSHAREDDIR)
filedesc = os.fdopen(fd, 'w')
marc_content = """ <record>
<controlfield tag="001">%(rec_id)s</controlfield>
<datafield tag="FFT" ind1=" " ind2=" ">
<subfield code="n">%(name)s</subfield>
<subfield code="a">%(path)s</subfield>
</datafield>
</record> """ % {'rec_id': rec_id,
'name': encode_for_xml(identifier),
'path': encode_for_xml(tmp_file),
}
filedesc.write(marc_content)
filedesc.close()
info[1].append(docfile)
user = ""
if req is not None:
user_info = collect_user_info(req)
user = user_info['nickname']
if not user:
user = "batchupload"
# Execute bibupload with the appropriate mode
task_arguments = ('bibupload', user, "--" + mode,
"--priority=" + priority, "-N", "batchupload")
if exec_date:
date = '--runtime=' + "\'" + exec_date + ' ' + exec_time + "\'"
task_arguments += (date, )
if email_logs_to:
task_arguments += ("--email-logs-to", email_logs_to)
task_arguments += (filename, )
jobid = task_low_level_submission(*task_arguments)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "document")""",
(user_info['nickname'], docfile,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid)))
# Move file to DONE folder
done_filename = docfile + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_" + str(jobid)
try:
os.rename(os.path.join(folder, docfile), os.path.join(files_done_dir, done_filename))
except OSError:
errors.append('MoveError')
return errors, info
def get_user_metadata_uploads(req):
"""Retrieve all metadata upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="metadata"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_user_document_uploads(req):
"""Retrieve all document upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="document"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_daemon_doc_files():
""" Return all files found in batchuploader document folders """
files = {}
for folder in ['/revise', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/documents' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def get_daemon_meta_files():
""" Return all files found in batchuploader metadata folders """
files = {}
for folder in ['/correct', '/replace', '/insert', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/metadata' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def user_authorization(req, ln):
""" Check user authorization to visit page """
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader')
if auth_code != 0:
referer = '/batchuploader/'
return page_not_authorized(req=req, referer=referer,
text=auth_message, navmenuid="batchuploader")
else:
return None
def perform_basic_upload_checks(xml_record):
""" Performs tests that would provoke the bibupload task to fail with
an exit status 1, to prevent batchupload from crashing while alarming
the user wabout the issue
"""
from invenio.bibupload import writing_rights_p
errors = []
if not writing_rights_p():
errors.append("Error: BibUpload does not have rights to write fulltext files.")
recs = create_records(xml_record, 1, 1)
if recs == []:
errors.append("Error: Cannot parse MARCXML file.")
elif recs[0][0] is None:
errors.append("Error: MARCXML file has wrong format: %s" % recs)
return errors
def perform_upload_check(xml_record, mode):
""" Performs a upload simulation with the given record and mode
@return: string describing errors
@rtype: string
"""
error_cache = []
def my_writer(msg, stream=sys.stdout, verbose=1):
if verbose == 1:
if 'DONE' not in msg:
error_cache.append(msg.strip())
orig_writer = bibupload_module.write_message
bibupload_module.write_message = my_writer
error_cache.extend(perform_basic_upload_checks(xml_record))
if error_cache:
# There has been some critical error
return '\n'.join(error_cache)
recs = xml_marc_to_records(xml_record)
try:
upload_mode = mode[2:]
# Adapt input data for bibupload function
if upload_mode == "r insert-or-replace":
upload_mode = "replace_or_insert"
for record in recs:
if record:
record_strip_empty_volatile_subfields(record)
record_strip_empty_fields(record)
bibupload(record, opt_mode=upload_mode, pretend=True)
finally:
bibupload_module.write_message = orig_writer
return '\n'.join(error_cache)
def _get_useragent(req):
"""Return client user agent from req object."""
user_info = collect_user_info(req)
return user_info['agent']
def _get_client_ip(req):
"""Return client IP address from req object."""
return str(req.remote_ip)
def _get_client_authorized_collections(client_ip):
"""
Is this client permitted to use the service?
Return list of collections for which the client is authorized
"""
ret = []
for network, collection in _CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS:
if _ipmatch(client_ip, network):
if '*' in collection:
return ['*']
ret += collection
return ret
def _check_client_useragent(req):
"""
Is this user agent permitted to use the service?
"""
client_useragent = _get_useragent(req)
if _CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS_RE.match(client_useragent):
return True
return False
def _check_client_can_submit_file(client_ip="", metafile="", req=None, webupload=0, ln=CFG_SITE_LANG):
"""
Is this client able to upload such a FILENAME?
Check 980 $a values and collection tags in the file to see if they are among the
permitted ones as specified by CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS and ACC_AUTHORIZE_ACTION.
Useful to make sure that the client does not overwrite other records by
mistake.
"""
_ = gettext_set_language(ln)
recs = create_records(metafile, 0, 0)
user_info = collect_user_info(req)
permitted_dbcollids = _get_client_authorized_collections(client_ip)
if '*' in permitted_dbcollids:
if not webupload:
return True
else:
return (0, " ")
filename_tag980_values = _detect_980_values_from_marcxml_file(recs)
for filename_tag980_value in filename_tag980_values:
if not filename_tag980_value:
if not webupload:
return False
else:
return (1, "Invalid collection in tag 980")
if not webupload:
if not filename_tag980_value in permitted_dbcollids:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_tag980_value)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_tag980_value}
return (auth_code, error_msg)
filename_rec_id_collections = _detect_collections_from_marcxml_file(recs)
for filename_rec_id_collection in filename_rec_id_collections:
if not webupload:
if not filename_rec_id_collection in permitted_dbcollids:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_rec_id_collection)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_rec_id_collection}
return (auth_code, error_msg)
if not webupload:
return True
else:
return (0, " ")
def _detect_980_values_from_marcxml_file(recs):
"""
Read MARCXML file and return list of 980 $a values found in that file.
Useful for checking rights.
"""
from invenio.bibrecord import record_get_field_values
collection_tag = run_sql("SELECT value FROM tag, field_tag, field \
WHERE tag.id=field_tag.id_tag AND \
field_tag.id_field=field.id AND \
field.code='collection'")
collection_tag = collection_tag[0][0]
dbcollids = {}
for rec, dummy1, dummy2 in recs:
if rec:
for tag980 in record_get_field_values(rec,
tag=collection_tag[:3],
ind1=collection_tag[3],
ind2=collection_tag[4],
code=collection_tag[5]):
dbcollids[tag980] = 1
return dbcollids.keys()
def _detect_collections_from_marcxml_file(recs):
"""
Extract all possible recIDs from MARCXML file and guess collections
for these recIDs.
"""
from invenio.bibrecord import record_get_field_values
from invenio.search_engine import guess_collection_of_a_record
from invenio.bibupload import find_record_from_sysno, \
find_records_from_extoaiid, \
find_record_from_oaiid
dbcollids = {}
sysno_tag = CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG
oaiid_tag = CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG
oai_tag = CFG_OAI_ID_FIELD
for rec, dummy1, dummy2 in recs:
if rec:
for tag001 in record_get_field_values(rec, '001'):
collection = guess_collection_of_a_record(int(tag001))
dbcollids[collection] = 1
for tag_sysno in record_get_field_values(rec, tag=sysno_tag[:3],
ind1=sysno_tag[3],
ind2=sysno_tag[4],
code=sysno_tag[5]):
record = find_record_from_sysno(tag_sysno)
if record:
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oaiid in record_get_field_values(rec, tag=oaiid_tag[:3],
ind1=oaiid_tag[3],
ind2=oaiid_tag[4],
code=oaiid_tag[5]):
try:
records = find_records_from_extoaiid(tag_oaiid)
except Error:
records = []
if records:
record = records.pop()
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oai in record_get_field_values(rec, tag=oai_tag[0:3],
ind1=oai_tag[3],
ind2=oai_tag[4],
code=oai_tag[5]):
record = find_record_from_oaiid(tag_oai)
if record:
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
return dbcollids.keys()
def _transform_input_to_marcxml(file_input=""):
"""
Takes text-marc as input and transforms it
to MARCXML.
"""
# Create temporary file to read from
tmp_fd, filename = tempfile.mkstemp(dir=CFG_TMPSHAREDDIR)
os.write(tmp_fd, file_input)
os.close(tmp_fd)
try:
# Redirect output, transform, restore old references
old_stdout = sys.stdout
new_stdout = StringIO()
sys.stdout = new_stdout
transform_file(filename)
finally:
sys.stdout = old_stdout
return new_stdout.getvalue()
def _log(msg, logfile="webupload.log"):
"""
Log MSG into LOGFILE with timestamp.
"""
filedesc = open(CFG_LOGDIR + "/" + logfile, "a")
filedesc.write(time.strftime("%Y-%m-%d %H:%M:%S") + " --> " + msg + "\n")
filedesc.close()
return
def _write(req, msg):
"""
Write MSG to the output stream for the end user.
"""
req.write(msg + "\n")
return
| Panos512/invenio | modules/bibupload/lib/batchuploader_engine.py | Python | gpl-2.0 | 29,489 | ["VisIt"] | c3424430153d2769a834c82d095e835410d8335152a946f696676124bf81b669 |
# Shared and common functions (declustering redundant code)
import numpy as np, os
import random, cv2
import operator
def get(link, save_as=False):
import urllib
base_dir = './tmp'
assert type(link) == str, type(link)
if not os.path.exists(base_dir):
os.makedirs(base_dir)
if save_as:
save_path = os.path.join(base_dir, save_as)
else:
save_path = os.path.join(base_dir, 'tmp.png')
urllib.urlretrieve(link, save_path)
im = cv2.imread(save_path)[:,:,[2,1,0]]
return im
def softmax(X, theta = 1.0, axis = None):
y = np.atleast_2d(X)
if axis is None:
axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1)
y = y * float(theta)
y = y - np.expand_dims(np.max(y, axis = axis), axis)
y = np.exp(y)
ax_sum = np.expand_dims(np.sum(y, axis = axis), axis)
p = y / ax_sum
if len(X.shape) == 1: p = p.flatten()
return p
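# Usage sketch (illustrative addition, not part of the original module): softmax
# over the rows of a batch of scores; each row of the result sums to 1.
def _example_softmax():
    scores = np.array([[1.0, 2.0, 3.0],
                       [1.0, 1.0, 1.0]])
    probs = softmax(scores, theta=1.0, axis=1)
    assert np.allclose(probs.sum(axis=1), 1.0)
    return probs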
def sort_dict(d, sort_by='value'):
""" Sorts dictionary """
assert sort_by in ['value', 'key'], sort_by
if sort_by == 'key':
return sorted(d.items(), key=operator.itemgetter(0))
if sort_by == 'value':
return sorted(d.items(), key=operator.itemgetter(1))
def random_crop(im, crop_size, return_crop_loc=False):
""" Randomly crop """
h,w = np.shape(im)[:2]
hSt = random.randint(0, h - crop_size[0])
wSt = random.randint(0, w - crop_size[1])
patch = im[hSt:hSt+crop_size[0], wSt:wSt+crop_size[1], :]
assert tuple(np.shape(patch)[:2]) == tuple(crop_size)
if return_crop_loc:
return patch, (hSt, wSt)
return patch
def process_im(im):
""" Normalizes images into the range [-1.0, 1.0] """
im = np.array(im)
if np.max(im) <= 1:
# image already scaled to [0, 1] (e.g. PNG read as float)
im = (2.0 * im) - 1.0
else:
# 8-bit image in [0, 255] (e.g. JPEG)
im = 2.0 * (im / 255.) - 1.0
return im
def deprocess_im(im, dtype=None):
""" Map images in [-1.0, 1.0] back to [0, 255] """
im = np.array(im)
return ((255.0 * (im + 1.0))/2.0).astype(dtype)
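# Usage sketch (illustrative addition, not part of the original module): an 8-bit
# image in [0, 255] maps into [-1.0, 1.0] and back.
def _example_process_roundtrip():
    im = np.arange(192, dtype=np.uint8).reshape(8, 8, 3)  # values 0..191
    normalized = process_im(im)          # floats in [-1.0, 1.0]
    restored = deprocess_im(normalized)  # back to the [0, 255] range (as floats)
    assert np.allclose(restored, im)
    return normalized, restored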
def random_resize(im_a, im_b, same):
valid_interps = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4, cv2.INTER_AREA]
def get_param():
hr, wr = np.random.choice(np.linspace(0.5, 1.5, 11), 2)
#hr, wr = np.random.uniform(low=0.5, high=1.5, size=2)
interp = np.random.choice(valid_interps)
return [hr, wr, interp]
if same:
if np.random.randint(2):
a_par = get_param()
im_a = cv2.resize(im_a, None, fx=a_par[0], fy=a_par[1], interpolation=a_par[2])
im_b = cv2.resize(im_b, None, fx=a_par[0], fy=a_par[1], interpolation=a_par[2])
else:
a_par = get_param()
im_a = cv2.resize(im_a, None, fx=a_par[0], fy=a_par[1], interpolation=a_par[2])
if np.random.randint(2):
b_par = get_param()
while np.all(a_par == b_par):
b_par = get_param()
im_b = cv2.resize(im_b, None, fx=b_par[0], fy=b_par[1], interpolation=b_par[2])
return im_a, im_b
def random_jpeg(im_a, im_b, same):
def get_param():
#jpeg_quality_a = np.random.randint(50, 100) # doesnt include 100
return np.random.choice(np.linspace(50, 100, 11))
if same:
if np.random.randint(2):
a_par = get_param()
_, enc_a = cv2.imencode('.jpg', im_a, [int(cv2.IMWRITE_JPEG_QUALITY), a_par])
im_a = cv2.imdecode(enc_a, 1)
_, enc_b = cv2.imencode('.jpg', im_b, [int(cv2.IMWRITE_JPEG_QUALITY), a_par])
im_b = cv2.imdecode(enc_b, 1)
else:
a_par = get_param()
_, enc_a = cv2.imencode('.jpg', im_a, [int(cv2.IMWRITE_JPEG_QUALITY), a_par])
im_a = cv2.imdecode(enc_a, 1)
if np.random.randint(2):
b_par = get_param()
while np.all(a_par == b_par):
b_par = get_param()
_, enc_b = cv2.imencode('.jpg', im_b, [int(cv2.IMWRITE_JPEG_QUALITY), b_par])
im_b = cv2.imdecode(enc_b, 1)
return im_a, im_b
def gaussian_blur(im, kSz=None, sigma=1.0):
# Gaussian blur; if kSz is None, an odd kernel size is derived from sigma (about 3*sigma, at least 3)
if kSz is None:
kSz = np.ceil(3.0 * sigma)
kSz = kSz + 1 if kSz % 2 == 0 else kSz
kSz = max(kSz, 3) # minimum kernel size
kSz = int(kSz)
blur = cv2.GaussianBlur(im,(kSz,kSz), sigma)
return blur
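# Usage sketch (illustrative addition, not part of the original module): when kSz
# is left as None the kernel size is derived from sigma, e.g. sigma=2.0 gives 7x7.
def _example_gaussian_blur():
    im = np.zeros((32, 32, 3), dtype=np.uint8)
    im[16, 16] = 255  # single bright pixel
    return gaussian_blur(im, kSz=None, sigma=2.0)  # internally uses a 7x7 kernel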
def random_blur(im_a, im_b, same):
# only square gaussian kernels
def get_param():
kSz = (2 * np.random.randint(1, 8)) + 1 # [3, 15]
sigma = np.random.choice(np.linspace(1.0, 5.0, 9))
#sigma = np.random.uniform(low=1.0, high=5.0, size=None) # 3 * sigma = kSz
return [kSz, sigma]
if same:
if np.random.randint(2):
a_par = get_param()
im_a = cv2.GaussianBlur(im_a, (a_par[0], a_par[0]), a_par[1])
im_b = cv2.GaussianBlur(im_b, (a_par[0], a_par[0]), a_par[1])
else:
a_par = get_param()
im_a = cv2.GaussianBlur(im_a, (a_par[0], a_par[0]), a_par[1])
if np.random.randint(2):
b_par = get_param()
while np.all(a_par == b_par):
b_par = get_param()
im_b = cv2.GaussianBlur(im_b, (b_par[0], b_par[0]), b_par[1])
return im_a, im_b
def random_noise(im):
noise = np.random.randn(*np.shape(im)) * 10.0
return np.array(np.clip(noise + im, 0, 255.0), dtype=np.uint8)
| minyoungg/selfconsistency | lib/utils/util.py | Python | apache-2.0 | 5,391 | ["Gaussian"] | c94e89d7ce2f21aa5156094b9f6b4bca935a6261401601b364b98baa45147845 |
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import importlib
import logging
import os
import tempfile
import warnings
from contextlib import contextmanager
from distutils.version import LooseVersion
from functools import partial
import dill
import numpy as np
import scipy
import scipy.odr as odr
from IPython.display import display, display_pretty
from scipy.linalg import svd
from scipy.optimize import (
differential_evolution,
leastsq,
least_squares,
minimize,
OptimizeResult
)
from hyperspy.component import Component
from hyperspy.defaults_parser import preferences
from hyperspy.docstrings.model import FIT_PARAMETERS_ARG
from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG
from hyperspy.events import Event, Events, EventSuppressor
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.extensions import ALL_EXTENSIONS
from hyperspy.external.mpfit.mpfit import mpfit
from hyperspy.external.progressbar import progressbar
from hyperspy.misc.export_dictionary import (export_to_dictionary,
load_from_dictionary,
parse_flag_string,
reconstruct_object)
from hyperspy.misc.model_tools import current_model_values
from hyperspy.misc.slicing import copy_slice_from_whitelist
from hyperspy.misc.utils import (dummy_context_manager, shorten_name, slugify,
stash_active_state)
from hyperspy.signal import BaseSignal
from hyperspy.ui_registry import add_gui_method
_logger = logging.getLogger(__name__)
_COMPONENTS = ALL_EXTENSIONS["components1D"]
_COMPONENTS.update(ALL_EXTENSIONS["components2D"])
def _check_deprecated_optimizer(optimizer):
"""Can be removed in HyperSpy 2.0"""
deprecated_optimizer_dict = {
"fmin": "Nelder-Mead",
"fmin_cg": "CG",
"fmin_ncg": "Newton-CG",
"fmin_bfgs": "BFGS",
"fmin_l_bfgs_b": "L-BFGS-B",
"fmin_tnc": "TNC",
"fmin_powell": "Powell",
"mpfit": "lm",
"leastsq": "lm",
}
check_optimizer = deprecated_optimizer_dict.get(optimizer, None)
if check_optimizer:
warnings.warn(
f"`{optimizer}` has been deprecated and will be removed "
f"in HyperSpy 2.0. Please use `{check_optimizer}` instead.",
VisibleDeprecationWarning,
)
optimizer = check_optimizer
return optimizer
def reconstruct_component(comp_dictionary, **init_args):
_id = comp_dictionary['_id_name']
if _id in _COMPONENTS:
_class = getattr(
importlib.import_module(
_COMPONENTS[_id]["module"]), _COMPONENTS[_id]["class"])
elif "_class_dump" in comp_dictionary:
# When a component is not registered using the extension mechanism,
# it is serialized using dill.
_class = dill.loads(comp_dictionary['_class_dump'])
else:
raise ImportError(
f'Loading the {comp_dictionary["class"]} component ' +
'failed because the component is provided by the ' +
f'{comp_dictionary["package"]} Python package, but ' +
f'{comp_dictionary["package"]} is not installed.')
return _class(**init_args)
class ModelComponents(object):
"""Container for model components.
Useful to provide tab completion when running in IPython.
"""
def __init__(self, model):
self._model = model
def __repr__(self):
signature = "%4s | %19s | %19s | %19s"
ans = signature % ('#',
'Attribute Name',
'Component Name',
'Component Type')
ans += "\n"
ans += signature % ('-' * 4, '-' * 19, '-' * 19, '-' * 19)
if self._model:
for i, c in enumerate(self._model):
ans += "\n"
name_string = c.name
variable_name = slugify(name_string, valid_variable_name=True)
component_type = c.__class__.__name__
variable_name = shorten_name(variable_name, 19)
name_string = shorten_name(name_string, 19)
component_type = shorten_name(component_type, 19)
ans += signature % (i,
variable_name,
name_string,
component_type)
return ans
@add_gui_method(toolkey="hyperspy.Model")
class BaseModel(list):
"""Model and data fitting tools applicable to signals of both one and two
dimensions.
Models of one-dimensional signals should use the
:py:class:`~hyperspy.models.model1d` and models of two-dimensional signals
should use the :class:`~hyperspy.models.model2d`.
A model is constructed as a linear combination of
:py:mod:`~hyperspy._components` that are added to the model using the
:py:meth:`~hyperspy.model.BaseModel.append` or
:py:meth:`~hyperspy.model.BaseModel.extend`. There are many predefined
components available in the :py:mod:`~hyperspy._components`
module. If needed, new components can be created easily using the code of
existing components as a template.
Once defined, the model can be fitted to the data using :meth:`fit` or
:py:meth:`~hyperspy.model.BaseModel.multifit`. Once the optimizer reaches
the convergence criteria or the maximum number of iterations, the new values
of the component parameters are stored in the components.
It is possible to access the components in the model by their name or by
the index in the model. An example is given at the end of this docstring.
Attributes
----------
signal : BaseSignal instance
It contains the data to fit.
chisq : :py:class:`~.signal.BaseSignal` of float
Chi-squared of the signal (or np.nan if not yet fit)
dof : :py:class:`~.signal.BaseSignal` of int
Degrees of freedom of the signal (0 if not yet fit)
components : :py:class:`~.model.ModelComponents` instance
The components of the model are attributes of this class. This provides
a convenient way to access the model components when working in IPython
as it enables tab completion.
Methods
-------
set_signal_range, remove_signal_range, reset_signal_range,
add_signal_range.
Customize the signal range to fit.
fit, multifit
Fit the model to the data at the current position or the
full dataset.
save_parameters2file, load_parameters_from_file
Save/load the parameter values to/from a file.
plot
Plot the model and the data.
enable_plot_components, disable_plot_components
Plot each component separately. (Use after `plot`.)
set_current_values_to
Set the current value of all the parameters of the given component as
the value for all the dataset.
enable_adjust_position, disable_adjust_position
Enable/disable interactive adjustment of the position of the components
that have a well defined position. (Use after `plot`).
fit_component
Fit just the given component in the given signal range, that can be
set interactively.
set_parameters_not_free, set_parameters_free
Set the `free` status of several components and parameters at once.
See also
--------
:py:class:`~hyperspy.models.model1d.Model1D`
:py:class:`~hyperspy.models.model2d.Model2D`
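Examples
--------
A minimal sketch (assuming the usual HyperSpy entry points used in the
method examples below):
>>> s = hs.signals.Signal1D(np.arange(100))
>>> m = s.create_model()
>>> g = hs.model.components1D.Gaussian()
>>> m.append(g)
>>> m["Gaussian"] is g  # access a component by name
True
>>> m[0] is g  # or by index
True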
"""
def __init__(self):
self.events = Events()
self.events.fitted = Event("""
Event that triggers after fitting changed at least one parameter.
The event triggers after the fitting step was finished, and only if
at least one of the parameters changed.
Arguments
---------
obj : Model
The Model that the event belongs to
""", arguments=['obj'])
def __hash__(self):
# This is needed to simulate a hashable object so that PySide does not
# raise an exception when using windows.connect
return id(self)
def store(self, name=None):
"""Stores current model in the original signal
Parameters
----------
name : {None, str}
Stored model name. Auto-generated if left empty
"""
if self.signal is None:
raise ValueError("Cannot store models with no signal")
s = self.signal
s.models.store(self, name)
def save(self, file_name, name=None, **kwargs):
"""Saves signal and its model to a file
Parameters
----------
file_name : str
Name of the file
name : {None, str}
Stored model name. Auto-generated if left empty
**kwargs :
Other keyword arguments are passed onto `BaseSignal.save()`
"""
if self.signal is None:
raise ValueError("Currently cannot save models with no signal")
else:
self.store(name)
self.signal.save(file_name, **kwargs)
def _load_dictionary(self, dic):
"""Load data from dictionary.
Parameters
----------
dic : dict
A dictionary containing at least the following fields:
* _whitelist: a dictionary with keys used as references of save
attributes, for more information, see
:py:func:`~.misc.export_dictionary.load_from_dictionary`
* components: a dictionary, with information about components of
the model (see
:py:meth:`~.component.Parameter.as_dictionary`
documentation for more details)
* any field from _whitelist.keys()
"""
if 'components' in dic:
while len(self) != 0:
self.remove(self[0])
id_dict = {}
for comp in dic['components']:
init_args = {}
for k, flags_str in comp['_whitelist'].items():
if not len(flags_str):
continue
if 'init' in parse_flag_string(flags_str):
init_args[k] = reconstruct_object(flags_str, comp[k])
self.append(reconstruct_component(comp, **init_args))
id_dict.update(self[-1]._load_dictionary(comp))
# deal with twins:
for comp in dic['components']:
for par in comp['parameters']:
for tw in par['_twins']:
id_dict[tw].twin = id_dict[par['self']]
if '_whitelist' in dic:
load_from_dictionary(self, dic)
def __repr__(self):
title = self.signal.metadata.General.title
class_name = str(self.__class__).split("'")[1].split('.')[-1]
if len(title):
return "<%s, title: %s>" % (
class_name, self.signal.metadata.General.title)
else:
return "<%s>" % class_name
def _get_component(self, thing):
if isinstance(thing, int) or isinstance(thing, str):
thing = self[thing]
elif np.iterable(thing):
thing = [self._get_component(athing) for athing in thing]
return thing
elif not isinstance(thing, Component):
raise ValueError("Not a component or component id.")
if thing in self:
return thing
else:
raise ValueError("The component is not in the model.")
def insert(self, **kwargs):
raise NotImplementedError
def append(self, thing):
"""Add component to Model.
Parameters
----------
thing: `Component` instance.
"""
if not isinstance(thing, Component):
raise ValueError(
"Only `Component` instances can be added to a model")
# Check if any of the other components in the model has the same name
if thing in self:
raise ValueError("Component already in model")
component_name_list = [component.name for component in self]
if thing.name:
name_string = thing.name
else:
name_string = thing.__class__.__name__
if name_string in component_name_list:
temp_name_string = name_string
index = 0
while temp_name_string in component_name_list:
temp_name_string = name_string + "_" + str(index)
index += 1
name_string = temp_name_string
thing.name = name_string
thing._axes_manager = self.axes_manager
thing._create_arrays()
list.append(self, thing)
thing.model = self
setattr(self.components, slugify(name_string,
valid_variable_name=True), thing)
if self._plot_active:
self._connect_parameters2update_plot(components=[thing])
self.signal._plot.signal_plot.update()
def extend(self, iterable):
"""Append multiple components to the model.
Parameters
----------
iterable: iterable of `Component` instances.
"""
for object in iterable:
self.append(object)
def __delitem__(self, thing):
thing = self.__getitem__(thing)
self.remove(thing)
def remove(self, thing):
"""Remove component from model.
Examples
--------
>>> s = hs.signals.Signal1D(np.empty(1))
>>> m = s.create_model()
>>> g = hs.model.components1D.Gaussian()
>>> m.append(g)
You could remove `g` like this
>>> m.remove(g)
Like this:
>>> m.remove("Gaussian")
Or like this:
>>> m.remove(0)
"""
thing = self._get_component(thing)
if not np.iterable(thing):
thing = [thing, ]
for athing in thing:
for parameter in athing.parameters:
# Remove the parameter from its twin _twins
parameter.twin = None
for twin in [twin for twin in parameter._twins]:
twin.twin = None
list.remove(self, athing)
athing.model = None
if self._plot_active:
self.signal._plot.signal_plot.update()
def as_signal(self, component_list=None, out_of_range_to_nan=True,
show_progressbar=None, out=None, **kwargs):
"""Returns a recreation of the dataset using the model.
By default, the signal range outside of the fitted range is filled with nans.
Parameters
----------
component_list : list of HyperSpy components, optional
If a list of components is given, only the components given in the
list are used in making the returned spectrum. The components can
be specified by name, index or themselves.
out_of_range_to_nan : bool
If True the signal range outside of the fitted range is filled with
nans. Default True.
%s
out : {None, BaseSignal}
The signal where to put the result into. Convenient for parallel
processing. If None (default), creates a new one. If passed, it is
assumed to be of correct shape and dtype and not checked.
Returns
-------
BaseSignal : An instance of the same class as `BaseSignal`.
Examples
--------
>>> s = hs.signals.Signal1D(np.random.random((10,100)))
>>> m = s.create_model()
>>> l1 = hs.model.components1D.Lorentzian()
>>> l2 = hs.model.components1D.Lorentzian()
>>> m.append(l1)
>>> m.append(l2)
>>> s1 = m.as_signal()
>>> s2 = m.as_signal(component_list=[l1])
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
for k in [k for k in ["parallel", "max_workers"] if k in kwargs]:
warnings.warn(
f"`{k}` argument has been deprecated and will be removed in HyperSpy 2.0",
VisibleDeprecationWarning,
)
if out is None:
data = np.empty(self.signal.data.shape, dtype='float')
data.fill(np.nan)
signal = self.signal.__class__(
data,
axes=self.signal.axes_manager._get_axes_dicts())
signal.metadata.General.title = (
self.signal.metadata.General.title + " from fitted model")
else:
signal = out
data = signal.data
if not out_of_range_to_nan:
# To get the full signal range, including outside the fitted
# range, we need to set all the channel_switches to True
channel_switches_backup = copy.copy(self.channel_switches)
self.channel_switches[:] = True
self._as_signal_iter(
component_list=component_list,
show_progressbar=show_progressbar,
data=data
)
if not out_of_range_to_nan:
# Restore the channel_switches, previously set
self.channel_switches[:] = channel_switches_backup
return signal
as_signal.__doc__ %= SHOW_PROGRESSBAR_ARG
def _as_signal_iter(self, data, component_list=None,
show_progressbar=None):
# Note that show_progressbar can be an int to determine the progressbar
# position for thread-friendly bars. Otherwise race conditions are
# ugly...
if show_progressbar is None: # pragma: no cover
show_progressbar = preferences.General.show_progressbar
with stash_active_state(self if component_list else []):
if component_list:
component_list = [self._get_component(x)
for x in component_list]
for component_ in self:
active = component_ in component_list
if component_.active_is_multidimensional:
if active:
continue # Keep active_map
component_.active_is_multidimensional = False
component_.active = active
maxval = self.axes_manager._get_iterpath_size()
enabled = show_progressbar and (maxval != 0)
pbar = progressbar(total=maxval, disable=not enabled,
position=show_progressbar, leave=True)
for index in self.axes_manager:
self.fetch_stored_values(only_fixed=False)
data[self.axes_manager._getitem_tuple][
np.where(self.channel_switches)] = self.__call__(
non_convolved=not self.convolved, onlyactive=True).ravel()
pbar.update(1)
@property
def _plot_active(self):
if self._plot is not None and self._plot.is_active:
return True
else:
return False
def _connect_parameters2update_plot(self, components):
if self._plot_active is False:
return
for i, component in enumerate(components):
component.events.active_changed.connect(
self._model_line._auto_update_line, [])
for parameter in component.parameters:
parameter.events.value_changed.connect(
self._model_line._auto_update_line, [])
def _disconnect_parameters2update_plot(self, components):
if self._model_line is None:
return
for component in components:
component.events.active_changed.disconnect(
self._model_line._auto_update_line)
for parameter in component.parameters:
parameter.events.value_changed.disconnect(
self._model_line._auto_update_line)
def update_plot(self, render_figure=False, update_ylimits=False, **kwargs):
"""Update model plot.
The updating can be suspended using `suspend_update`.
See Also
--------
suspend_update
"""
if self._plot_active is True and self._suspend_update is False:
try:
if self._model_line is not None:
self._model_line.update(render_figure=render_figure,
update_ylimits=update_ylimits)
if self._plot_components:
for component in [component for component in self if
component.active is True]:
self._update_component_line(component)
except BaseException:
self._disconnect_parameters2update_plot(components=self)
@contextmanager
def suspend_update(self, update_on_resume=True):
"""Prevents plot from updating until 'with' clause completes.
See Also
--------
update_plot
"""
es = EventSuppressor()
es.add(self.axes_manager.events.indices_changed)
if self._model_line:
f = self._model_line._auto_update_line
for c in self:
es.add(c.events, f)
if c._position:
es.add(c._position.events)
for p in c.parameters:
es.add(p.events, f)
for c in self:
if hasattr(c, '_component_line'):
f = c._component_line._auto_update_line
es.add(c.events, f)
for p in c.parameters:
es.add(p.events, f)
old = self._suspend_update
self._suspend_update = True
with es.suppress():
yield
self._suspend_update = old
if update_on_resume is True:
for c in self:
position = c._position
if position:
position.events.value_changed.trigger(
obj=position, value=position.value)
self.update_plot(render_figure=True, update_ylimits=False)
def _close_plot(self):
if self._plot_components is True:
self.disable_plot_components()
self._disconnect_parameters2update_plot(components=self)
self._model_line = None
def enable_plot_components(self):
if self._plot is None or self._plot_components:
return
for component in [component for component in self if
component.active]:
self._plot_component(component)
self._plot_components = True
def disable_plot_components(self):
if self._plot is None:
return
if self._plot_components:
for component in self:
self._disable_plot_component(component)
self._plot_components = False
def _set_p0(self):
"(Re)sets the initial values for the parameters used in the curve fitting functions"
self.p0 = () # Stores the values and is fed as initial values to the fitter
for component in self:
if component.active:
for parameter in component.free_parameters:
self.p0 = (self.p0 + (parameter.value,)
if parameter._number_of_elements == 1
else self.p0 + parameter.value)
def set_boundaries(self, bounded=True):
warnings.warn(
"`set_boundaries()` has been deprecated and "
"will be made private in HyperSpy 2.0.",
VisibleDeprecationWarning,
)
self._set_boundaries(bounded=bounded)
def _set_boundaries(self, bounded=True):
"""Generate the boundary list.
Necessary before fitting with a boundary aware optimizer.
Parameters
----------
bounded : bool, default True
If True, loops through the model components and
populates the free parameter boundaries.
Returns
-------
None
"""
if not bounded:
self.free_parameters_boundaries = None
else:
self.free_parameters_boundaries = []
for component in self:
if component.active:
for param in component.free_parameters:
if param._number_of_elements == 1:
self.free_parameters_boundaries.append((param._bounds))
else:
self.free_parameters_boundaries.extend((param._bounds))
def _bounds_as_tuple(self):
"""Converts parameter bounds to tuples for least_squares()"""
if self.free_parameters_boundaries is None:
return (-np.inf, np.inf)
return tuple(
(a if a is not None else -np.inf, b if b is not None else np.inf)
for a, b in self.free_parameters_boundaries
)
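# Illustrative note (not part of the original source): for
# free_parameters_boundaries == [(0, None), (None, 5.0)] this returns
# ((0, np.inf), (-np.inf, 5.0)); with no boundaries set it returns (-np.inf, np.inf).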
def set_mpfit_parameters_info(self, bounded=True):
warnings.warn(
"`set_mpfit_parameters_info()` has been deprecated and "
"will be made private in HyperSpy 2.0.",
VisibleDeprecationWarning,
)
self._set_mpfit_parameters_info(bounded=bounded)
def _set_mpfit_parameters_info(self, bounded=True):
"""Generate the boundary list for mpfit.
Parameters
----------
bounded : bool, default True
If True, loops through the model components and
populates the free parameter boundaries.
Returns
-------
None
"""
if not bounded:
self.mpfit_parinfo = None
else:
self.mpfit_parinfo = []
for component in self:
if component.active:
for param in component.free_parameters:
limited = [False, False]
limits = [0, 0]
if param.bmin is not None:
limited[0] = True
limits[0] = param.bmin
if param.bmax is not None:
limited[1] = True
limits[1] = param.bmax
if param._number_of_elements == 1:
self.mpfit_parinfo.append(
{"limited": limited, "limits": limits}
)
else:
self.mpfit_parinfo.extend(
({"limited": limited, "limits": limits},)
* param._number_of_elements
)
def ensure_parameters_in_bounds(self):
"""For all active components, snaps their free parameter values to
be within their boundaries (if bounded). Does not touch the array of
values.
"""
for component in self:
if component.active:
for param in component.free_parameters:
bmin = -np.inf if param.bmin is None else param.bmin
bmax = np.inf if param.bmax is None else param.bmax
if param._number_of_elements == 1:
if not bmin <= param.value <= bmax:
min_d = np.abs(param.value - bmin)
max_d = np.abs(param.value - bmax)
if min_d < max_d:
param.value = bmin
else:
param.value = bmax
else:
values = np.array(param.value)
if param.bmin is not None:
minmask = values < bmin
values[minmask] = bmin
if param.bmax is not None:
maxmask = values > bmax
values[maxmask] = bmax
param.value = tuple(values)
def store_current_values(self):
""" Store the parameters of the current coordinates into the
`parameter.map` array and sets the `is_set` array attribute to True.
If the parameters array has not being defined yet it creates it filling
it with the current parameters at the current indices in the array."""
for component in self:
if component.active:
component.store_current_parameters_in_map()
def fetch_stored_values(self, only_fixed=False, update_on_resume=True):
"""Fetch the value of the parameters that have been previously stored
in `parameter.map['values']` if `parameter.map['is_set']` is `True` for
those indices.
If it is not previously stored, the current values from `parameter.value`
are used, which are typically from the fit in the previous pixel of a
multidimensional signal.
Parameters
----------
only_fixed : bool, optional
If True, only the fixed parameters are fetched.
update_on_resume : bool, optional
If True, update the model plot after values are updated.
See Also
--------
store_current_values
"""
cm = self.suspend_update if self._plot_active else dummy_context_manager
with cm(update_on_resume=update_on_resume):
for component in self:
component.fetch_stored_values(only_fixed=only_fixed)
def _on_navigating(self):
"""Same as fetch_stored_values but without update_on_resume since
the model plot is updated in the figure update callback.
"""
self.fetch_stored_values(only_fixed=False, update_on_resume=False)
def fetch_values_from_array(self, array, array_std=None):
"""Fetch the parameter values from the given array, optionally also
fetching the standard deviations.
Places the parameter values into both `m.p0` (the initial values
for the optimizer routine) and `component.parameter.value` and
`...std`, for parameters in active components ordered by their
position in the model and component.
Parameters
----------
array : array
array with the parameter values
array_std : {None, array}
array with the standard deviations of parameters
"""
self.p0 = array
self._fetch_values_from_p0(p_std=array_std)
def _fetch_values_from_p0(self, p_std=None):
"""Fetch the parameter values from the output of the optimizer `self.p0`,
placing them in their appropriate `component.parameter.value` and `...std`
Parameters
----------
p_std : array, optional
array containing the corresponding standard deviation.
"""
comp_p_std = None
counter = 0
for component in self: # Cut the parameters list
if component.active is True:
if p_std is not None:
comp_p_std = p_std[
counter: counter +
component._nfree_param]
component.fetch_values_from_array(
self.p0[counter: counter + component._nfree_param],
comp_p_std, onlyfree=True)
counter += component._nfree_param
def _model2plot(self, axes_manager, out_of_range2nans=True):
old_axes_manager = None
if axes_manager is not self.axes_manager:
old_axes_manager = self.axes_manager
self.axes_manager = axes_manager
self.fetch_stored_values()
s = self.__call__(non_convolved=False, onlyactive=True)
if old_axes_manager is not None:
self.axes_manager = old_axes_manager
self.fetch_stored_values()
if out_of_range2nans is True:
ns = np.empty(self.axis.axis.shape)
ns.fill(np.nan)
ns[np.where(self.channel_switches)] = s
s = ns
return s
def _model_function(self, param):
self.p0 = param
self._fetch_values_from_p0()
to_return = self.__call__(non_convolved=False, onlyactive=True)
return to_return
def _errfunc_sq(self, param, y, weights=None):
if weights is None:
weights = 1.0
return ((weights * self._errfunc(param, y)) ** 2).sum()
def _errfunc4mpfit(self, p, fjac=None, x=None, y=None, weights=None):
if fjac is None:
errfunc = self._model_function(p).ravel() - y
if weights is not None:
errfunc *= weights.ravel()
status = 0
return [status, errfunc]
else:
return [0, self._jacobian(p, y).T]
def _get_variance(self, only_current=True):
"""Return the variance taking into account the `channel_switches`.
If only_current=True, the variance for the current navigation indices
is returned, otherwise the variance for all navigation indices is
returned.
"""
variance = self.signal.get_noise_variance()
if variance is not None:
if isinstance(variance, BaseSignal):
if only_current:
variance = variance.data.__getitem__(
self.axes_manager._getitem_tuple
)[np.where(self.channel_switches)]
else:
variance = variance.data[..., np.where(
self.channel_switches)[0]]
else:
variance = 1.0
return variance
def _calculate_chisq(self):
variance = self._get_variance()
d = self(onlyactive=True).ravel() - self.signal()[np.where(
self.channel_switches)]
d *= d / (1. * variance) # d = difference^2 / variance.
self.chisq.data[self.signal.axes_manager.indices[::-1]] = d.sum()
def _set_current_degrees_of_freedom(self):
self.dof.data[self.signal.axes_manager.indices[::-1]] = len(self.p0)
@property
def red_chisq(self):
""":py:class:`~.signal.BaseSignal`: Reduced chi-squared.
Calculated from ``self.chisq`` and ``self.dof``.
"""
tmp = self.chisq / (- self.dof + self.channel_switches.sum() - 1)
tmp.metadata.General.title = self.signal.metadata.General.title + \
' reduced chi-squared'
return tmp
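# Illustrative note (not part of the original source): with N = channel_switches.sum()
# fitted channels and p = self.dof free parameters, the property above evaluates
# red_chisq = chisq / (N - p - 1).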
def _calculate_parameter_std(self, pcov, cost, ysize):
warn_cov = False
if pcov is None: # Indeterminate covariance
p_var = np.zeros(len(self.p0), dtype=float)
p_var.fill(np.nan)
warn_cov = True
elif isinstance(pcov, np.ndarray):
p_var = np.diag(pcov).astype(float) if pcov.ndim > 1 else pcov.astype(float)
if p_var.min() < 0 or np.any(np.isnan(p_var)) or np.any(np.isinf(p_var)):
# Numerical overflow on diagonal
p_var.fill(np.nan)
warn_cov = True
elif ysize > self.p0.size:
p_var *= cost / (ysize - self.p0.size)
p_var = np.sqrt(p_var)
else:
p_var.fill(np.nan)
warn_cov = True
else:
raise ValueError(f"pcov should be None or np.ndarray, got {type(pcov)}")
if warn_cov:
_logger.warning(
"Covariance of the parameters could not be estimated. "
"Estimated parameter standard deviations will be np.nan."
)
return p_var
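    # Illustrative note (added comment, not part of the original source): when
    # the optimizer returns an unscaled covariance and there are more data
    # points than parameters (ysize > len(p0)), the diagonal is rescaled by
    # cost / (ysize - len(p0)), i.e. by the reduced chi-squared, before taking
    # the square root -- the same convention used by scipy.optimize.curve_fit
    # with absolute_sigma=False.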
def _convert_variance_to_weights(self):
weights = None
variance = self.signal.get_noise_variance()
if variance is not None:
if isinstance(variance, BaseSignal):
variance = variance.data.__getitem__(self.axes_manager._getitem_tuple)[
np.where(self.channel_switches)
]
_logger.info("Setting weights to 1/variance of signal noise")
# Note that we square this later in self._errfunc_sq()
weights = 1.0 / np.sqrt(variance)
return weights
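    # Illustrative note (added comment, not part of the original source): with
    # weights w = 1 / sqrt(variance) as returned above, the objective used by
    # ``_errfunc_sq`` becomes
    #     sum((w * (model - data)) ** 2) = sum((model - data) ** 2 / variance)
    # i.e. the chi-squared objective of a standard weighted least-squares fit.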
def fit(
self,
optimizer="lm",
loss_function="ls",
grad="fd",
bounded=False,
update_plot=False,
print_info=False,
return_info=True,
fd_scheme="2-point",
**kwargs,
):
"""Fits the model to the experimental data.
Read more in the :ref:`User Guide <model.fitting>`.
Parameters
----------
%s
Returns
-------
None
Notes
-----
The chi-squared and reduced chi-squared statistics, and the
degrees of freedom, are computed automatically when fitting,
only when `loss_function="ls"`. They are stored as signals:
``chisq``, ``red_chisq`` and ``dof``.
If the attribute ``metada.Signal.Noise_properties.variance``
is defined as a ``Signal`` instance with the same
``navigation_dimension`` as the signal, and ``loss_function``
is ``"ls"`` or ``"huber"``, then a weighted fit is performed,
using the inverse of the noise variance as the weights.
Note that for both homoscedastic and heteroscedastic noise, if
``metadata.Signal.Noise_properties.variance`` does not contain
an accurate estimation of the variance of the data, then the
        chi-squared and reduced chi-squared statistics will not be
computed correctly. See the :ref:`Setting the noise properties
<signal.noise_properties>` in the User Guide for more details.
See Also
--------
* :py:meth:`~hyperspy.model.BaseModel.multifit`
* :py:meth:`~hyperspy.model.EELSModel.fit`
"""
cm = (
self.suspend_update
if (update_plot != self._plot_active) and not update_plot
else dummy_context_manager
)
# ---------------------------------------------
# Deprecated arguments (remove in HyperSpy 2.0)
# ---------------------------------------------
# Deprecate "fitter" argument
check_fitter = kwargs.pop("fitter", None)
if check_fitter:
warnings.warn(
f"`fitter='{check_fitter}'` has been deprecated and will be removed "
f"in HyperSpy 2.0. Please use `optimizer='{check_fitter}'` instead.",
VisibleDeprecationWarning,
)
optimizer = check_fitter
# Deprecated optimization algorithms
optimizer = _check_deprecated_optimizer(optimizer)
# Deprecate loss_function
if loss_function == "ml":
warnings.warn(
"`loss_function='ml'` has been deprecated and will be removed in "
"HyperSpy 2.0. Please use `loss_function='ML-poisson'` instead.",
VisibleDeprecationWarning,
)
loss_function = "ML-poisson"
# Deprecate grad=True/False
if isinstance(grad, bool):
alt_grad = "analytical" if grad else None
warnings.warn(
f"`grad={grad}` has been deprecated and will be removed in "
f"HyperSpy 2.0. Please use `grad={alt_grad}` instead.",
VisibleDeprecationWarning,
)
grad = alt_grad
# Deprecate ext_bounding
ext_bounding = kwargs.pop("ext_bounding", False)
if ext_bounding:
warnings.warn(
"`ext_bounding=True` has been deprecated and will be removed "
"in HyperSpy 2.0. Please use `bounded=True` instead.",
VisibleDeprecationWarning,
)
# Deprecate custom min_function
min_function = kwargs.pop("min_function", None)
if min_function:
warnings.warn(
"`min_function` has been deprecated and will be removed "
"in HyperSpy 2.0. Please use `loss_function` instead.",
VisibleDeprecationWarning,
)
loss_function = min_function
# Deprecate custom min_function
min_function_grad = kwargs.pop("min_function_grad", None)
if min_function_grad:
warnings.warn(
"`min_function_grad` has been deprecated and will be removed "
"in HyperSpy 2.0. Please use `grad` instead.",
VisibleDeprecationWarning,
)
grad = min_function_grad
# ---------------------------
# End of deprecated arguments
# ---------------------------
# Supported losses and optimizers
_supported_global = {
"Differential Evolution": differential_evolution,
}
if optimizer in ["Dual Annealing", "SHGO"]:
if LooseVersion(scipy.__version__) < LooseVersion("1.2.0"):
raise ValueError(f"`optimizer='{optimizer}'` requires scipy >= 1.2.0")
from scipy.optimize import dual_annealing, shgo
_supported_global.update({"Dual Annealing": dual_annealing, "SHGO": shgo})
_supported_fd_schemes = ["2-point", "3-point", "cs"]
_supported_losses = ["ls", "ML-poisson", "huber"]
_supported_bounds = [
"lm",
"trf",
"dogbox",
"Powell",
"TNC",
"L-BFGS-B",
"SLSQP",
"trust-constr",
"Differential Evolution",
"Dual Annealing",
"SHGO",
]
_supported_deriv_free = [
"Powell",
"COBYLA",
"Nelder-Mead",
"SLSQP",
"trust-constr",
]
# Validate arguments
if bounded:
if optimizer not in _supported_bounds:
raise ValueError(
f"Bounded optimization is only supported by "
f"'{_supported_bounds}', not '{optimizer}'."
)
# This has to be done before setting p0
self.ensure_parameters_in_bounds()
# Check validity of loss_function argument
if callable(loss_function):
loss_function = partial(loss_function, self)
elif loss_function not in _supported_losses:
raise ValueError(
f"loss_function must be one of {_supported_losses} "
f"or callable, not '{loss_function}'"
)
elif loss_function != "ls" and optimizer in ["lm", "trf", "dogbox", "odr"]:
raise NotImplementedError(
f"`optimizer='{optimizer}'` only supports "
"least-squares fitting (`loss_function='ls'`)"
)
# Initialize print_info
if print_info:
to_print = [
"Fit info:",
f" optimizer={optimizer}",
f" loss_function={loss_function}",
f" bounded={bounded}",
f" grad={grad}",
]
# Don't let user pass "jac" kwarg since
# it will clash with "grad" argument
jac = kwargs.pop("jac", None)
if jac:
_logger.warning(
f"`jac={jac}` keyword argument is not supported. "
f"Please use `grad={jac}` instead."
)
grad = jac
# Check validity of grad and fd_scheme arguments
if grad == "analytical":
_has_gradient, _jac_err_msg = self._check_analytical_jacobian()
if not _has_gradient:
# Alert the user that analytical gradients
# are not supported (and the reason why)
                raise ValueError(f"`grad='analytical'` is not supported: {_jac_err_msg}")
elif callable(grad):
grad = partial(grad, self)
elif grad == "fd":
if optimizer in ["lm", "odr"]:
grad = None
elif optimizer in _supported_deriv_free:
# Setting it to None here avoids unnecessary warnings
# from `scipy.optimize.minimize`
grad = None
else:
if fd_scheme not in _supported_fd_schemes:
raise ValueError(
"`fd_scheme` must be one of "
f"{_supported_fd_schemes}, not '{fd_scheme}'"
)
grad = fd_scheme
elif grad is None:
if optimizer in ["lm", "trf", "dogbox"]:
# `scipy.optimize.least_squares` does not accept None as
# an argument. `scipy.optimize.leastsq` will ALWAYS estimate
# the Jacobian even if Dfun=None. `mpfit` can support no
# differentiation, but for consistency across all three
# we enforce estimation below, and raise an error here.
raise ValueError(
f"`optimizer='{optimizer}'` does not support `grad=None`."
)
else:
raise ValueError(
"`grad` must be one of ['analytical', callable, None], not "
f"'{grad}'."
)
with cm(update_on_resume=True):
self.p_std = None
self._set_p0()
old_p0 = self.p0
if ext_bounding:
self._enable_ext_bounding()
# Get weights if metadata.Signal.Noise_properties.variance
# has been set, otherwise this returns None
weights = self._convert_variance_to_weights()
if weights is not None and loss_function == "ML-poisson":
# The attribute ``metadata.Signal.Noise_properties.variance`` is set,
# but weighted fitting is not supported for `loss_function='ml_poisson'`.
# Will proceed with unweighted fitting.
weights = None
args = (self.signal()[np.where(self.channel_switches)], weights)
if optimizer == "lm":
if bounded:
# Bounded Levenberg-Marquardt algorithm is supported
# using the `mpfit` function (bundled with HyperSpy)
self._set_mpfit_parameters_info(bounded=bounded)
# We enforce estimation of the Jacobian if no
# analytical gradients available for consistency
# with `scipy.optimize.leastsq`
auto_deriv = 0 if grad == "analytical" else 1
res = mpfit(
self._errfunc4mpfit,
self.p0[:],
parinfo=self.mpfit_parinfo,
functkw={
"y": self.signal()[self.channel_switches],
"weights": weights,
},
autoderivative=auto_deriv,
quiet=1,
**kwargs,
)
# Return as an OptimizeResult object
self.fit_output = res.optimize_result
self.p0 = self.fit_output.x
ysize = len(self.fit_output.x) + self.fit_output.dof
cost = self.fit_output.fnorm
pcov = self.fit_output.perror ** 2
# Calculate estimated parameter standard deviation
self.p_std = self._calculate_parameter_std(pcov, cost, ysize)
else:
# Unbounded Levenberg-Marquardt algorithm is supported
# using the `scipy.optimize.leastsq` function. Note that
# Dfun=None means the gradient is always estimated here.
grad = self._jacobian if grad == "analytical" else None
res = leastsq(
self._errfunc,
self.p0[:],
Dfun=grad,
col_deriv=1,
args=args,
full_output=True,
**kwargs,
)
self.fit_output = OptimizeResult(
x=res[0],
covar=res[1],
fun=res[2]["fvec"],
nfev=res[2]["nfev"],
success=res[4] in [1, 2, 3, 4],
status=res[4],
message=res[3],
)
self.p0 = self.fit_output.x
ysize = len(self.fit_output.fun)
cost = np.sum(self.fit_output.fun ** 2)
pcov = self.fit_output.covar
# Calculate estimated parameter standard deviation
self.p_std = self._calculate_parameter_std(pcov, cost, ysize)
elif optimizer in ["trf", "dogbox"]:
self._set_boundaries(bounded=bounded)
def _wrap_jac(*args, **kwargs):
# Our Jacobian function computes derivatives along
# columns, so we need the transpose instead here
return self._jacobian(*args, **kwargs).T
grad = _wrap_jac if grad == "analytical" else grad
self.fit_output = least_squares(
self._errfunc,
self.p0[:],
args=args,
bounds=self._bounds_as_tuple(),
jac=grad,
method=optimizer,
**kwargs,
)
self.p0 = self.fit_output.x
ysize = len(self.fit_output.fun)
jac = self.fit_output.jac
cost = 2 * self.fit_output.cost
# Do Moore-Penrose inverse, discarding zero singular values
# to get pcov (as per scipy.optimize.curve_fit())
_, s, VT = svd(jac, full_matrices=False)
threshold = np.finfo(float).eps * max(jac.shape) * s[0]
s = s[s > threshold]
VT = VT[: s.size]
pcov = np.dot(VT.T / s ** 2, VT)
# Calculate estimated parameter standard deviation
self.p_std = self._calculate_parameter_std(pcov, cost, ysize)
elif optimizer == "odr":
if not hasattr(self, "axis"):
raise NotImplementedError(
"`optimizer='odr'` is not implemented for Model2D"
)
odr_jacobian = self._jacobian4odr if grad == "analytical" else None
modelo = odr.Model(fcn=self._function4odr, fjacb=odr_jacobian)
mydata = odr.RealData(
self.axis.axis[np.where(self.channel_switches)],
self.signal()[np.where(self.channel_switches)],
sx=None,
sy=(1.0 / weights if weights is not None else None),
)
myodr = odr.ODR(mydata, modelo, beta0=self.p0[:], **kwargs)
res = myodr.run()
dd = {
"x": res.beta,
"perror": res.sd_beta,
"covar": res.cov_beta,
}
if hasattr(res, "info"):
dd["status"] = res.info
dd["message"] = ", ".join(res.stopreason)
# Note that a value of 5 means maximum iterations reached
dd["success"] = (res.info >= 0) and (res.info < 4)
self.fit_output = OptimizeResult(**dd)
self.p0 = self.fit_output.x
self.p_std = self.fit_output.perror
else:
# scipy.optimize.* functions
if loss_function == "ls":
f_min = self._errfunc_sq
f_der = self._gradient_ls if grad == "analytical" else grad
elif loss_function == "ML-poisson":
f_min = self._poisson_likelihood_function
f_der = self._gradient_ml if grad == "analytical" else grad
elif loss_function == "huber":
f_min = self._huber_loss_function
f_der = self._gradient_huber if grad == "analytical" else grad
huber_delta = kwargs.pop("huber_delta", 1.0)
args = args + (huber_delta,)
elif callable(loss_function):
f_min = loss_function
f_der = grad
self._set_boundaries(bounded=bounded)
if optimizer in _supported_global:
de_b = self._bounds_as_tuple()
if np.any(~np.isfinite(de_b)):
raise ValueError(
"Finite upper and lower bounds must be specified "
"using `bmin/bmax` for every free parameter and "
"`bounded=True` needs to be set as argument of "
f"`m.fit()` when using `optimizer='{optimizer}'`."
)
self.fit_output = _supported_global[optimizer](
f_min, de_b, args=args, **kwargs
)
else:
self.fit_output = minimize(
f_min,
self.p0,
jac=f_der,
args=args,
method=optimizer,
bounds=self.free_parameters_boundaries,
**kwargs,
)
self.p0 = self.fit_output.x
if np.iterable(self.p0) == 0:
self.p0 = (self.p0,)
self._fetch_values_from_p0(p_std=self.p_std)
self.store_current_values()
self._calculate_chisq()
self._set_current_degrees_of_freedom()
if ext_bounding:
self._disable_ext_bounding()
if np.any(old_p0 != self.p0):
self.events.fitted.trigger(self)
# Print details about the fit we just performed
if print_info:
output_print = copy.copy(self.fit_output)
# Drop these as they can be large (== size of data array)
output_print.pop("fun", None)
output_print.pop("jac", None)
to_print.extend(["Fit result:", output_print])
print("\n".join([str(pr) for pr in to_print]))
# Check if the optimization actually succeeded
success = self.fit_output.get("success", None)
if success is False:
message = self.fit_output.get("message", "Unknown reason")
_logger.warning(f"`m.fit()` did not exit successfully. Reason: {message}")
# Return info
if return_info:
return self.fit_output
else:
return None
fit.__doc__ %= FIT_PARAMETERS_ARG
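    # Usage sketch (illustrative comment only; ``s`` and the Gaussian component
    # are assumptions, not taken from this file):
    #   >>> m = s.create_model()
    #   >>> m.append(hs.model.components1D.Gaussian())
    #   >>> m.fit()                        # defaults: optimizer="lm", loss_function="ls"
    #   >>> m.fit(optimizer="L-BFGS-B", loss_function="huber", bounded=True)
    #   >>> m.p_std                        # estimated parameter standard deviations
    #   >>> m.red_chisq.data               # reduced chi-squared of the fit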
def multifit(
self,
mask=None,
fetch_only_fixed=False,
autosave=False,
autosave_every=10,
show_progressbar=None,
interactive_plot=False,
iterpath=None,
**kwargs,
):
"""Fit the data to the model at all positions of the navigation dimensions.
Parameters
----------
mask : np.ndarray, optional
            To mask (i.e. do not fit) at certain positions, pass a boolean
numpy.array, where True indicates that the data will NOT be
fitted at the given position.
fetch_only_fixed : bool, default False
            If True, only the fixed parameter values will be updated
            when changing the position.
autosave : bool, default False
If True, the result of the fit will be saved automatically
with a frequency defined by autosave_every.
autosave_every : int, default 10
Save the result of fitting every given number of spectra.
%s
interactive_plot : bool, default False
If True, update the plot for every position as they are processed.
Note that this slows down the fitting by a lot, but it allows for
interactive monitoring of the fitting (if in interactive mode).
iterpath : {None, "flyback", "serpentine"}, default None
If "flyback":
At each new row the index begins at the first column,
in accordance with the way :py:class:`numpy.ndindex` generates indices.
If "serpentine":
Iterate through the signal in a serpentine, "snake-game"-like
manner instead of beginning each new row at the first index.
Works for n-dimensional navigation space, not just 2D.
If None:
Currently ``None -> "flyback"``. The default argument will use
the ``"flyback"`` iterpath, but shows a warning that this will
change to ``"serpentine"`` in version 2.0.
**kwargs : keyword arguments
Any extra keyword argument will be passed to the fit method.
See the documentation for :py:meth:`~hyperspy.model.BaseModel.fit`
for a list of valid arguments.
Returns
-------
None
See Also
--------
* :py:meth:`~hyperspy.model.BaseModel.fit`
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
if autosave:
fd, autosave_fn = tempfile.mkstemp(
prefix="hyperspy_autosave-", dir=".", suffix=".npz"
)
os.close(fd)
autosave_fn = autosave_fn[:-4]
_logger.info(
f"Autosaving every {autosave_every} pixels to {autosave_fn}.npz. "
"When multifit finishes, this file will be deleted."
)
if mask is not None and (
mask.shape != tuple(self.axes_manager._navigation_shape_in_array)
):
raise ValueError(
"The mask must be a numpy array of boolean type with "
f"shape: {self.axes_manager._navigation_shape_in_array}"
)
if iterpath is None:
if self.axes_manager.iterpath == "flyback":
# flyback is set by default in axes_manager.iterpath on signal creation
warnings.warn(
"The `iterpath` default will change from 'flyback' to 'serpentine' "
"in HyperSpy version 2.0. Change the 'iterpath' argument to other than "
"None to suppress this warning.",
VisibleDeprecationWarning,
)
# otherwise use whatever is set at m.axes_manager.iterpath
else:
self.axes_manager.iterpath = iterpath
masked_elements = 0 if mask is None else mask.sum()
maxval = self.axes_manager._get_iterpath_size(masked_elements)
show_progressbar = show_progressbar and (maxval != 0)
i = 0
with self.axes_manager.events.indices_changed.suppress_callback(
self.fetch_stored_values
):
if interactive_plot:
outer = dummy_context_manager
inner = self.suspend_update
else:
outer = self.suspend_update
inner = dummy_context_manager
with outer(update_on_resume=True):
with progressbar(
total=maxval, disable=not show_progressbar, leave=True
) as pbar:
for index in self.axes_manager:
with inner(update_on_resume=True):
if mask is None or not mask[index[::-1]]:
# first check if model has set initial values in
# parameters.map['values'][indices],
# otherwise use values from previous fit
self.fetch_stored_values(only_fixed=fetch_only_fixed)
self.fit(**kwargs)
i += 1
pbar.update(1)
if autosave and i % autosave_every == 0:
self.save_parameters2file(autosave_fn)
# Trigger the indices_changed event to update to current indices,
# since the callback was suppressed
self.axes_manager.events.indices_changed.trigger(self.axes_manager)
if autosave is True:
_logger.info(f"Deleting temporary file: {autosave_fn}.npz")
os.remove(autosave_fn + ".npz")
multifit.__doc__ %= (SHOW_PROGRESSBAR_ARG)
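    # Usage sketch (illustrative comment only; the mask shape is an assumption):
    #   >>> mask = np.zeros(m.axes_manager._navigation_shape_in_array, dtype=bool)
    #   >>> mask[0, 0] = True              # do not fit at this position
    #   >>> m.multifit(mask=mask, iterpath="serpentine", optimizer="lm")
    # Any extra keyword argument (e.g. ``optimizer``) is forwarded to ``fit()``.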
def save_parameters2file(self, filename):
"""Save the parameters array in binary format.
The data is saved to a single file in numpy's uncompressed ``.npz``
format.
Parameters
----------
filename : str
See Also
--------
load_parameters_from_file, export_results
Notes
-----
This method can be used to save the current state of the model in a way
        that can be loaded back to recreate it using
        `load_parameters_from_file`. Actually, as of HyperSpy 0.8 this is the
        only way to do so.
However, this is known to be brittle. For example see
https://github.com/hyperspy/hyperspy/issues/341.
"""
kwds = {}
i = 0
for component in self:
cname = component.name.lower().replace(' ', '_')
for param in component.parameters:
pname = param.name.lower().replace(' ', '_')
kwds['%s_%s.%s' % (i, cname, pname)] = param.map
i += 1
np.savez(filename, **kwds)
def load_parameters_from_file(self, filename):
"""Loads the parameters array from a binary file written with the
'save_parameters2file' function.
Parameters
        ----------
filename : str
See Also
--------
save_parameters2file, export_results
Notes
-----
In combination with `save_parameters2file`, this method can be used to
        recreate a model stored in a file. Actually, before HyperSpy 0.8 this
        was the only way to do so. However, this is known to be brittle. For
example see https://github.com/hyperspy/hyperspy/issues/341.
"""
f = np.load(filename)
i = 0
for component in self: # Cut the parameters list
cname = component.name.lower().replace(' ', '_')
for param in component.parameters:
pname = param.name.lower().replace(' ', '_')
param.map = f['%s_%s.%s' % (i, cname, pname)]
i += 1
self.fetch_stored_values()
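    # Round-trip sketch (illustrative comment; the component set-up is an
    # assumption -- the same components must be appended in the same order):
    #   >>> m.save_parameters2file("params")             # writes params.npz
    #   >>> m2 = s.create_model()
    #   >>> m2.append(hs.model.components1D.Gaussian())
    #   >>> m2.load_parameters_from_file("params.npz")
    # The .npz keys follow the "<index>_<component>.<parameter>" pattern built
    # in the loops above.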
def assign_current_values_to_all(self, components_list=None, mask=None):
"""Set parameter values for all positions to the current ones.
Parameters
----------
        components_list : list of components, optional
If a list of components is given, the operation will be performed
only in the value of the parameters of the given components.
The components can be specified by name, index or themselves.
mask : boolean numpy array or None, optional
The operation won't be performed where mask is True.
"""
if components_list is None:
components_list = []
for comp in self:
if comp.active:
components_list.append(comp)
else:
components_list = [self._get_component(x) for x in components_list]
for comp in components_list:
for parameter in comp.parameters:
parameter.assign_current_value_to_all(mask=mask)
def _enable_ext_bounding(self, components=None):
"""
"""
if components is None:
components = self
for component in components:
for parameter in component.parameters:
parameter.ext_bounded = True
def _disable_ext_bounding(self, components=None):
"""
"""
if components is None:
components = self
for component in components:
for parameter in component.parameters:
parameter.ext_bounded = False
def export_results(self, folder=None, format="hspy", save_std=False,
only_free=True, only_active=True):
"""Export the results of the parameters of the model to the desired
folder.
Parameters
----------
folder : str or None
The path to the folder where the file will be saved. If `None` the
current folder is used by default.
format : str
The extension of the file format. It must be one of the
fileformats supported by HyperSpy. The default is "hspy".
save_std : bool
If True, also the standard deviation will be saved.
only_free : bool
If True, only the value of the parameters that are free will be
exported.
only_active : bool
If True, only the value of the active parameters will be exported.
Notes
-----
        The name of the files will be determined by the Component and
        Parameter name attributes. Therefore, it is possible to customise
        the file names by modifying the name attributes.
"""
for component in self:
if only_active is False or component.active:
component.export(folder=folder, format=format,
save_std=save_std, only_free=only_free)
def plot_results(self, only_free=True, only_active=True):
"""Plot the value of the parameters of the model
Parameters
----------
only_free : bool
If True, only the value of the parameters that are free will be
plotted.
only_active : bool
If True, only the value of the active parameters will be plotted.
Notes
-----
        The name of the files will be determined by the Component and
        Parameter name attributes. Therefore, it is possible to customise
        the file names by modifying the name attributes.
"""
for component in self:
if only_active is False or component.active:
component.plot(only_free=only_free)
def print_current_values(self, only_free=False, only_active=False,
component_list=None, fancy=True):
"""Prints the current values of the parameters of all components.
Parameters
----------
only_free : bool
If True, only components with free parameters will be printed. Within these,
only parameters which are free will be printed.
only_active : bool
If True, only values of active components will be printed
component_list : None or list of components.
If None, print all components.
fancy : bool
If True, attempts to print using html rather than text in the notebook.
"""
if fancy:
display(current_model_values(
model=self, only_free=only_free, only_active=only_active,
component_list=component_list))
else:
display_pretty(current_model_values(
model=self, only_free=only_free, only_active=only_active,
component_list=component_list))
def set_parameters_not_free(self, component_list=None,
parameter_name_list=None):
"""
Sets the parameters in a component in a model to not free.
Parameters
----------
component_list : None, or list of hyperspy components, optional
If None, will apply the function to all components in the model.
If list of components, will apply the functions to the components
in the list. The components can be specified by name, index or
themselves.
parameter_name_list : None or list of strings, optional
If None, will set all the parameters to not free.
If list of strings, will set all the parameters with the same name
as the strings in parameter_name_list to not free.
Examples
--------
>>> v1 = hs.model.components1D.Voigt()
>>> m.append(v1)
>>> m.set_parameters_not_free()
>>> m.set_parameters_not_free(component_list=[v1],
parameter_name_list=['area','centre'])
See also
--------
set_parameters_free
hyperspy.component.Component.set_parameters_free
hyperspy.component.Component.set_parameters_not_free
"""
if not component_list:
component_list = []
for _component in self:
component_list.append(_component)
else:
component_list = [self._get_component(x) for x in component_list]
for _component in component_list:
_component.set_parameters_not_free(parameter_name_list)
def set_parameters_free(self, component_list=None,
parameter_name_list=None):
"""
Sets the parameters in a component in a model to free.
Parameters
----------
component_list : None, or list of hyperspy components, optional
If None, will apply the function to all components in the model.
If list of components, will apply the functions to the components
in the list. The components can be specified by name, index or
themselves.
parameter_name_list : None or list of strings, optional
            If None, will set all the parameters to free.
            If list of strings, will set all the parameters with the same name
            as the strings in parameter_name_list to free.
Examples
--------
>>> v1 = hs.model.components1D.Voigt()
>>> m.append(v1)
>>> m.set_parameters_free()
>>> m.set_parameters_free(component_list=[v1],
parameter_name_list=['area','centre'])
See also
--------
set_parameters_not_free
hyperspy.component.Component.set_parameters_free
hyperspy.component.Component.set_parameters_not_free
"""
if not component_list:
component_list = []
for _component in self:
component_list.append(_component)
else:
component_list = [self._get_component(x) for x in component_list]
for _component in component_list:
_component.set_parameters_free(parameter_name_list)
def set_parameters_value(
self,
parameter_name,
value,
component_list=None,
only_current=False):
"""
Sets the value of a parameter in components in a model to a specified
value
Parameters
----------
parameter_name : string
Name of the parameter whose value will be changed
value : number
The new value of the parameter
component_list : list of hyperspy components, optional
            A list of components whose parameters will be changed. The components
can be specified by name, index or themselves.
only_current : bool, default False
If True, will only change the parameter value at the current
position in the model.
If False, will change the parameter value for all the positions.
Examples
--------
>>> v1 = hs.model.components1D.Voigt()
>>> v2 = hs.model.components1D.Voigt()
>>> m.extend([v1,v2])
>>> m.set_parameters_value('area', 5)
>>> m.set_parameters_value('area', 5, component_list=[v1])
>>> m.set_parameters_value('area', 5, component_list=[v1],
only_current=True)
"""
if not component_list:
component_list = []
for _component in self:
component_list.append(_component)
else:
component_list = [self._get_component(x) for x in component_list]
for _component in component_list:
for _parameter in _component.parameters:
if _parameter.name == parameter_name:
if only_current:
_parameter.value = value
_parameter.store_current_value_in_array()
else:
_parameter.value = value
_parameter.assign_current_value_to_all()
def as_dictionary(self, fullcopy=True):
"""Returns a dictionary of the model, including all components, degrees
of freedom (dof) and chi-squared (chisq) with values.
Parameters
----------
fullcopy : bool (optional, True)
Copies of objects are stored, not references. If any found,
functions will be pickled and signals converted to dictionaries
Returns
-------
dictionary : dict
A dictionary including at least the following fields:
* components: a list of dictionaries of components, one per
component
* _whitelist: a dictionary with keys used as references for saved
attributes, for more information, see
:py:func:`~hyperspy.misc.export_dictionary.export_to_dictionary`
* any field from _whitelist.keys()
Examples
--------
>>> s = signals.Signal1D(np.random.random((10,100)))
>>> m = s.create_model()
>>> l1 = components1d.Lorentzian()
>>> l2 = components1d.Lorentzian()
>>> m.append(l1)
>>> m.append(l2)
>>> d = m.as_dictionary()
>>> m2 = s.create_model(dictionary=d)
"""
dic = {'components': [c.as_dictionary(fullcopy) for c in self]}
export_to_dictionary(self, self._whitelist, dic, fullcopy)
def remove_empty_numpy_strings(dic):
for k, v in dic.items():
if isinstance(v, dict):
remove_empty_numpy_strings(v)
elif isinstance(v, list):
for vv in v:
if isinstance(vv, dict):
remove_empty_numpy_strings(vv)
elif isinstance(vv, np.string_) and len(vv) == 0:
vv = ''
elif isinstance(v, np.string_) and len(v) == 0:
del dic[k]
dic[k] = ''
remove_empty_numpy_strings(dic)
return dic
def set_component_active_value(
self, value, component_list=None, only_current=False):
"""
Sets the component 'active' parameter to a specified value
Parameters
----------
value : bool
The new value of the 'active' parameter
component_list : list of hyperspy components, optional
            A list of components whose parameters will be changed. The components
can be specified by name, index or themselves.
only_current : bool, default False
If True, will only change the parameter value at the current
position in the model.
If False, will change the parameter value for all the positions.
Examples
--------
>>> v1 = hs.model.components1D.Voigt()
>>> v2 = hs.model.components1D.Voigt()
>>> m.extend([v1,v2])
>>> m.set_component_active_value(False)
>>> m.set_component_active_value(True, component_list=[v1])
>>> m.set_component_active_value(False, component_list=[v1],
only_current=True)
"""
if not component_list:
component_list = []
for _component in self:
component_list.append(_component)
else:
component_list = [self._get_component(x) for x in component_list]
for _component in component_list:
_component.active = value
if _component.active_is_multidimensional:
if only_current:
_component._active_array[
self.axes_manager.indices[::-1]] = value
else:
_component._active_array.fill(value)
def __getitem__(self, value):
"""x.__getitem__(y) <==> x[y]"""
if isinstance(value, str):
component_list = []
for component in self:
if component.name:
if component.name == value:
component_list.append(component)
elif component.__class__.__name__ == value:
component_list.append(component)
if component_list:
if len(component_list) == 1:
return component_list[0]
else:
raise ValueError(
"There are several components with "
"the name \"" + str(value) + "\"")
else:
raise ValueError(
"Component name \"" + str(value) +
"\" not found in model")
else:
return list.__getitem__(self, value)
def create_samfire(self, workers=None, setup=True, **kwargs):
"""Creates a SAMFire object.
Parameters
----------
workers : {None, int}
the number of workers to initialise.
If zero, all computations will be done serially.
If None (default), will attempt to use (number-of-cores - 1),
however if just one core is available, will use one worker.
setup : bool
if the setup should be run upon initialization.
**kwargs
            Any other keyword arguments are passed to ``_setup`` and in turn to ``SamfirePool``.
"""
from hyperspy.samfire import Samfire
return Samfire(self, workers=workers,
setup=setup, **kwargs)
class ModelSpecialSlicers(object):
def __init__(self, model, isNavigation):
self.isNavigation = isNavigation
self.model = model
def __getitem__(self, slices):
array_slices = self.model.signal._get_array_slices(
slices,
self.isNavigation)
_signal = self.model.signal._slicer(slices, self.isNavigation)
# TODO: for next major release, change model creation defaults to not
# automate anything. For now we explicitly look for "auto_" kwargs and
# disable them:
import inspect
pars = inspect.signature(_signal.create_model).parameters
kwargs = {key: False for key in pars.keys() if key.startswith('auto_')}
_model = _signal.create_model(**kwargs)
dims = (self.model.axes_manager.navigation_dimension,
self.model.axes_manager.signal_dimension)
if self.isNavigation:
_model.channel_switches[:] = self.model.channel_switches
else:
_model.channel_switches[:] = \
np.atleast_1d(
self.model.channel_switches[
tuple(array_slices[-dims[1]:])])
twin_dict = {}
for comp in self.model:
init_args = {}
for k, v in comp._whitelist.items():
if v is None:
continue
flags_str, value = v
if 'init' in parse_flag_string(flags_str):
init_args[k] = value
_model.append(comp.__class__(**init_args))
copy_slice_from_whitelist(self.model,
_model,
dims,
(slices, array_slices),
self.isNavigation,
)
for co, cn in zip(self.model, _model):
copy_slice_from_whitelist(co,
cn,
dims,
(slices, array_slices),
self.isNavigation)
if _model.axes_manager.navigation_size < 2:
if co.active_is_multidimensional:
cn.active = co._active_array[array_slices[:dims[0]]]
for po, pn in zip(co.parameters, cn.parameters):
copy_slice_from_whitelist(po,
pn,
dims,
(slices, array_slices),
self.isNavigation)
twin_dict[id(po)] = ([id(i) for i in list(po._twins)], pn)
for k in twin_dict.keys():
for tw_id in twin_dict[k][0]:
twin_dict[tw_id][1].twin = twin_dict[k][1]
_model.chisq.data = _model.chisq.data.copy()
_model.dof.data = _model.dof.data.copy()
_model.fetch_stored_values() # to update and have correct values
if not self.isNavigation:
for _ in _model.axes_manager:
_model._calculate_chisq()
return _model
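    # Usage sketch (illustrative comment; assumes the model exposes instances
    # of this class as ``m.inav`` and ``m.isig``, as done elsewhere in HyperSpy):
    #   >>> m_sub = m.inav[0:5]            # model restricted in navigation space
    #   >>> m_sig = m.isig[1.0:3.0]        # model restricted along the signal axis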
# vim: textwidth=80
|
thomasaarholt/hyperspy
|
hyperspy/model.py
|
Python
|
gpl-3.0
| 82,634
|
[
"Gaussian"
] |
d6480f42ef71cc011905bc68561abb229202a77737e0f9dcfd3a92ad5a542107
|
"""A simple example of how to use IPython.config.application.Application.
This should serve as a simple example that shows how the IPython config
system works. The main classes are:
* IPython.config.configurable.Configurable
* IPython.config.configurable.SingletonConfigurable
* IPython.config.loader.Config
* IPython.config.application.Application
To see the command line option help, run this program from the command line::
$ python appconfig.py -h
To make one of your classes configurable (from the command line and config
files) inherit from Configurable and declare class attributes as traits (see
classes Foo and Bar below). To make the traits configurable, you will need
to set the following options:
* ``config``: set to ``True`` to make the attribute configurable.
* ``shortname``: by default, configurable attributes are set using the syntax
"Classname.attributename". At the command line, this is a bit verbose, so
we allow "shortnames" to be declared. Setting a shortname is optional, but
when you do this, you can set the option at the command line using the
syntax: "shortname=value".
* ``help``: set the help string to display a help message when the ``-h``
option is given at the command line. The help string should be valid ReST.
When the config attribute of an Application is updated, it will fire all of
the trait's events for all of the config=True attributes.
"""
import sys
from IPython.config.configurable import Configurable
from IPython.config.application import Application
from IPython.utils.traitlets import (
Bool, Unicode, Int, Float, List, Dict
)
class Foo(Configurable):
"""A class that has configurable, typed attributes.
"""
i = Int(0, config=True, help="The integer i.")
j = Int(1, config=True, help="The integer j.")
name = Unicode(u'Brian', config=True, help="First name.")
class Bar(Configurable):
enabled = Bool(True, config=True, help="Enable bar.")
class MyApp(Application):
name = Unicode(u'myapp')
running = Bool(False, config=True,
help="Is the app running?")
classes = List([Bar, Foo])
config_file = Unicode(u'', config=True,
help="Load this config file")
aliases = Dict(dict(i='Foo.i',j='Foo.j',name='Foo.name', running='MyApp.running',
enabled='Bar.enabled', log_level='MyApp.log_level'))
flags = Dict(dict(enable=({'Bar': {'enabled' : True}}, "Enable Bar"),
disable=({'Bar': {'enabled' : False}}, "Disable Bar"),
debug=({'MyApp':{'log_level':10}}, "Set loglevel to DEBUG")
))
def init_foo(self):
# Pass config to other classes for them to inherit the config.
self.foo = Foo(config=self.config)
def init_bar(self):
# Pass config to other classes for them to inherit the config.
self.bar = Bar(config=self.config)
def initialize(self, argv=None):
self.parse_command_line(argv)
if self.config_file:
self.load_config_file(self.config_file)
self.init_foo()
self.init_bar()
def start(self):
        print("app.config:")
        print(self.config)
def main():
app = MyApp()
app.initialize()
app.start()
if __name__ == "__main__":
main()
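# Example invocations (illustrative comment; the exact option syntax differs
# between IPython versions -- the forms below follow the module docstring):
#   $ python appconfig.py -h
#   $ python appconfig.py i=5 name=Alice enabled=False
#   $ python appconfig.py --debug --disable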
|
cloud9ers/gurumate
|
environment/share/doc/ipython/examples/core/appconfig.py
|
Python
|
lgpl-3.0
| 3,307
|
[
"Brian"
] |
6a642e5169de9166882b384776f67651b18de174c5fe80d0d9d04b5c0b106187
|
# -*- encoding: utf-8
from sqlalchemy.testing import eq_, engines
from sqlalchemy import *
from sqlalchemy import exc
from sqlalchemy.dialects.mssql import pyodbc, pymssql
from sqlalchemy.engine import url
from sqlalchemy.testing import fixtures
from sqlalchemy import testing
from sqlalchemy.testing import assert_raises_message, assert_warnings
from sqlalchemy.testing.mock import Mock
class ParseConnectTest(fixtures.TestBase):
def test_pyodbc_connect_dsn_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql://mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;Trusted_Connection=Yes'], {}], connection)
def test_pyodbc_connect_old_style_dsn_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql:///?dsn=mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;Trusted_Connection=Yes'], {}], connection)
def test_pyodbc_connect_dsn_non_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql://username:password@mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;UID=username;PWD=password'], {}], connection)
def test_pyodbc_connect_dsn_extra(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@mydsn/?LANGUAGE=us_'
'english&foo=bar')
connection = dialect.create_connect_args(u)
dsn_string = connection[0][0]
assert ";LANGUAGE=us_english" in dsn_string
assert ";foo=bar" in dsn_string
def test_pyodbc_hostname(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql://username:password@hostspec/database?driver=SQL+Server')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password'], {}], connection)
def test_pyodbc_host_no_driver(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql://username:password@hostspec/database')
def go():
return dialect.create_connect_args(u)
connection = assert_warnings(
go,
["No driver name specified; this is expected by "
"PyODBC when using DSN-less connections"])
eq_([['Server=hostspec;Database=database;UI'
'D=username;PWD=password'], {}], connection)
def test_pyodbc_connect_comma_port(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec:12345/data'
'base?driver=SQL Server')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec,12345;Database=datab'
'ase;UID=username;PWD=password'], {}], connection)
def test_pyodbc_connect_config_port(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec/database?p'
'ort=12345&driver=SQL+Server')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password;port=12345'], {}], connection)
def test_pyodbc_extra_connect(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec/database?L'
'ANGUAGE=us_english&foo=bar&driver=SQL+Server')
connection = dialect.create_connect_args(u)
eq_(connection[1], {})
eq_(connection[0][0]
in ('DRIVER={SQL Server};Server=hostspec;Database=database;'
'UID=username;PWD=password;foo=bar;LANGUAGE=us_english',
'DRIVER={SQL Server};Server=hostspec;Database=database;UID='
'username;PWD=password;LANGUAGE=us_english;foo=bar'), True)
def test_pyodbc_odbc_connect(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql:///?odbc_connect=DRIVER%3D%7BSQL+Server'
'%7D%3BServer%3Dhostspec%3BDatabase%3Ddatabase'
'%3BUID%3Dusername%3BPWD%3Dpassword')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password'], {}], connection)
def test_pyodbc_odbc_connect_with_dsn(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql:///?odbc_connect=dsn%3Dmydsn%3BDatabase'
'%3Ddatabase%3BUID%3Dusername%3BPWD%3Dpassword'
)
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;Database=database;UID=username;PWD=password'],
{}], connection)
def test_pyodbc_odbc_connect_ignores_other_values(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://userdiff:passdiff@localhost/dbdiff?od'
'bc_connect=DRIVER%3D%7BSQL+Server%7D%3BServer'
'%3Dhostspec%3BDatabase%3Ddatabase%3BUID%3Duse'
'rname%3BPWD%3Dpassword')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password'], {}], connection)
def test_pymssql_port_setting(self):
dialect = pymssql.dialect()
u = \
url.make_url('mssql+pymssql://scott:tiger@somehost/test')
connection = dialect.create_connect_args(u)
eq_(
[[], {'host': 'somehost', 'password': 'tiger',
'user': 'scott', 'database': 'test'}], connection
)
u = \
url.make_url('mssql+pymssql://scott:tiger@somehost:5000/test')
connection = dialect.create_connect_args(u)
eq_(
[[], {'host': 'somehost:5000', 'password': 'tiger',
'user': 'scott', 'database': 'test'}], connection
)
def test_pymssql_disconnect(self):
dialect = pymssql.dialect()
for error in [
'Adaptive Server connection timed out',
'Net-Lib error during Connection reset by peer',
'message 20003',
'Error 10054',
'Not connected to any MS SQL server',
'Connection is closed'
]:
eq_(dialect.is_disconnect(error, None, None), True)
eq_(dialect.is_disconnect("not an error", None, None), False)
@testing.requires.mssql_freetds
def test_bad_freetds_warning(self):
engine = engines.testing_engine()
def _bad_version(connection):
return 95, 10, 255
engine.dialect._get_server_version_info = _bad_version
assert_raises_message(exc.SAWarning,
'Unrecognized server version info',
engine.connect)
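    # Summary of the URL -> ODBC connection-string translation exercised above
    # (illustrative comment, not part of the original tests):
    #   mssql://user:pw@host/db?driver=SQL+Server
    #       -> "DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pw"
    #   mssql://mydsn   (host treated as a DSN, no credentials)
    #       -> "dsn=mydsn;Trusted_Connection=Yes"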
class EngineFromConfigTest(fixtures.TestBase):
def test_legacy_schema_flag(self):
cfg = {
"sqlalchemy.url": "mssql://foodsn",
"sqlalchemy.legacy_schema_aliasing": "false"
}
e = engine_from_config(
cfg, module=Mock(version="MS SQL Server 11.0.92"))
eq_(e.dialect.legacy_schema_aliasing, False)
class VersionDetectionTest(fixtures.TestBase):
def test_pymssql_version(self):
dialect = pymssql.MSDialect_pymssql()
for vers in [
"Microsoft SQL Server Blah - 11.0.9216.62",
"Microsoft SQL Server (XYZ) - 11.0.9216.62 \n"
"Jul 18 2014 22:00:21 \nCopyright (c) Microsoft Corporation",
"Microsoft SQL Azure (RTM) - 11.0.9216.62 \n"
"Jul 18 2014 22:00:21 \nCopyright (c) Microsoft Corporation"
]:
conn = Mock(scalar=Mock(return_value=vers))
eq_(
dialect._get_server_version_info(conn),
(11, 0, 9216, 62)
)
|
wfxiang08/sqlalchemy
|
test/dialect/mssql/test_engine.py
|
Python
|
mit
| 8,017
|
[
"ASE"
] |
7a4d200f5d53c2a6be01bede1f296fcc5aa358eb779145e20958a77ce9bdba18
|
""" Test functions for stats module
WRITTEN BY LOUIS LUANGKESORN <lluang@yahoo.com> FOR THE STATS MODULE
BASED ON WILKINSON'S STATISTICS QUIZ
https://www.stanford.edu/~clint/bench/wilk.txt
Additional tests by a host of SciPy developers.
"""
from __future__ import division, print_function, absolute_import
import os
import sys
import warnings
from collections import namedtuple
from numpy.testing import (assert_, assert_equal,
assert_almost_equal, assert_array_almost_equal,
assert_array_equal, assert_approx_equal,
assert_allclose, assert_warns)
import pytest
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
import numpy.ma.testutils as mat
from numpy import array, arange, float32, float64, power
import numpy as np
import scipy.stats as stats
import scipy.stats.mstats as mstats
import scipy.stats.mstats_basic as mstats_basic
from scipy._lib._version import NumpyVersion
from scipy._lib.six import xrange
from .common_tests import check_named_results
from scipy.special import kv
from scipy.sparse.sputils import matrix
from scipy.integrate import quad
""" Numbers in docstrings beginning with 'W' refer to the section numbers
and headings found in the STATISTICS QUIZ of Leland Wilkinson. These are
considered to be essential functionality. True testing and
evaluation of a statistics package requires use of the
NIST Statistical test data. See McCullough (1999) Assessing The Reliability
of Statistical Software for a test methodology and its
implementation in testing SAS, SPSS, and S-Plus
"""
# Datasets
# These data sets are from the nasty.dat sets used by Wilkinson
# For completeness, I should write the relevant tests and count them as failures
# Somewhat acceptable, since this is still beta software. It would count as a
# good target for 1.0 status
X = array([1,2,3,4,5,6,7,8,9], float)
ZERO = array([0,0,0,0,0,0,0,0,0], float)
BIG = array([99999991,99999992,99999993,99999994,99999995,99999996,99999997,
99999998,99999999], float)
LITTLE = array([0.99999991,0.99999992,0.99999993,0.99999994,0.99999995,0.99999996,
0.99999997,0.99999998,0.99999999], float)
HUGE = array([1e+12,2e+12,3e+12,4e+12,5e+12,6e+12,7e+12,8e+12,9e+12], float)
TINY = array([1e-12,2e-12,3e-12,4e-12,5e-12,6e-12,7e-12,8e-12,9e-12], float)
ROUND = array([0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5], float)
class TestTrimmedStats(object):
# TODO: write these tests to handle missing values properly
dprec = np.finfo(np.float64).precision
def test_tmean(self):
y = stats.tmean(X, (2, 8), (True, True))
assert_approx_equal(y, 5.0, significant=self.dprec)
y1 = stats.tmean(X, limits=(2, 8), inclusive=(False, False))
y2 = stats.tmean(X, limits=None)
assert_approx_equal(y1, y2, significant=self.dprec)
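    # Worked check for the assertion above (illustrative comment): with
    # X = [1, ..., 9] and limits (2, 8) inclusive on both sides, the retained
    # values are 2, 3, 4, 5, 6, 7, 8, whose mean is 35 / 7 = 5.0.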
def test_tvar(self):
y = stats.tvar(X, limits=(2, 8), inclusive=(True, True))
assert_approx_equal(y, 4.6666666666666661, significant=self.dprec)
y = stats.tvar(X, limits=None)
assert_approx_equal(y, X.var(ddof=1), significant=self.dprec)
x_2d = arange(63, dtype=float64).reshape((9, 7))
y = stats.tvar(x_2d, axis=None)
assert_approx_equal(y, x_2d.var(ddof=1), significant=self.dprec)
y = stats.tvar(x_2d, axis=0)
assert_array_almost_equal(y[0], np.full((1, 7), 367.50000000), decimal=8)
y = stats.tvar(x_2d, axis=1)
assert_array_almost_equal(y[0], np.full((1, 9), 4.66666667), decimal=8)
y = stats.tvar(x_2d[3, :])
assert_approx_equal(y, 4.666666666666667, significant=self.dprec)
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, "Degrees of freedom <= 0 for slice.")
# Limiting some values along one axis
y = stats.tvar(x_2d, limits=(1, 5), axis=1, inclusive=(True, True))
assert_approx_equal(y[0], 2.5, significant=self.dprec)
# Limiting all values along one axis
y = stats.tvar(x_2d, limits=(0, 6), axis=1, inclusive=(True, True))
assert_approx_equal(y[0], 4.666666666666667, significant=self.dprec)
assert_equal(y[1], np.nan)
def test_tstd(self):
y = stats.tstd(X, (2, 8), (True, True))
assert_approx_equal(y, 2.1602468994692865, significant=self.dprec)
y = stats.tstd(X, limits=None)
assert_approx_equal(y, X.std(ddof=1), significant=self.dprec)
def test_tmin(self):
assert_equal(stats.tmin(4), 4)
x = np.arange(10)
assert_equal(stats.tmin(x), 0)
assert_equal(stats.tmin(x, lowerlimit=0), 0)
assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), 1)
x = x.reshape((5, 2))
assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), [2, 1])
assert_equal(stats.tmin(x, axis=1), [0, 2, 4, 6, 8])
assert_equal(stats.tmin(x, axis=None), 0)
x = np.arange(10.)
x[9] = np.nan
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, "invalid value*")
assert_equal(stats.tmin(x), np.nan)
assert_equal(stats.tmin(x, nan_policy='omit'), 0.)
assert_raises(ValueError, stats.tmin, x, nan_policy='raise')
assert_raises(ValueError, stats.tmin, x, nan_policy='foobar')
msg = "'propagate', 'raise', 'omit'"
with assert_raises(ValueError, match=msg):
stats.tmin(x, nan_policy='foo')
def test_tmax(self):
assert_equal(stats.tmax(4), 4)
x = np.arange(10)
assert_equal(stats.tmax(x), 9)
assert_equal(stats.tmax(x, upperlimit=9), 9)
assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), 8)
x = x.reshape((5, 2))
assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), [8, 7])
assert_equal(stats.tmax(x, axis=1), [1, 3, 5, 7, 9])
assert_equal(stats.tmax(x, axis=None), 9)
x = np.arange(10.)
x[6] = np.nan
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, "invalid value*")
assert_equal(stats.tmax(x), np.nan)
assert_equal(stats.tmax(x, nan_policy='omit'), 9.)
assert_raises(ValueError, stats.tmax, x, nan_policy='raise')
assert_raises(ValueError, stats.tmax, x, nan_policy='foobar')
def test_tsem(self):
y = stats.tsem(X, limits=(3, 8), inclusive=(False, True))
y_ref = np.array([4, 5, 6, 7, 8])
assert_approx_equal(y, y_ref.std(ddof=1) / np.sqrt(y_ref.size),
significant=self.dprec)
assert_approx_equal(stats.tsem(X, limits=[-1, 10]),
stats.tsem(X, limits=None),
significant=self.dprec)
class TestCorrPearsonr(object):
""" W.II.D. Compute a correlation matrix on all the variables.
All the correlations, except for ZERO and MISS, should be exactly 1.
ZERO and MISS should have undefined or missing correlations with the
other variables. The same should go for SPEARMAN correlations, if
your program has them.
"""
def test_pXX(self):
y = stats.pearsonr(X,X)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXBIG(self):
y = stats.pearsonr(X,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXLITTLE(self):
y = stats.pearsonr(X,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXHUGE(self):
y = stats.pearsonr(X,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXTINY(self):
y = stats.pearsonr(X,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXROUND(self):
y = stats.pearsonr(X,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGBIG(self):
y = stats.pearsonr(BIG,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGLITTLE(self):
y = stats.pearsonr(BIG,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGHUGE(self):
y = stats.pearsonr(BIG,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGTINY(self):
y = stats.pearsonr(BIG,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGROUND(self):
y = stats.pearsonr(BIG,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLELITTLE(self):
y = stats.pearsonr(LITTLE,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLEHUGE(self):
y = stats.pearsonr(LITTLE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLETINY(self):
y = stats.pearsonr(LITTLE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLEROUND(self):
y = stats.pearsonr(LITTLE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGEHUGE(self):
y = stats.pearsonr(HUGE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGETINY(self):
y = stats.pearsonr(HUGE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGEROUND(self):
y = stats.pearsonr(HUGE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pTINYTINY(self):
y = stats.pearsonr(TINY,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pTINYROUND(self):
y = stats.pearsonr(TINY,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pROUNDROUND(self):
y = stats.pearsonr(ROUND,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_r_almost_exactly_pos1(self):
a = arange(3.0)
r, prob = stats.pearsonr(a, a)
assert_allclose(r, 1.0, atol=1e-15)
# With n = len(a) = 3, the error in prob grows like the
# square root of the error in r.
assert_allclose(prob, 0.0, atol=np.sqrt(2*np.spacing(1.0)))
def test_r_almost_exactly_neg1(self):
a = arange(3.0)
r, prob = stats.pearsonr(a, -a)
assert_allclose(r, -1.0, atol=1e-15)
# With n = len(a) = 3, the error in prob grows like the
# square root of the error in r.
assert_allclose(prob, 0.0, atol=np.sqrt(2*np.spacing(1.0)))
def test_basic(self):
# A basic test, with a correlation coefficient
# that is not 1 or -1.
a = array([-1, 0, 1])
b = array([0, 0, 3])
r, prob = stats.pearsonr(a, b)
assert_approx_equal(r, np.sqrt(3)/2)
assert_approx_equal(prob, 1/3)
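    # Worked derivation for the test above (illustrative comment): with
    # a = [-1, 0, 1] and b = [0, 0, 3], the sample covariance is 1.5 and the
    # sample standard deviations are 1 and sqrt(3), so
    #     r = 1.5 / (1 * sqrt(3)) = sqrt(3) / 2.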
def test_constant_input(self):
# Zero variance input
# See https://github.com/scipy/scipy/issues/3728
with assert_warns(stats.PearsonRConstantInputWarning):
r, p = stats.pearsonr([0.667, 0.667, 0.667], [0.123, 0.456, 0.789])
assert_equal(r, np.nan)
assert_equal(p, np.nan)
def test_near_constant_input(self):
# Near constant input (but not constant):
x = [2, 2, 2 + np.spacing(2)]
y = [3, 3, 3 + 6*np.spacing(3)]
with assert_warns(stats.PearsonRNearConstantInputWarning):
# r and p are garbage, so don't bother checking them in this case.
# (The exact value of r would be 1.)
r, p = stats.pearsonr(x, y)
def test_very_small_input_values(self):
# Very small values in an input. A naive implementation will
# suffer from underflow.
# See https://github.com/scipy/scipy/issues/9353
x = [0.004434375, 0.004756007, 0.003911996, 0.0038005, 0.003409971]
y = [2.48e-188, 7.41e-181, 4.09e-208, 2.08e-223, 2.66e-245]
r, p = stats.pearsonr(x,y)
# The expected values were computed using mpmath with 80 digits
# of precision.
assert_allclose(r, 0.7272930540750450)
assert_allclose(p, 0.1637805429533202)
def test_very_large_input_values(self):
# Very large values in an input. A naive implementation will
# suffer from overflow.
# See https://github.com/scipy/scipy/issues/8980
x = 1e90*np.array([0, 0, 0, 1, 1, 1, 1])
y = 1e90*np.arange(7)
r, p = stats.pearsonr(x, y)
# The expected values were computed using mpmath with 80 digits
# of precision.
assert_allclose(r, 0.8660254037844386)
assert_allclose(p, 0.011724811003954638)
def test_extremely_large_input_values(self):
# Extremely large values in x and y. These values would cause the
# product sigma_x * sigma_y to overflow if the two factors were
# computed independently.
x = np.array([2.3e200, 4.5e200, 6.7e200, 8e200])
y = np.array([1.2e199, 5.5e200, 3.3e201, 1.0e200])
r, p = stats.pearsonr(x, y)
# The expected values were computed using mpmath with 80 digits
# of precision.
assert_allclose(r, 0.351312332103289)
assert_allclose(p, 0.648687667896711)
def test_length_two_pos1(self):
# Inputs with length 2.
# See https://github.com/scipy/scipy/issues/7730
r, p = stats.pearsonr([1, 2], [3, 5])
assert_equal(r, 1)
assert_equal(p, 1)
def test_length_two_neg2(self):
# Inputs with length 2.
# See https://github.com/scipy/scipy/issues/7730
r, p = stats.pearsonr([2, 1], [3, 5])
assert_equal(r, -1)
assert_equal(p, 1)
def test_more_basic_examples(self):
x = [1, 2, 3, 4]
y = [0, 1, 0.5, 1]
r, p = stats.pearsonr(x, y)
# The expected values were computed using mpmath with 80 digits
# of precision.
assert_allclose(r, 0.674199862463242)
assert_allclose(p, 0.325800137536758)
x = [1, 2, 3]
y = [5, -4, -13]
r, p = stats.pearsonr(x, y)
# The expected r and p are exact.
assert_allclose(r, -1.0)
assert_allclose(p, 0.0, atol=1e-7)
def test_unequal_lengths(self):
x = [1, 2, 3]
y = [4, 5]
assert_raises(ValueError, stats.pearsonr, x, y)
def test_len1(self):
x = [1]
y = [2]
assert_raises(ValueError, stats.pearsonr, x, y)
class TestFisherExact(object):
"""Some tests to show that fisher_exact() works correctly.
Note that in SciPy 0.9.0 this was not working well for large numbers due to
inaccuracy of the hypergeom distribution (see #1218). Fixed now.
Also note that R and SciPy have different argument formats for their
hypergeometric distribution functions.
R:
> phyper(18999, 99000, 110000, 39000, lower.tail = FALSE)
[1] 1.701815e-09
"""
def test_basic(self):
fisher_exact = stats.fisher_exact
res = fisher_exact([[14500, 20000], [30000, 40000]])[1]
assert_approx_equal(res, 0.01106, significant=4)
res = fisher_exact([[100, 2], [1000, 5]])[1]
assert_approx_equal(res, 0.1301, significant=4)
res = fisher_exact([[2, 7], [8, 2]])[1]
assert_approx_equal(res, 0.0230141, significant=6)
res = fisher_exact([[5, 1], [10, 10]])[1]
assert_approx_equal(res, 0.1973244, significant=6)
res = fisher_exact([[5, 15], [20, 20]])[1]
assert_approx_equal(res, 0.0958044, significant=6)
res = fisher_exact([[5, 16], [20, 25]])[1]
assert_approx_equal(res, 0.1725862, significant=6)
res = fisher_exact([[10, 5], [10, 1]])[1]
assert_approx_equal(res, 0.1973244, significant=6)
res = fisher_exact([[5, 0], [1, 4]])[1]
assert_approx_equal(res, 0.04761904, significant=6)
res = fisher_exact([[0, 1], [3, 2]])[1]
assert_approx_equal(res, 1.0)
res = fisher_exact([[0, 2], [6, 4]])[1]
assert_approx_equal(res, 0.4545454545)
res = fisher_exact([[2, 7], [8, 2]])
assert_approx_equal(res[1], 0.0230141, significant=6)
assert_approx_equal(res[0], 4.0 / 56)
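    # Worked check for the last assertion above (illustrative comment): for the
    # table [[2, 7], [8, 2]] the sample odds ratio is (2 * 2) / (7 * 8) = 4 / 56.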
def test_precise(self):
# results from R
#
# R defines oddsratio differently (see Notes section of fisher_exact
# docstring), so those will not match. We leave them in anyway, in
# case they will be useful later on. We test only the p-value.
tablist = [
([[100, 2], [1000, 5]], (2.505583993422285e-001, 1.300759363430016e-001)),
([[2, 7], [8, 2]], (8.586235135736206e-002, 2.301413756522114e-002)),
([[5, 1], [10, 10]], (4.725646047336584e+000, 1.973244147157190e-001)),
([[5, 15], [20, 20]], (3.394396617440852e-001, 9.580440012477637e-002)),
([[5, 16], [20, 25]], (3.960558326183334e-001, 1.725864953812994e-001)),
([[10, 5], [10, 1]], (2.116112781158483e-001, 1.973244147157190e-001)),
([[10, 5], [10, 0]], (0.000000000000000e+000, 6.126482213438734e-002)),
([[5, 0], [1, 4]], (np.inf, 4.761904761904762e-002)),
([[0, 5], [1, 4]], (0.000000000000000e+000, 1.000000000000000e+000)),
([[5, 1], [0, 4]], (np.inf, 4.761904761904758e-002)),
([[0, 1], [3, 2]], (0.000000000000000e+000, 1.000000000000000e+000))
]
for table, res_r in tablist:
res = stats.fisher_exact(np.asarray(table))
np.testing.assert_almost_equal(res[1], res_r[1], decimal=11,
verbose=True)
@pytest.mark.slow
def test_large_numbers(self):
# Test with some large numbers. Regression test for #1401
pvals = [5.56e-11, 2.666e-11, 1.363e-11] # from R
for pval, num in zip(pvals, [75, 76, 77]):
res = stats.fisher_exact([[17704, 496], [1065, num]])[1]
assert_approx_equal(res, pval, significant=4)
res = stats.fisher_exact([[18000, 80000], [20000, 90000]])[1]
assert_approx_equal(res, 0.2751, significant=4)
def test_raises(self):
# test we raise an error for wrong shape of input.
assert_raises(ValueError, stats.fisher_exact,
np.arange(6).reshape(2, 3))
def test_row_or_col_zero(self):
tables = ([[0, 0], [5, 10]],
[[5, 10], [0, 0]],
[[0, 5], [0, 10]],
[[5, 0], [10, 0]])
for table in tables:
oddsratio, pval = stats.fisher_exact(table)
assert_equal(pval, 1.0)
assert_equal(oddsratio, np.nan)
def test_less_greater(self):
tables = (
# Some tables to compare with R:
[[2, 7], [8, 2]],
[[200, 7], [8, 300]],
[[28, 21], [6, 1957]],
[[190, 800], [200, 900]],
# Some tables with simple exact values
# (includes regression test for ticket #1568):
[[0, 2], [3, 0]],
[[1, 1], [2, 1]],
[[2, 0], [1, 2]],
[[0, 1], [2, 3]],
[[1, 0], [1, 4]],
)
pvals = (
# from R:
[0.018521725952066501, 0.9990149169715733],
[1.0, 2.0056578803889148e-122],
[1.0, 5.7284374608319831e-44],
[0.7416227, 0.2959826],
# Exact:
[0.1, 1.0],
[0.7, 0.9],
[1.0, 0.3],
[2./3, 1.0],
[1.0, 1./3],
)
for table, pval in zip(tables, pvals):
res = []
res.append(stats.fisher_exact(table, alternative="less")[1])
res.append(stats.fisher_exact(table, alternative="greater")[1])
assert_allclose(res, pval, atol=0, rtol=1e-7)
def test_gh3014(self):
# check if issue #3014 has been fixed.
# before, this would have raised a ValueError
odds, pvalue = stats.fisher_exact([[1, 2], [9, 84419233]])
class TestCorrSpearmanr(object):
""" W.II.D. Compute a correlation matrix on all the variables.
All the correlations, except for ZERO and MISS, should be exactly 1.
ZERO and MISS should have undefined or missing correlations with the
other variables. The same should go for SPEARMAN correlations, if
your program has them.
"""
def test_scalar(self):
y = stats.spearmanr(4., 2.)
assert_(np.isnan(y).all())
def test_uneven_lengths(self):
assert_raises(ValueError, stats.spearmanr, [1, 2, 1], [8, 9])
assert_raises(ValueError, stats.spearmanr, [1, 2, 1], 8)
def test_uneven_2d_shapes(self):
# Different number of columns should work - those just get concatenated.
np.random.seed(232324)
x = np.random.randn(4, 3)
y = np.random.randn(4, 2)
assert stats.spearmanr(x, y).correlation.shape == (5, 5)
assert stats.spearmanr(x.T, y.T, axis=1).pvalue.shape == (5, 5)
assert_raises(ValueError, stats.spearmanr, x, y, axis=1)
assert_raises(ValueError, stats.spearmanr, x.T, y.T)
def test_ndim_too_high(self):
np.random.seed(232324)
x = np.random.randn(4, 3, 2)
assert_raises(ValueError, stats.spearmanr, x)
assert_raises(ValueError, stats.spearmanr, x, x)
assert_raises(ValueError, stats.spearmanr, x, None, None)
# But should work with axis=None (raveling axes) for two input arrays
assert_allclose(stats.spearmanr(x, x, axis=None),
stats.spearmanr(x.flatten(), x.flatten(), axis=0))
def test_nan_policy(self):
x = np.arange(10.)
x[9] = np.nan
assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan))
assert_array_equal(stats.spearmanr(x, x, nan_policy='omit'),
(1.0, 0.0))
assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise')
assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar')
def test_sXX(self):
y = stats.spearmanr(X,X)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXBIG(self):
y = stats.spearmanr(X,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXLITTLE(self):
y = stats.spearmanr(X,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXHUGE(self):
y = stats.spearmanr(X,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXTINY(self):
y = stats.spearmanr(X,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXROUND(self):
y = stats.spearmanr(X,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGBIG(self):
y = stats.spearmanr(BIG,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGLITTLE(self):
y = stats.spearmanr(BIG,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGHUGE(self):
y = stats.spearmanr(BIG,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGTINY(self):
y = stats.spearmanr(BIG,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGROUND(self):
y = stats.spearmanr(BIG,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLELITTLE(self):
y = stats.spearmanr(LITTLE,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLEHUGE(self):
y = stats.spearmanr(LITTLE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLETINY(self):
y = stats.spearmanr(LITTLE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLEROUND(self):
y = stats.spearmanr(LITTLE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGEHUGE(self):
y = stats.spearmanr(HUGE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGETINY(self):
y = stats.spearmanr(HUGE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGEROUND(self):
y = stats.spearmanr(HUGE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sTINYTINY(self):
y = stats.spearmanr(TINY,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sTINYROUND(self):
y = stats.spearmanr(TINY,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sROUNDROUND(self):
y = stats.spearmanr(ROUND,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_spearmanr_result_attributes(self):
res = stats.spearmanr(X, X)
attributes = ('correlation', 'pvalue')
check_named_results(res, attributes)
def test_1d_vs_2d(self):
x1 = [1, 2, 3, 4, 5, 6]
x2 = [1, 2, 3, 4, 6, 5]
res1 = stats.spearmanr(x1, x2)
res2 = stats.spearmanr(np.asarray([x1, x2]).T)
assert_allclose(res1, res2)
def test_1d_vs_2d_nans(self):
# Now the same with NaNs present. Regression test for gh-9103.
for nan_policy in ['propagate', 'omit']:
x1 = [1, np.nan, 3, 4, 5, 6]
x2 = [1, 2, 3, 4, 6, np.nan]
res1 = stats.spearmanr(x1, x2, nan_policy=nan_policy)
res2 = stats.spearmanr(np.asarray([x1, x2]).T, nan_policy=nan_policy)
assert_allclose(res1, res2)
def test_3cols(self):
x1 = np.arange(6)
x2 = -x1
x3 = np.array([0, 1, 2, 3, 5, 4])
x = np.asarray([x1, x2, x3]).T
actual = stats.spearmanr(x)
expected_corr = np.array([[1, -1, 0.94285714],
[-1, 1, -0.94285714],
[0.94285714, -0.94285714, 1]])
expected_pvalue = np.zeros((3, 3), dtype=float)
expected_pvalue[2, 0:2] = 0.00480466472
expected_pvalue[0:2, 2] = 0.00480466472
assert_allclose(actual.correlation, expected_corr)
assert_allclose(actual.pvalue, expected_pvalue)
def test_gh_9103(self):
# Regression test for gh-9103.
x = np.array([[np.nan, 3.0, 4.0, 5.0, 5.1, 6.0, 9.2],
[5.0, np.nan, 4.1, 4.8, 4.9, 5.0, 4.1],
[0.5, 4.0, 7.1, 3.8, 8.0, 5.1, 7.6]]).T
corr = np.array([[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 1.]])
assert_allclose(stats.spearmanr(x, nan_policy='propagate').correlation,
corr)
res = stats.spearmanr(x, nan_policy='omit').correlation
assert_allclose((res[0][1], res[0][2], res[1][2]),
(0.2051957, 0.4857143, -0.4707919), rtol=1e-6)
def test_gh_8111(self):
# Regression test for gh-8111 (different result for float/int/bool).
n = 100
np.random.seed(234568)
x = np.random.rand(n)
m = np.random.rand(n) > 0.7
# bool against float, no nans
a = (x > .5)
b = np.array(x)
res1 = stats.spearmanr(a, b, nan_policy='omit').correlation
# bool against float with NaNs
b[m] = np.nan
res2 = stats.spearmanr(a, b, nan_policy='omit').correlation
# int against float with NaNs
a = a.astype(np.int32)
res3 = stats.spearmanr(a, b, nan_policy='omit').correlation
expected = [0.865895477, 0.866100381, 0.866100381]
assert_allclose([res1, res2, res3], expected)
def test_spearmanr():
# Cross-check with R:
# cor.test(c(1,2,3,4,5),c(5,6,7,8,7),method="spearman")
x1 = [1, 2, 3, 4, 5]
x2 = [5, 6, 7, 8, 7]
expected = (0.82078268166812329, 0.088587005313543798)
res = stats.spearmanr(x1, x2)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
attributes = ('correlation', 'pvalue')
res = stats.spearmanr(x1, x2)
check_named_results(res, attributes)
# with only ties in one or both inputs
with np.errstate(invalid="ignore"):
assert_equal(stats.spearmanr([2,2,2], [2,2,2]), (np.nan, np.nan))
assert_equal(stats.spearmanr([2,0,2], [2,2,2]), (np.nan, np.nan))
assert_equal(stats.spearmanr([2,2,2], [2,0,2]), (np.nan, np.nan))
# empty arrays provided as input
assert_equal(stats.spearmanr([], []), (np.nan, np.nan))
np.random.seed(7546)
x = np.array([np.random.normal(loc=1, scale=1, size=500),
np.random.normal(loc=1, scale=1, size=500)])
corr = [[1.0, 0.3],
[0.3, 1.0]]
x = np.dot(np.linalg.cholesky(corr), x)
expected = (0.28659685838743354, 6.579862219051161e-11)
res = stats.spearmanr(x[0], x[1])
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
assert_approx_equal(stats.spearmanr([1,1,2], [1,1,2])[0], 1.0)
# test nan_policy
x = np.arange(10.)
x[9] = np.nan
assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan))
assert_allclose(stats.spearmanr(x, x, nan_policy='omit'),
(1.0, 0))
assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise')
assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar')
# test unequal length inputs
x = np.arange(10.)
y = np.arange(20.)
assert_raises(ValueError, stats.spearmanr, x, y)
#test paired value
x1 = [1, 2, 3, 4]
x2 = [8, 7, 6, np.nan]
res1 = stats.spearmanr(x1, x2, nan_policy='omit')
res2 = stats.spearmanr(x1[:3], x2[:3], nan_policy='omit')
assert_equal(res1, res2)
# Regression test for GitHub issue #6061 - Overflow on Windows
x = list(range(2000))
y = list(range(2000))
y[0], y[9] = y[9], y[0]
y[10], y[434] = y[434], y[10]
y[435], y[1509] = y[1509], y[435]
# rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))
# = 1 - (1 / 500)
# = 0.998
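# (each swap displaces two entries, by 9, 424 and 1074 ranks respectively,
# hence the factor 2 in the sum of squared rank differences above)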
x.append(np.nan)
y.append(3.0)
assert_almost_equal(stats.spearmanr(x, y, nan_policy='omit')[0], 0.998)
class TestCorrSpearmanrTies(object):
"""Some tests of tie-handling by the spearmanr function."""
def test_tie1(self):
# Data
x = [1.0, 2.0, 3.0, 4.0]
y = [1.0, 2.0, 2.0, 3.0]
# Ranks of the data, with tie-handling.
xr = [1.0, 2.0, 3.0, 4.0]
yr = [1.0, 2.5, 2.5, 4.0]
# Result of spearmanr should be the same as applying
# pearsonr to the ranks.
sr = stats.spearmanr(x, y)
pr = stats.pearsonr(xr, yr)
assert_almost_equal(sr, pr)
def test_tie2(self):
# Test tie-handling if inputs contain nan's
# Data without nan's
x1 = [1, 2, 2.5, 2]
y1 = [1, 3, 2.5, 4]
# Same data with nan's
x2 = [1, 2, 2.5, 2, np.nan]
y2 = [1, 3, 2.5, 4, np.nan]
# Results for two data sets should be the same if nan's are ignored
sr1 = stats.spearmanr(x1, y1)
sr2 = stats.spearmanr(x2, y2, nan_policy='omit')
assert_almost_equal(sr1, sr2)
# W.II.E. Tabulate X against X, using BIG as a case weight. The values
# should appear on the diagonal and the total should be 899999955.
# If the table cannot hold these values, forget about working with
# census data. You can also tabulate HUGE against TINY. There is no
# reason a tabulation program should not be able to distinguish
# different values regardless of their magnitude.
# I need to figure out how to do this one.
def test_kendalltau():
# simple case without ties
x = np.arange(10)
y = np.arange(10)
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (1.0, 5.511463844797e-07)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# swap a couple of values
b = y[1]
y[1] = y[2]
y[2] = b
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (0.9555555555555556, 5.511463844797e-06)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# swap a couple more
b = y[5]
y[5] = y[6]
y[6] = b
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (0.9111111111111111, 2.976190476190e-05)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# same in opposite direction
x = np.arange(10)
y = np.arange(10)[::-1]
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (-1.0, 5.511463844797e-07)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# swap a couple of values
b = y[1]
y[1] = y[2]
y[2] = b
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (-0.9555555555555556, 5.511463844797e-06)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# swap a couple more
b = y[5]
y[5] = y[6]
y[6] = b
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (-0.9111111111111111, 2.976190476190e-05)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# check exception in case of ties
y[2] = y[1]
assert_raises(ValueError, stats.kendalltau, x, y, method='exact')
# check exception in case of invalid method keyword
assert_raises(ValueError, stats.kendalltau, x, y, method='banana')
# with some ties
# Cross-check with R:
# cor.test(c(12,2,1,12,2),c(1,4,7,1,0),method="kendall",exact=FALSE)
x1 = [12, 2, 1, 12, 2]
x2 = [1, 4, 7, 1, 0]
expected = (-0.47140452079103173, 0.28274545993277478)
res = stats.kendalltau(x1, x2)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# test for namedtuple attribute results
attributes = ('correlation', 'pvalue')
res = stats.kendalltau(x1, x2)
check_named_results(res, attributes)
# with only ties in one or both inputs
assert_equal(stats.kendalltau([2,2,2], [2,2,2]), (np.nan, np.nan))
assert_equal(stats.kendalltau([2,0,2], [2,2,2]), (np.nan, np.nan))
assert_equal(stats.kendalltau([2,2,2], [2,0,2]), (np.nan, np.nan))
# empty arrays provided as input
assert_equal(stats.kendalltau([], []), (np.nan, np.nan))
# check with larger arrays
np.random.seed(7546)
x = np.array([np.random.normal(loc=1, scale=1, size=500),
np.random.normal(loc=1, scale=1, size=500)])
corr = [[1.0, 0.3],
[0.3, 1.0]]
x = np.dot(np.linalg.cholesky(corr), x)
expected = (0.19291382765531062, 1.1337095377742629e-10)
res = stats.kendalltau(x[0], x[1])
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# and do we get a tau of 1 for identical inputs?
assert_approx_equal(stats.kendalltau([1,1,2], [1,1,2])[0], 1.0)
# test nan_policy
x = np.arange(10.)
x[9] = np.nan
assert_array_equal(stats.kendalltau(x, x), (np.nan, np.nan))
assert_allclose(stats.kendalltau(x, x, nan_policy='omit'),
(1.0, 5.5114638e-6), rtol=1e-06)
assert_allclose(stats.kendalltau(x, x, nan_policy='omit', method='asymptotic'),
(1.0, 0.00017455009626808976), rtol=1e-06)
assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='raise')
assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='foobar')
# test unequal length inputs
x = np.arange(10.)
y = np.arange(20.)
assert_raises(ValueError, stats.kendalltau, x, y)
# test all ties
tau, p_value = stats.kendalltau([], [])
assert_equal(np.nan, tau)
assert_equal(np.nan, p_value)
tau, p_value = stats.kendalltau([0], [0])
assert_equal(np.nan, tau)
assert_equal(np.nan, p_value)
# Regression test for GitHub issue #6061 - Overflow on Windows
x = np.arange(2000, dtype=float)
x = np.ma.masked_greater(x, 1995)
y = np.arange(2000, dtype=float)
y = np.concatenate((y[1000:], y[:1000]))
assert_(np.isfinite(stats.kendalltau(x,y)[1]))
def test_kendalltau_vs_mstats_basic():
np.random.seed(42)
for s in range(2,10):
a = []
# Generate rankings with ties
for i in range(s):
a += [i]*i
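# e.g. s = 4 gives a = [1, 2, 2, 3, 3, 3]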
b = list(a)
np.random.shuffle(a)
np.random.shuffle(b)
expected = mstats_basic.kendalltau(a, b)
actual = stats.kendalltau(a, b)
assert_approx_equal(actual[0], expected[0])
assert_approx_equal(actual[1], expected[1])
def test_kendalltau_nan_2nd_arg():
# regression test for gh-6134: nans in the second arg were not handled
x = [1., 2., 3., 4.]
y = [np.nan, 2.4, 3.4, 3.4]
r1 = stats.kendalltau(x, y, nan_policy='omit')
r2 = stats.kendalltau(x[1:], y[1:])
assert_allclose(r1.correlation, r2.correlation, atol=1e-15)
def test_weightedtau():
x = [12, 2, 1, 12, 2]
y = [1, 4, 7, 1, 0]
tau, p_value = stats.weightedtau(x, y)
assert_approx_equal(tau, -0.56694968153682723)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(x, y, additive=False)
assert_approx_equal(tau, -0.62205716951801038)
assert_equal(np.nan, p_value)
# This must be exactly Kendall's tau
tau, p_value = stats.weightedtau(x, y, weigher=lambda x: 1)
assert_approx_equal(tau, -0.47140452079103173)
assert_equal(np.nan, p_value)
# Asymmetric, ranked version
tau, p_value = stats.weightedtau(x, y, rank=None)
assert_approx_equal(tau, -0.4157652301037516)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(y, x, rank=None)
assert_approx_equal(tau, -0.7181341329699029)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(x, y, rank=None, additive=False)
assert_approx_equal(tau, -0.40644850966246893)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(y, x, rank=None, additive=False)
assert_approx_equal(tau, -0.83766582937355172)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(x, y, rank=False)
assert_approx_equal(tau, -0.51604397940261848)
assert_equal(np.nan, p_value)
# This must be exactly Kendall's tau
tau, p_value = stats.weightedtau(x, y, rank=True, weigher=lambda x: 1)
assert_approx_equal(tau, -0.47140452079103173)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(y, x, rank=True, weigher=lambda x: 1)
assert_approx_equal(tau, -0.47140452079103173)
assert_equal(np.nan, p_value)
# Test argument conversion
tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), y)
assert_approx_equal(tau, -0.56694968153682723)
tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.int16), y)
assert_approx_equal(tau, -0.56694968153682723)
tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), np.asarray(y, dtype=np.float64))
assert_approx_equal(tau, -0.56694968153682723)
# All ties
tau, p_value = stats.weightedtau([], [])
assert_equal(np.nan, tau)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau([0], [0])
assert_equal(np.nan, tau)
assert_equal(np.nan, p_value)
# Size mismatches
assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1, 2])
assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1], [0])
# NaNs
x = [12, 2, 1, 12, 2]
y = [1, 4, 7, 1, np.nan]
tau, p_value = stats.weightedtau(x, y)
assert_approx_equal(tau, -0.56694968153682723)
x = [12, 2, np.nan, 12, 2]
tau, p_value = stats.weightedtau(x, y)
assert_approx_equal(tau, -0.56694968153682723)
def test_kendall_tau_large():
n = 172.
x = np.arange(n)
y = np.arange(n)
_, pval = stats.kendalltau(x, y, method='exact')
assert_equal(pval, 0.0)
y[-1], y[-2] = y[-2], y[-1]
_, pval = stats.kendalltau(x, y, method='exact')
assert_equal(pval, 0.0)
y[-3], y[-4] = y[-4], y[-3]
_, pval = stats.kendalltau(x, y, method='exact')
assert_equal(pval, 0.0)
def test_weightedtau_vs_quadratic():
# Trivial quadratic implementation, all parameters mandatory
def wkq(x, y, rank, weigher, add):
tot = conc = disc = u = v = 0
for i in range(len(x)):
for j in range(len(x)):
w = weigher(rank[i]) + weigher(rank[j]) if add else weigher(rank[i]) * weigher(rank[j])
tot += w
if x[i] == x[j]:
u += w
if y[i] == y[j]:
v += w
if x[i] < x[j] and y[i] < y[j] or x[i] > x[j] and y[i] > y[j]:
conc += w
elif x[i] < x[j] and y[i] > y[j] or x[i] > x[j] and y[i] < y[j]:
disc += w
return (conc - disc) / np.sqrt(tot - u) / np.sqrt(tot - v)
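# The reference above computes the weighted tau as
# (conc - disc) / sqrt((tot - u) * (tot - v)), i.e. the difference between
# concordant and discordant weight, normalised by the tie-corrected totals.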
np.random.seed(42)
for s in range(3,10):
a = []
# Generate rankings with ties
for i in range(s):
a += [i]*i
b = list(a)
np.random.shuffle(a)
np.random.shuffle(b)
# First pass: use element indices as ranks
rank = np.arange(len(a), dtype=np.intp)
for _ in range(2):
for add in [True, False]:
expected = wkq(a, b, rank, lambda x: 1./(x+1), add)
actual = stats.weightedtau(a, b, rank, lambda x: 1./(x+1), add).correlation
assert_approx_equal(expected, actual)
# Second pass: use a random rank
np.random.shuffle(rank)
class TestFindRepeats(object):
def test_basic(self):
a = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 5]
res, nums = stats.find_repeats(a)
assert_array_equal(res, [1, 2, 3, 4])
assert_array_equal(nums, [3, 3, 2, 2])
def test_empty_result(self):
# Check that empty arrays are returned when there are no repeats.
for a in [[10, 20, 50, 30, 40], []]:
repeated, counts = stats.find_repeats(a)
assert_array_equal(repeated, [])
assert_array_equal(counts, [])
class TestRegression(object):
def test_linregressBIGX(self):
# W.II.F. Regress BIG on X.
# The constant should be 99999990 and the regression coefficient should be 1.
y = stats.linregress(X,BIG)
intercept = y[1]
r = y[2]
assert_almost_equal(intercept,99999990)
assert_almost_equal(r,1.0)
def test_regressXX(self):
# W.IV.B. Regress X on X.
# The constant should be exactly 0 and the regression coefficient should be 1.
# This is a perfectly valid regression. The program should not complain.
y = stats.linregress(X,X)
intercept = y[1]
r = y[2]
assert_almost_equal(intercept,0.0)
assert_almost_equal(r,1.0)
# W.IV.C. Regress X on BIG and LITTLE (two predictors). The program
# should tell you that this model is "singular" because BIG and
# LITTLE are linear combinations of each other. Cryptic error
# messages are unacceptable here. Singularity is the most
# fundamental regression error.
# Need to figure out how to handle multiple linear regression. Not obvious
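# A possible cross-check (sketch, not exercised here): build the design
# matrix np.column_stack([BIG, LITTLE]) and verify that np.linalg.lstsq
# reports a rank lower than the number of columns, which flags the
# singularity described above.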
def test_regressZEROX(self):
# W.IV.D. Regress ZERO on X.
# The program should inform you that ZERO has no variance or it should
# go ahead and compute the regression and report a correlation and
# total sum of squares of exactly 0.
y = stats.linregress(X,ZERO)
intercept = y[1]
r = y[2]
assert_almost_equal(intercept,0.0)
assert_almost_equal(r,0.0)
def test_regress_simple(self):
# Regress a line with sinusoidal noise.
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
res = stats.linregress(x, y)
assert_almost_equal(res[4], 2.3957814497838803e-3)
def test_regress_simple_onearg_rows(self):
# Regress a line with sinusoidal noise, with a single input of shape (2, N).
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
rows = np.vstack((x, y))
res = stats.linregress(rows)
assert_almost_equal(res[4], 2.3957814497838803e-3)
def test_regress_simple_onearg_cols(self):
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
cols = np.hstack((np.expand_dims(x, 1), np.expand_dims(y, 1)))
res = stats.linregress(cols)
assert_almost_equal(res[4], 2.3957814497838803e-3)
def test_regress_shape_error(self):
# Check that a single input argument to linregress with wrong shape
# results in a ValueError.
assert_raises(ValueError, stats.linregress, np.ones((3, 3)))
def test_linregress(self):
# compared with multivariate ols with pinv
x = np.arange(11)
y = np.arange(5,16)
y[[1, -2]] -= 1
y[[0, -1]] += 1
res = (1.0, 5.0, 0.98229948625750, 7.45259691e-008, 0.063564172616372733)
assert_array_almost_equal(stats.linregress(x,y),res,decimal=14)
def test_regress_simple_negative_cor(self):
# If the slope of the regression is negative, the factor R tends to -1, not 1.
# Sometimes rounding errors make it < -1, leading to stderr being NaN.
a, n = 1e-71, 100000
x = np.linspace(a, 2 * a, n)
y = np.linspace(2 * a, a, n)
stats.linregress(x, y)
res = stats.linregress(x, y)
assert_(res[2] >= -1) # propagated numerical errors were not corrected
assert_almost_equal(res[2], -1) # perfect negative correlation case
assert_(not np.isnan(res[4])) # stderr should stay finite
def test_linregress_result_attributes(self):
# Regress a line with sinusoidal noise.
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
res = stats.linregress(x, y)
attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
check_named_results(res, attributes)
def test_regress_two_inputs(self):
# Regress a simple line formed by two points.
x = np.arange(2)
y = np.arange(3, 5)
res = stats.linregress(x, y)
assert_almost_equal(res[3], 0.0) # non-horizontal line
assert_almost_equal(res[4], 0.0) # zero stderr
def test_regress_two_inputs_horizontal_line(self):
# Regress a horizontal line formed by two points.
x = np.arange(2)
y = np.ones(2)
res = stats.linregress(x, y)
assert_almost_equal(res[3], 1.0) # horizontal line
assert_almost_equal(res[4], 0.0) # zero stderr
def test_nist_norris(self):
x = [0.2, 337.4, 118.2, 884.6, 10.1, 226.5, 666.3, 996.3, 448.6, 777.0,
558.2, 0.4, 0.6, 775.5, 666.9, 338.0, 447.5, 11.6, 556.0, 228.1,
995.8, 887.6, 120.2, 0.3, 0.3, 556.8, 339.1, 887.2, 999.0, 779.0,
11.1, 118.3, 229.2, 669.1, 448.9, 0.5]
y = [0.1, 338.8, 118.1, 888.0, 9.2, 228.1, 668.5, 998.5, 449.1, 778.9,
559.2, 0.3, 0.1, 778.1, 668.8, 339.3, 448.9, 10.8, 557.7, 228.3,
998.0, 888.8, 119.6, 0.3, 0.6, 557.6, 339.3, 888.0, 998.5, 778.9,
10.2, 117.6, 228.9, 668.4, 449.2, 0.2]
# Expected values
exp_slope = 1.00211681802045
exp_intercept = -0.262323073774029
exp_rsquared = 0.999993745883712
actual = stats.linregress(x, y)
assert_almost_equal(actual.slope, exp_slope)
assert_almost_equal(actual.intercept, exp_intercept)
assert_almost_equal(actual.rvalue**2, exp_rsquared)
def test_empty_input(self):
assert_raises(ValueError, stats.linregress, [], [])
def test_nan_input(self):
x = np.arange(10.)
x[9] = np.nan
with np.errstate(invalid="ignore"):
assert_array_equal(stats.linregress(x, x),
(np.nan, np.nan, np.nan, np.nan, np.nan))
def test_theilslopes():
# Basic slope test.
slope, intercept, lower, upper = stats.theilslopes([0,1,1])
assert_almost_equal(slope, 0.5)
assert_almost_equal(intercept, 0.5)
# Test of confidence intervals.
x = [1, 2, 3, 4, 10, 12, 18]
y = [9, 15, 19, 20, 45, 55, 78]
slope, intercept, lower, upper = stats.theilslopes(y, x, 0.07)
assert_almost_equal(slope, 4)
assert_almost_equal(upper, 4.38, decimal=2)
assert_almost_equal(lower, 3.71, decimal=2)
def test_cumfreq():
x = [1, 4, 2, 1, 3, 1]
cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4)
assert_array_almost_equal(cumfreqs, np.array([3., 4., 5., 6.]))
cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4,
defaultreallimits=(1.5, 5))
assert_(extrapoints == 3)
# test for namedtuple attribute results
attributes = ('cumcount', 'lowerlimit', 'binsize', 'extrapoints')
res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
check_named_results(res, attributes)
def test_relfreq():
a = np.array([1, 4, 2, 1, 3, 1])
relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4)
assert_array_almost_equal(relfreqs,
array([0.5, 0.16666667, 0.16666667, 0.16666667]))
# test for namedtuple attribute results
attributes = ('frequency', 'lowerlimit', 'binsize', 'extrapoints')
res = stats.relfreq(a, numbins=4)
check_named_results(res, attributes)
# check array_like input is accepted
relfreqs2, lowlim, binsize, extrapoints = stats.relfreq([1, 4, 2, 1, 3, 1],
numbins=4)
assert_array_almost_equal(relfreqs, relfreqs2)
class TestScoreatpercentile(object):
def setup_method(self):
self.a1 = [3, 4, 5, 10, -3, -5, 6]
self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]
self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]
def test_basic(self):
x = arange(8) * 0.5
assert_equal(stats.scoreatpercentile(x, 0), 0.)
assert_equal(stats.scoreatpercentile(x, 100), 3.5)
assert_equal(stats.scoreatpercentile(x, 50), 1.75)
def test_fraction(self):
scoreatperc = stats.scoreatpercentile
# Test defaults
assert_equal(scoreatperc(list(range(10)), 50), 4.5)
assert_equal(scoreatperc(list(range(10)), 50, (2,7)), 4.5)
assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8)), 4.5)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10,100)), 55)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10)), 5.5)
# explicitly specify interpolation_method 'fraction' (the default)
assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='fraction'),
4.5)
assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7),
interpolation_method='fraction'),
4.5)
assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8),
interpolation_method='fraction'),
4.5)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10, 100),
interpolation_method='fraction'),
55)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10),
interpolation_method='fraction'),
5.5)
def test_lower_higher(self):
scoreatperc = stats.scoreatpercentile
# interpolation_method 'lower'/'higher'
assert_equal(scoreatperc(list(range(10)), 50,
interpolation_method='lower'), 4)
assert_equal(scoreatperc(list(range(10)), 50,
interpolation_method='higher'), 5)
assert_equal(scoreatperc(list(range(10)), 50, (2,7),
interpolation_method='lower'), 4)
assert_equal(scoreatperc(list(range(10)), 50, limit=(2,7),
interpolation_method='higher'), 5)
assert_equal(scoreatperc(list(range(100)), 50, (1,8),
interpolation_method='lower'), 4)
assert_equal(scoreatperc(list(range(100)), 50, (1,8),
interpolation_method='higher'), 5)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100),
interpolation_method='lower'), 10)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(10, 100),
interpolation_method='higher'), 100)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10),
interpolation_method='lower'), 1)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(1, 10),
interpolation_method='higher'), 10)
def test_sequence_per(self):
x = arange(8) * 0.5
expected = np.array([0, 3.5, 1.75])
res = stats.scoreatpercentile(x, [0, 100, 50])
assert_allclose(res, expected)
assert_(isinstance(res, np.ndarray))
# Test with ndarray. Regression test for gh-2861
assert_allclose(stats.scoreatpercentile(x, np.array([0, 100, 50])),
expected)
# Also test combination of 2-D array, axis not None and array-like per
res2 = stats.scoreatpercentile(np.arange(12).reshape((3,4)),
np.array([0, 1, 100, 100]), axis=1)
expected2 = array([[0, 4, 8],
[0.03, 4.03, 8.03],
[3, 7, 11],
[3, 7, 11]])
assert_allclose(res2, expected2)
def test_axis(self):
scoreatperc = stats.scoreatpercentile
x = arange(12).reshape(3, 4)
assert_equal(scoreatperc(x, (25, 50, 100)), [2.75, 5.5, 11.0])
r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
assert_equal(scoreatperc(x, (25, 50, 100), axis=0), r0)
r1 = [[0.75, 4.75, 8.75], [1.5, 5.5, 9.5], [3, 7, 11]]
assert_equal(scoreatperc(x, (25, 50, 100), axis=1), r1)
x = array([[1, 1, 1],
[1, 1, 1],
[4, 4, 3],
[1, 1, 1],
[1, 1, 1]])
score = stats.scoreatpercentile(x, 50)
assert_equal(score.shape, ())
assert_equal(score, 1.0)
score = stats.scoreatpercentile(x, 50, axis=0)
assert_equal(score.shape, (3,))
assert_equal(score, [1, 1, 1])
def test_exception(self):
assert_raises(ValueError, stats.scoreatpercentile, [1, 2], 56,
interpolation_method='foobar')
assert_raises(ValueError, stats.scoreatpercentile, [1], 101)
assert_raises(ValueError, stats.scoreatpercentile, [1], -1)
def test_empty(self):
assert_equal(stats.scoreatpercentile([], 50), np.nan)
assert_equal(stats.scoreatpercentile(np.array([[], []]), 50), np.nan)
assert_equal(stats.scoreatpercentile([], [50, 99]), [np.nan, np.nan])
class TestItemfreq(object):
a = [5, 7, 1, 2, 1, 5, 7] * 10
b = [1, 2, 5, 7]
def test_numeric_types(self):
# Check itemfreq works for all dtypes (adapted from np.unique tests)
def _check_itemfreq(dt):
a = np.array(self.a, dt)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
v = stats.itemfreq(a)
assert_array_equal(v[:, 0], [1, 2, 5, 7])
assert_array_equal(v[:, 1], np.array([20, 10, 20, 20], dtype=dt))
dtypes = [np.int32, np.int64, np.float32, np.float64,
np.complex64, np.complex128]
for dt in dtypes:
_check_itemfreq(dt)
def test_object_arrays(self):
a, b = self.a, self.b
dt = 'O'
aa = np.empty(len(a), dt)
aa[:] = a
bb = np.empty(len(b), dt)
bb[:] = b
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
v = stats.itemfreq(aa)
assert_array_equal(v[:, 0], bb)
def test_structured_arrays(self):
a, b = self.a, self.b
dt = [('', 'i'), ('', 'i')]
aa = np.array(list(zip(a, a)), dt)
bb = np.array(list(zip(b, b)), dt)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
v = stats.itemfreq(aa)
# Arrays don't compare equal because v[:,0] is object array
assert_equal(tuple(v[2, 0]), tuple(bb[2]))
class TestMode(object):
def test_empty(self):
vals, counts = stats.mode([])
assert_equal(vals, np.array([]))
assert_equal(counts, np.array([]))
def test_scalar(self):
vals, counts = stats.mode(4.)
assert_equal(vals, np.array([4.]))
assert_equal(counts, np.array([1]))
def test_basic(self):
data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
vals = stats.mode(data1)
assert_equal(vals[0][0], 6)
assert_equal(vals[1][0], 3)
def test_axes(self):
data1 = [10, 10, 30, 40]
data2 = [10, 10, 10, 10]
data3 = [20, 10, 20, 20]
data4 = [30, 30, 30, 30]
data5 = [40, 30, 30, 30]
arr = np.array([data1, data2, data3, data4, data5])
vals = stats.mode(arr, axis=None)
assert_equal(vals[0], np.array([30]))
assert_equal(vals[1], np.array([8]))
vals = stats.mode(arr, axis=0)
assert_equal(vals[0], np.array([[10, 10, 30, 30]]))
assert_equal(vals[1], np.array([[2, 3, 3, 2]]))
vals = stats.mode(arr, axis=1)
assert_equal(vals[0], np.array([[10], [10], [20], [30], [30]]))
assert_equal(vals[1], np.array([[2], [4], [3], [4], [3]]))
def test_strings(self):
data1 = ['rain', 'showers', 'showers']
vals = stats.mode(data1)
assert_equal(vals[0][0], 'showers')
assert_equal(vals[1][0], 2)
def test_mixed_objects(self):
objects = [10, True, np.nan, 'hello', 10]
arr = np.empty((5,), dtype=object)
arr[:] = objects
vals = stats.mode(arr)
assert_equal(vals[0][0], 10)
assert_equal(vals[1][0], 2)
def test_objects(self):
# Python objects must be sortable (le + eq) and have ne defined
# for np.unique to work. hash is for set.
class Point(object):
def __init__(self, x):
self.x = x
def __eq__(self, other):
return self.x == other.x
def __ne__(self, other):
return self.x != other.x
def __lt__(self, other):
return self.x < other.x
def __hash__(self):
return hash(self.x)
points = [Point(x) for x in [1, 2, 3, 4, 3, 2, 2, 2]]
arr = np.empty((8,), dtype=object)
arr[:] = points
assert_(len(set(points)) == 4)
assert_equal(np.unique(arr).shape, (4,))
vals = stats.mode(arr)
assert_equal(vals[0][0], Point(2))
assert_equal(vals[1][0], 4)
def test_mode_result_attributes(self):
data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
data2 = []
actual = stats.mode(data1)
attributes = ('mode', 'count')
check_named_results(actual, attributes)
actual2 = stats.mode(data2)
check_named_results(actual2, attributes)
def test_mode_nan(self):
data1 = [3, np.nan, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
actual = stats.mode(data1)
assert_equal(actual, (6, 3))
actual = stats.mode(data1, nan_policy='omit')
assert_equal(actual, (6, 3))
assert_raises(ValueError, stats.mode, data1, nan_policy='raise')
assert_raises(ValueError, stats.mode, data1, nan_policy='foobar')
@pytest.mark.parametrize("data", [
[3, 5, 1, 1, 3],
[3, np.nan, 5, 1, 1, 3],
[3, 5, 1],
[3, np.nan, 5, 1],
])
def test_smallest_equal(self, data):
result = stats.mode(data, nan_policy='omit')
assert_equal(result[0][0], 1)
def test_obj_arrays_ndim(self):
# regression test for gh-9645: `mode` fails for object arrays w/ndim > 1
data = [['Oxidation'], ['Oxidation'], ['Polymerization'], ['Reduction']]
ar = np.array(data, dtype=object)
m = stats.mode(ar, axis=0)
assert np.all(m.mode == 'Oxidation') and m.mode.shape == (1, 1)
assert np.all(m.count == 2) and m.count.shape == (1, 1)
data1 = data + [[np.nan]]
ar1 = np.array(data1, dtype=object)
m = stats.mode(ar1, axis=0)
assert np.all(m.mode == 'Oxidation') and m.mode.shape == (1, 1)
assert np.all(m.count == 2) and m.count.shape == (1, 1)
class TestVariability(object):
testcase = [1,2,3,4]
scalar_testcase = 4.
def test_sem(self):
# This is not in R, so used:
# sqrt(var(testcase)*3/4)/sqrt(3)
# y = stats.sem(self.shoes[0])
# assert_approx_equal(y,0.775177399)
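# For testcase = [1, 2, 3, 4] the formula above gives
# sqrt(1.25) / sqrt(3) ~= 0.6454972244, matching the assertion below.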
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
y = stats.sem(self.scalar_testcase)
assert_(np.isnan(y))
y = stats.sem(self.testcase)
assert_approx_equal(y, 0.6454972244)
n = len(self.testcase)
assert_allclose(stats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
stats.sem(self.testcase, ddof=2))
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.sem(x), np.nan)
assert_equal(stats.sem(x, nan_policy='omit'), 0.9128709291752769)
assert_raises(ValueError, stats.sem, x, nan_policy='raise')
assert_raises(ValueError, stats.sem, x, nan_policy='foobar')
def test_zmap(self):
# not in R, so tested by using:
# (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4)
y = stats.zmap(self.testcase,self.testcase)
desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999])
assert_array_almost_equal(desired,y,decimal=12)
def test_zmap_axis(self):
# Test use of 'axis' keyword in zmap.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 2.0],
[2.0, 0.0, 2.0, 0.0]])
t1 = 1.0/np.sqrt(2.0/3)
t2 = np.sqrt(3.)/3
t3 = np.sqrt(2.)
z0 = stats.zmap(x, x, axis=0)
z1 = stats.zmap(x, x, axis=1)
z0_expected = [[-t1, -t3/2, -t3/2, 0.0],
[0.0, t3, -t3/2, t1],
[t1, -t3/2, t3, -t1]]
z1_expected = [[-1.0, -1.0, 1.0, 1.0],
[-t2, -t2, -t2, np.sqrt(3.)],
[1.0, -1.0, 1.0, -1.0]]
assert_array_almost_equal(z0, z0_expected)
assert_array_almost_equal(z1, z1_expected)
def test_zmap_ddof(self):
# Test use of 'ddof' keyword in zmap.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[0.0, 1.0, 2.0, 3.0]])
z = stats.zmap(x, x, axis=1, ddof=1)
z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))
z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))
assert_array_almost_equal(z[0], z0_expected)
assert_array_almost_equal(z[1], z1_expected)
def test_zscore(self):
# not in R, so tested by using:
# (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4)
y = stats.zscore(self.testcase)
desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999])
assert_array_almost_equal(desired,y,decimal=12)
def test_zscore_axis(self):
# Test use of 'axis' keyword in zscore.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 2.0],
[2.0, 0.0, 2.0, 0.0]])
t1 = 1.0/np.sqrt(2.0/3)
t2 = np.sqrt(3.)/3
t3 = np.sqrt(2.)
z0 = stats.zscore(x, axis=0)
z1 = stats.zscore(x, axis=1)
z0_expected = [[-t1, -t3/2, -t3/2, 0.0],
[0.0, t3, -t3/2, t1],
[t1, -t3/2, t3, -t1]]
z1_expected = [[-1.0, -1.0, 1.0, 1.0],
[-t2, -t2, -t2, np.sqrt(3.)],
[1.0, -1.0, 1.0, -1.0]]
assert_array_almost_equal(z0, z0_expected)
assert_array_almost_equal(z1, z1_expected)
def test_zscore_ddof(self):
# Test use of 'ddof' keyword in zscore.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[0.0, 1.0, 2.0, 3.0]])
z = stats.zscore(x, axis=1, ddof=1)
z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))
z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))
assert_array_almost_equal(z[0], z0_expected)
assert_array_almost_equal(z[1], z1_expected)
def test_mad(self):
dat = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,
3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7,
3.7, 3.7,3.77, 5.28, 28.95])
assert_almost_equal(stats.median_absolute_deviation(dat, axis=None), 0.526323)
dat = dat.reshape(6, 4)
mad = stats.median_absolute_deviation(dat, axis=0)
mad_expected = np.asarray([0.644931, 0.7413, 0.66717, 0.59304])
assert_array_almost_equal(mad, mad_expected)
def test_mad_empty(self):
dat = []
mad = stats.median_absolute_deviation(dat)
assert_equal(mad, np.nan)
def test_mad_nan_propagate(self):
dat = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,
3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7,
3.7, 3.7,3.77, 5.28, np.nan])
mad = stats.median_absolute_deviation(dat, nan_policy='propagate')
assert_equal(mad, np.nan)
def test_mad_nan_raise(self):
dat = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,
3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7,
3.7, 3.7,3.77, 5.28, np.nan])
with assert_raises(ValueError):
stats.median_absolute_deviation(dat, nan_policy='raise')
def test_mad_nan_omit(self):
dat = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,
3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7,
3.7, 3.7,3.77, 5.28, np.nan])
mad = stats.median_absolute_deviation(dat, nan_policy='omit')
assert_almost_equal(mad, 0.504084)
class _numpy_version_warn_context_mgr(object):
"""
A simple context manager class to avoid retyping the same code for
different versions of numpy when the only difference is that older
versions raise warnings.
This manager does not apply for cases where the old code returns
different values.
"""
def __init__(self, min_numpy_version, warning_type, num_warnings):
if NumpyVersion(np.__version__) < min_numpy_version:
self.numpy_is_old = True
self.warning_type = warning_type
self.num_warnings = num_warnings
self.delegate = warnings.catch_warnings(record = True)
else:
self.numpy_is_old = False
def __enter__(self):
if self.numpy_is_old:
self.warn_list = self.delegate.__enter__()
warnings.simplefilter("always")
return None
def __exit__(self, exc_type, exc_value, traceback):
if self.numpy_is_old:
self.delegate.__exit__(exc_type, exc_value, traceback)
_check_warnings(self.warn_list, self.warning_type, self.num_warnings)
def _check_warnings(warn_list, expected_type, expected_len):
"""
Checks that all of the warnings from a list returned by
`warnings.catch_warnings(record=True)` are of the required type and that the
list contains the expected number of warnings.
"""
assert_equal(len(warn_list), expected_len, "number of warnings")
for warn_ in warn_list:
assert_(warn_.category is expected_type)
class TestIQR(object):
def test_basic(self):
x = np.arange(8) * 0.5
np.random.shuffle(x)
assert_equal(stats.iqr(x), 1.75)
def test_api(self):
d = np.ones((5, 5))
stats.iqr(d)
stats.iqr(d, None)
stats.iqr(d, 1)
stats.iqr(d, (0, 1))
stats.iqr(d, None, (10, 90))
stats.iqr(d, None, (30, 20), 'raw')
stats.iqr(d, None, (25, 75), 1.5, 'propagate')
if NumpyVersion(np.__version__) >= '1.9.0a':
stats.iqr(d, None, (50, 50), 'normal', 'raise', 'linear')
stats.iqr(d, None, (25, 75), -0.4, 'omit', 'lower', True)
def test_empty(self):
assert_equal(stats.iqr([]), np.nan)
assert_equal(stats.iqr(np.arange(0)), np.nan)
def test_constant(self):
# Constant array always gives 0
x = np.ones((7, 4))
assert_equal(stats.iqr(x), 0.0)
assert_array_equal(stats.iqr(x, axis=0), np.zeros(4))
assert_array_equal(stats.iqr(x, axis=1), np.zeros(7))
# Even for older versions, 'linear' does not raise a warning
with _numpy_version_warn_context_mgr('1.9.0a', RuntimeWarning, 4):
assert_equal(stats.iqr(x, interpolation='linear'), 0.0)
assert_equal(stats.iqr(x, interpolation='midpoint'), 0.0)
assert_equal(stats.iqr(x, interpolation='nearest'), 0.0)
assert_equal(stats.iqr(x, interpolation='lower'), 0.0)
assert_equal(stats.iqr(x, interpolation='higher'), 0.0)
# 0 only along constant dimensions
# This also tests much of `axis`
y = np.ones((4, 5, 6)) * np.arange(6)
assert_array_equal(stats.iqr(y, axis=0), np.zeros((5, 6)))
assert_array_equal(stats.iqr(y, axis=1), np.zeros((4, 6)))
assert_array_equal(stats.iqr(y, axis=2), np.full((4, 5), 2.5))
assert_array_equal(stats.iqr(y, axis=(0, 1)), np.zeros(6))
assert_array_equal(stats.iqr(y, axis=(0, 2)), np.full(5, 3.))
assert_array_equal(stats.iqr(y, axis=(1, 2)), np.full(4, 3.))
def test_scalarlike(self):
x = np.arange(1) + 7.0
assert_equal(stats.iqr(x[0]), 0.0)
assert_equal(stats.iqr(x), 0.0)
if NumpyVersion(np.__version__) >= '1.9.0a':
assert_array_equal(stats.iqr(x, keepdims=True), [0.0])
else:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert_array_equal(stats.iqr(x, keepdims=True), 0.0)
_check_warnings(w, RuntimeWarning, 1)
def test_2D(self):
x = np.arange(15).reshape((3, 5))
assert_equal(stats.iqr(x), 7.0)
assert_array_equal(stats.iqr(x, axis=0), np.full(5, 5.))
assert_array_equal(stats.iqr(x, axis=1), np.full(3, 2.))
assert_array_equal(stats.iqr(x, axis=(0, 1)), 7.0)
assert_array_equal(stats.iqr(x, axis=(1, 0)), 7.0)
def test_axis(self):
# The `axis` keyword is also put through its paces in `test_keepdims`.
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10) # x.shape = (71, 23, 10)
q = stats.iqr(o)
assert_equal(stats.iqr(x, axis=(0, 1)), q)
x = np.rollaxis(x, -1, 0) # x.shape = (10, 71, 23)
assert_equal(stats.iqr(x, axis=(2, 1)), q)
x = x.swapaxes(0, 1) # x.shape = (71, 10, 23)
assert_equal(stats.iqr(x, axis=(0, 2)), q)
x = x.swapaxes(0, 1) # x.shape = (10, 71, 23)
assert_equal(stats.iqr(x, axis=(0, 1, 2)),
stats.iqr(x, axis=None))
assert_equal(stats.iqr(x, axis=(0,)),
stats.iqr(x, axis=0))
d = np.arange(3 * 5 * 7 * 11)
# Older versions of numpy only shuffle along axis=0.
# Not sure about newer, don't care.
np.random.shuffle(d)
d = d.reshape((3, 5, 7, 11))
assert_equal(stats.iqr(d, axis=(0, 1, 2))[0],
stats.iqr(d[:,:,:, 0].ravel()))
assert_equal(stats.iqr(d, axis=(0, 1, 3))[1],
stats.iqr(d[:,:, 1,:].ravel()))
assert_equal(stats.iqr(d, axis=(3, 1, -4))[2],
stats.iqr(d[:,:, 2,:].ravel()))
assert_equal(stats.iqr(d, axis=(3, 1, 2))[2],
stats.iqr(d[2,:,:,:].ravel()))
assert_equal(stats.iqr(d, axis=(3, 2))[2, 1],
stats.iqr(d[2, 1,:,:].ravel()))
assert_equal(stats.iqr(d, axis=(1, -2))[2, 1],
stats.iqr(d[2, :, :, 1].ravel()))
assert_equal(stats.iqr(d, axis=(1, 3))[2, 2],
stats.iqr(d[2, :, 2,:].ravel()))
if NumpyVersion(np.__version__) >= '1.9.0a':
assert_raises(IndexError, stats.iqr, d, axis=4)
else:
assert_raises(ValueError, stats.iqr, d, axis=4)
assert_raises(ValueError, stats.iqr, d, axis=(0, 0))
def test_rng(self):
x = np.arange(5)
assert_equal(stats.iqr(x), 2)
assert_equal(stats.iqr(x, rng=(25, 87.5)), 2.5)
assert_equal(stats.iqr(x, rng=(12.5, 75)), 2.5)
assert_almost_equal(stats.iqr(x, rng=(10, 50)), 1.6)  # 2.0 - 0.4
assert_raises(ValueError, stats.iqr, x, rng=(0, 101))
assert_raises(ValueError, stats.iqr, x, rng=(np.nan, 25))
assert_raises(TypeError, stats.iqr, x, rng=(0, 50, 60))
def test_interpolation(self):
x = np.arange(5)
y = np.arange(4)
# Default
assert_equal(stats.iqr(x), 2)
assert_equal(stats.iqr(y), 1.5)
if NumpyVersion(np.__version__) >= '1.9.0a':
# Linear
assert_equal(stats.iqr(x, interpolation='linear'), 2)
assert_equal(stats.iqr(y, interpolation='linear'), 1.5)
# Higher
assert_equal(stats.iqr(x, interpolation='higher'), 2)
assert_equal(stats.iqr(x, rng=(25, 80), interpolation='higher'), 3)
assert_equal(stats.iqr(y, interpolation='higher'), 2)
# Lower (will generally, but not always be the same as higher)
assert_equal(stats.iqr(x, interpolation='lower'), 2)
assert_equal(stats.iqr(x, rng=(25, 80), interpolation='lower'), 2)
assert_equal(stats.iqr(y, interpolation='lower'), 2)
# Nearest
assert_equal(stats.iqr(x, interpolation='nearest'), 2)
assert_equal(stats.iqr(y, interpolation='nearest'), 1)
# Midpoint
if NumpyVersion(np.__version__) >= '1.11.0a':
assert_equal(stats.iqr(x, interpolation='midpoint'), 2)
assert_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2.5)
assert_equal(stats.iqr(y, interpolation='midpoint'), 2)
else:
# midpoint did not work correctly before numpy 1.11.0
assert_equal(stats.iqr(x, interpolation='midpoint'), 2)
assert_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2)
assert_equal(stats.iqr(y, interpolation='midpoint'), 2)
else:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# Linear
assert_equal(stats.iqr(x, interpolation='linear'), 2)
assert_equal(stats.iqr(y, interpolation='linear'), 1.5)
# Higher
assert_equal(stats.iqr(x, interpolation='higher'), 2)
assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='higher'), 2.2)
assert_equal(stats.iqr(y, interpolation='higher'), 1.5)
# Lower
assert_equal(stats.iqr(x, interpolation='lower'), 2)
assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='lower'), 2.2)
assert_equal(stats.iqr(y, interpolation='lower'), 1.5)
# Nearest
assert_equal(stats.iqr(x, interpolation='nearest'), 2)
assert_equal(stats.iqr(y, interpolation='nearest'), 1.5)
# Midpoint
assert_equal(stats.iqr(x, interpolation='midpoint'), 2)
assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2.2)
assert_equal(stats.iqr(y, interpolation='midpoint'), 1.5)
_check_warnings(w, RuntimeWarning, 11)
if NumpyVersion(np.__version__) >= '1.9.0a':
assert_raises(ValueError, stats.iqr, x, interpolation='foobar')
else:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert_equal(stats.iqr(x, interpolation='foobar'), 2)
_check_warnings(w, RuntimeWarning, 1)
def test_keepdims(self):
numpy_version = NumpyVersion(np.__version__)
# Also tests most of `axis`
x = np.ones((3, 5, 7, 11))
assert_equal(stats.iqr(x, axis=None, keepdims=False).shape, ())
assert_equal(stats.iqr(x, axis=2, keepdims=False).shape, (3, 5, 11))
assert_equal(stats.iqr(x, axis=(0, 1), keepdims=False).shape, (7, 11))
assert_equal(stats.iqr(x, axis=(0, 3), keepdims=False).shape, (5, 7))
assert_equal(stats.iqr(x, axis=(1,), keepdims=False).shape, (3, 7, 11))
assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=False).shape, ())
assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=False).shape, (7,))
if numpy_version >= '1.9.0a':
assert_equal(stats.iqr(x, axis=None, keepdims=True).shape, (1, 1, 1, 1))
assert_equal(stats.iqr(x, axis=2, keepdims=True).shape, (3, 5, 1, 11))
assert_equal(stats.iqr(x, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11))
assert_equal(stats.iqr(x, axis=(0, 3), keepdims=True).shape, (1, 5, 7, 1))
assert_equal(stats.iqr(x, axis=(1,), keepdims=True).shape, (3, 1, 7, 11))
assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1))
assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1))
else:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert_equal(stats.iqr(x, axis=None, keepdims=True).shape, ())
assert_equal(stats.iqr(x, axis=2, keepdims=True).shape, (3, 5, 11))
assert_equal(stats.iqr(x, axis=(0, 1), keepdims=True).shape, (7, 11))
assert_equal(stats.iqr(x, axis=(0, 3), keepdims=True).shape, (5, 7))
assert_equal(stats.iqr(x, axis=(1,), keepdims=True).shape, (3, 7, 11))
assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=True).shape, ())
assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=True).shape, (7,))
_check_warnings(w, RuntimeWarning, 7)
def test_nanpolicy(self):
numpy_version = NumpyVersion(np.__version__)
x = np.arange(15.0).reshape((3, 5))
# No NaNs
assert_equal(stats.iqr(x, nan_policy='propagate'), 7)
assert_equal(stats.iqr(x, nan_policy='omit'), 7)
assert_equal(stats.iqr(x, nan_policy='raise'), 7)
# Yes NaNs
x[1, 2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
if numpy_version < '1.10.0a':
# Fails over to mishmash of omit/propagate, but mostly omit
# The first case showcases the "incorrect" behavior of np.percentile
assert_equal(stats.iqr(x, nan_policy='propagate'), 8)
assert_equal(stats.iqr(x, axis=0, nan_policy='propagate'), [5, 5, np.nan, 5, 5])
if numpy_version < '1.9.0a':
assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, 3, 2])
else:
# some fixes to percentile nan handling in 1.9
assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2])
_check_warnings(w, RuntimeWarning, 3)
else:
assert_equal(stats.iqr(x, nan_policy='propagate'), np.nan)
assert_equal(stats.iqr(x, axis=0, nan_policy='propagate'), [5, 5, np.nan, 5, 5])
assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
if numpy_version < '1.9.0a':
# Fails over to mishmash of omit/propagate, but mostly omit
assert_equal(stats.iqr(x, nan_policy='omit'), 8)
assert_equal(stats.iqr(x, axis=0, nan_policy='omit'), [5, 5, np.nan, 5, 5])
assert_equal(stats.iqr(x, axis=1, nan_policy='omit'), [2, 3, 2])
_check_warnings(w, RuntimeWarning, 3)
else:
assert_equal(stats.iqr(x, nan_policy='omit'), 7.5)
assert_equal(stats.iqr(x, axis=0, nan_policy='omit'), np.full(5, 5))
assert_equal(stats.iqr(x, axis=1, nan_policy='omit'), [2, 2.5, 2])
assert_raises(ValueError, stats.iqr, x, nan_policy='raise')
assert_raises(ValueError, stats.iqr, x, axis=0, nan_policy='raise')
assert_raises(ValueError, stats.iqr, x, axis=1, nan_policy='raise')
# Bad policy
assert_raises(ValueError, stats.iqr, x, nan_policy='barfood')
def test_scale(self):
numpy_version = NumpyVersion(np.__version__)
x = np.arange(15.0).reshape((3, 5))
# No NaNs
assert_equal(stats.iqr(x, scale='raw'), 7)
assert_almost_equal(stats.iqr(x, scale='normal'), 7 / 1.3489795)
assert_equal(stats.iqr(x, scale=2.0), 3.5)
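# (1.3489795 ~= 2 * norm.ppf(0.75), the IQR of a standard normal, so
# scale='normal' rescales the raw IQR into a normal-consistent estimate)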
# Yes NaNs
x[1, 2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
if numpy_version < '1.10.0a':
# Fails over to mishmash of omit/propagate, but mostly omit
assert_equal(stats.iqr(x, scale='raw', nan_policy='propagate'), 8)
assert_almost_equal(stats.iqr(x, scale='normal',
nan_policy='propagate'),
8 / 1.3489795)
assert_equal(stats.iqr(x, scale=2.0, nan_policy='propagate'), 4)
# axis=1 chosen to show behavior with both nans and without
if numpy_version < '1.9.0a':
assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, 3, 2])
assert_almost_equal(stats.iqr(x, axis=1, scale='normal',
nan_policy='propagate'),
np.array([2, 3, 2]) / 1.3489795)
assert_equal(stats.iqr(x, axis=1, scale=2.0,
nan_policy='propagate'), [1, 1.5, 1])
else:
# some fixes to percentile nan handling in 1.9
assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2])
assert_almost_equal(stats.iqr(x, axis=1, scale='normal',
nan_policy='propagate'),
np.array([2, np.nan, 2]) / 1.3489795)
assert_equal(stats.iqr(x, axis=1, scale=2.0,
nan_policy='propagate'), [1, np.nan, 1])
_check_warnings(w, RuntimeWarning, 6)
else:
assert_equal(stats.iqr(x, scale='raw', nan_policy='propagate'), np.nan)
assert_equal(stats.iqr(x, scale='normal', nan_policy='propagate'), np.nan)
assert_equal(stats.iqr(x, scale=2.0, nan_policy='propagate'), np.nan)
# axis=1 chosen to show behavior with both nans and without
assert_equal(stats.iqr(x, axis=1, scale='raw',
nan_policy='propagate'), [2, np.nan, 2])
assert_almost_equal(stats.iqr(x, axis=1, scale='normal',
nan_policy='propagate'),
np.array([2, np.nan, 2]) / 1.3489795)
assert_equal(stats.iqr(x, axis=1, scale=2.0, nan_policy='propagate'),
[1, np.nan, 1])
# Since NumPy 1.17.0.dev, warnings are no longer emitted by
# np.percentile with nans, so we don't check the number of
# warnings here. See https://github.com/numpy/numpy/pull/12679.
if numpy_version < '1.9.0a':
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# Fails over to mishmash of omit/propagate, but mostly omit
assert_equal(stats.iqr(x, scale='raw', nan_policy='omit'), 8)
assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='omit'),
8 / 1.3489795)
assert_equal(stats.iqr(x, scale=2.0, nan_policy='omit'), 4)
_check_warnings(w, RuntimeWarning, 3)
else:
assert_equal(stats.iqr(x, scale='raw', nan_policy='omit'), 7.5)
assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='omit'),
7.5 / 1.3489795)
assert_equal(stats.iqr(x, scale=2.0, nan_policy='omit'), 3.75)
# Bad scale
assert_raises(ValueError, stats.iqr, x, scale='foobar')
class TestMoments(object):
"""
Comparison numbers were found using R v.1.5.1;
note that length(testcase) = 4.
testmathworks comes from the documentation for the
MATLAB Statistics Toolbox and can be found at both
https://www.mathworks.com/help/stats/kurtosis.html
https://www.mathworks.com/help/stats/skewness.html
Note that both test cases came from there.
"""
testcase = [1,2,3,4]
scalar_testcase = 4.
np.random.seed(1234)
testcase_moment_accuracy = np.random.rand(42)
testmathworks = [1.165, 0.6268, 0.0751, 0.3516, -0.6965]
def test_moment(self):
# mean((testcase - mean(testcase, axis=0))**power, axis=0)
y = stats.moment(self.scalar_testcase)
assert_approx_equal(y, 0.0)
y = stats.moment(self.testcase, 0)
assert_approx_equal(y, 1.0)
y = stats.moment(self.testcase, 1)
assert_approx_equal(y, 0.0, 10)
y = stats.moment(self.testcase, 2)
assert_approx_equal(y, 1.25)
y = stats.moment(self.testcase, 3)
assert_approx_equal(y, 0.0)
y = stats.moment(self.testcase, 4)
assert_approx_equal(y, 2.5625)
# check array_like input for moment
y = stats.moment(self.testcase, [1, 2, 3, 4])
assert_allclose(y, [0, 1.25, 0, 2.5625])
        # check that the moment order must be integral (floats with integral
        # values are accepted, non-integral values raise)
y = stats.moment(self.testcase, 0.0)
assert_approx_equal(y, 1.0)
assert_raises(ValueError, stats.moment, self.testcase, 1.2)
y = stats.moment(self.testcase, [1.0, 2, 3, 4.0])
assert_allclose(y, [0, 1.25, 0, 2.5625])
# test empty input
y = stats.moment([])
assert_equal(y, np.nan)
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.moment(x, 2), np.nan)
assert_almost_equal(stats.moment(x, nan_policy='omit'), 0.0)
assert_raises(ValueError, stats.moment, x, nan_policy='raise')
assert_raises(ValueError, stats.moment, x, nan_policy='foobar')
def test_moment_propagate_nan(self):
# Check that the shape of the result is the same for inputs
# with and without nans, cf gh-5817
a = np.arange(8).reshape(2, -1).astype(float)
a[1, 0] = np.nan
mm = stats.moment(a, 2, axis=1, nan_policy="propagate")
np.testing.assert_allclose(mm, [1.25, np.nan], atol=1e-15)
def test_variation(self):
# variation = samplestd / mean
y = stats.variation(self.scalar_testcase)
assert_approx_equal(y, 0.0)
y = stats.variation(self.testcase)
assert_approx_equal(y, 0.44721359549996, 10)
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.variation(x), np.nan)
assert_almost_equal(stats.variation(x, nan_policy='omit'),
0.6454972243679028)
assert_raises(ValueError, stats.variation, x, nan_policy='raise')
assert_raises(ValueError, stats.variation, x, nan_policy='foobar')
def test_variation_propagate_nan(self):
# Check that the shape of the result is the same for inputs
# with and without nans, cf gh-5817
a = np.arange(8).reshape(2, -1).astype(float)
a[1, 0] = np.nan
vv = stats.variation(a, axis=1, nan_policy="propagate")
np.testing.assert_allclose(vv, [0.7453559924999299, np.nan], atol=1e-15)
def test_skewness(self):
# Scalar test case
y = stats.skew(self.scalar_testcase)
assert_approx_equal(y, 0.0)
# sum((testmathworks-mean(testmathworks,axis=0))**3,axis=0) /
# ((sqrt(var(testmathworks)*4/5))**3)/5
y = stats.skew(self.testmathworks)
assert_approx_equal(y, -0.29322304336607, 10)
y = stats.skew(self.testmathworks, bias=0)
assert_approx_equal(y, -0.437111105023940, 10)
y = stats.skew(self.testcase)
assert_approx_equal(y, 0.0, 10)
x = np.arange(10.)
x[9] = np.nan
with np.errstate(invalid='ignore'):
assert_equal(stats.skew(x), np.nan)
assert_equal(stats.skew(x, nan_policy='omit'), 0.)
assert_raises(ValueError, stats.skew, x, nan_policy='raise')
assert_raises(ValueError, stats.skew, x, nan_policy='foobar')
def test_skewness_scalar(self):
# `skew` must return a scalar for 1-dim input
assert_equal(stats.skew(arange(10)), 0.0)
def test_skew_propagate_nan(self):
# Check that the shape of the result is the same for inputs
# with and without nans, cf gh-5817
a = np.arange(8).reshape(2, -1).astype(float)
a[1, 0] = np.nan
with np.errstate(invalid='ignore'):
s = stats.skew(a, axis=1, nan_policy="propagate")
np.testing.assert_allclose(s, [0, np.nan], atol=1e-15)
def test_kurtosis(self):
# Scalar test case
y = stats.kurtosis(self.scalar_testcase)
assert_approx_equal(y, -3.0)
# sum((testcase-mean(testcase,axis=0))**4,axis=0)/((sqrt(var(testcase)*3/4))**4)/4
# sum((test2-mean(testmathworks,axis=0))**4,axis=0)/((sqrt(var(testmathworks)*4/5))**4)/5
# Set flags for axis = 0 and
# fisher=0 (Pearson's defn of kurtosis for compatibility with Matlab)
y = stats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)
assert_approx_equal(y, 2.1658856802973, 10)
# Note that MATLAB has confusing docs for the following case
        # kurtosis(x,0) gives an unbiased estimate of Pearson's kurtosis
        # kurtosis(x) gives a biased estimate of Fisher's kurtosis (Pearson-3)
# The MATLAB docs imply that both should give Fisher's
y = stats.kurtosis(self.testmathworks, fisher=0, bias=0)
assert_approx_equal(y, 3.663542721189047, 10)
y = stats.kurtosis(self.testcase, 0, 0)
assert_approx_equal(y, 1.64)
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.kurtosis(x), np.nan)
assert_almost_equal(stats.kurtosis(x, nan_policy='omit'), -1.230000)
assert_raises(ValueError, stats.kurtosis, x, nan_policy='raise')
assert_raises(ValueError, stats.kurtosis, x, nan_policy='foobar')
def test_kurtosis_array_scalar(self):
assert_equal(type(stats.kurtosis([1,2,3])), float)
def test_kurtosis_propagate_nan(self):
# Check that the shape of the result is the same for inputs
# with and without nans, cf gh-5817
a = np.arange(8).reshape(2, -1).astype(float)
a[1, 0] = np.nan
k = stats.kurtosis(a, axis=1, nan_policy="propagate")
np.testing.assert_allclose(k, [-1.36, np.nan], atol=1e-15)
def test_moment_accuracy(self):
# 'moment' must have a small enough error compared to the slower
# but very accurate numpy.power() implementation.
tc_no_mean = self.testcase_moment_accuracy - \
np.mean(self.testcase_moment_accuracy)
assert_allclose(np.power(tc_no_mean, 42).mean(),
stats.moment(self.testcase_moment_accuracy, 42))
class TestStudentTest(object):
X1 = np.array([-1, 0, 1])
X2 = np.array([0, 1, 2])
T1_0 = 0
P1_0 = 1
T1_1 = -1.732051
P1_1 = 0.2254033
T1_2 = -3.464102
P1_2 = 0.0741799
T2_0 = 1.732051
P2_0 = 0.2254033
def test_onesample(self):
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
t, p = stats.ttest_1samp(4., 3.)
assert_(np.isnan(t))
assert_(np.isnan(p))
t, p = stats.ttest_1samp(self.X1, 0)
assert_array_almost_equal(t, self.T1_0)
assert_array_almost_equal(p, self.P1_0)
res = stats.ttest_1samp(self.X1, 0)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
t, p = stats.ttest_1samp(self.X2, 0)
assert_array_almost_equal(t, self.T2_0)
assert_array_almost_equal(p, self.P2_0)
t, p = stats.ttest_1samp(self.X1, 1)
assert_array_almost_equal(t, self.T1_1)
assert_array_almost_equal(p, self.P1_1)
t, p = stats.ttest_1samp(self.X1, 2)
assert_array_almost_equal(t, self.T1_2)
assert_array_almost_equal(p, self.P1_2)
# check nan policy
np.random.seed(7654567)
x = stats.norm.rvs(loc=5, scale=10, size=51)
x[50] = np.nan
with np.errstate(invalid="ignore"):
assert_array_equal(stats.ttest_1samp(x, 5.0), (np.nan, np.nan))
assert_array_almost_equal(stats.ttest_1samp(x, 5.0, nan_policy='omit'),
(-1.6412624074367159, 0.107147027334048005))
assert_raises(ValueError, stats.ttest_1samp, x, 5.0, nan_policy='raise')
assert_raises(ValueError, stats.ttest_1samp, x, 5.0,
nan_policy='foobar')
def test_percentileofscore():
pcos = stats.percentileofscore
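    # Reminder of the `kind` conventions exercised below (as documented for
    # stats.percentileofscore): 'strict' counts scores strictly below the given
    # score, 'weak' counts scores less than or equal to it, 'mean' is the
    # average of the two, and 'rank' (the default) averages the percentage
    # rankings of all matching scores.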
assert_equal(pcos([1,2,3,4,5,6,7,8,9,10],4), 40.0)
for (kind, result) in [('mean', 35.0),
('strict', 30.0),
('weak', 40.0)]:
assert_equal(pcos(np.arange(10) + 1, 4, kind=kind), result)
# multiple - 2
for (kind, result) in [('rank', 45.0),
('strict', 30.0),
('weak', 50.0),
('mean', 40.0)]:
assert_equal(pcos([1,2,3,4,4,5,6,7,8,9], 4, kind=kind), result)
# multiple - 3
assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4), 50.0)
for (kind, result) in [('rank', 50.0),
('mean', 45.0),
('strict', 30.0),
('weak', 60.0)]:
assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4, kind=kind), result)
# missing
for kind in ('rank', 'mean', 'strict', 'weak'):
assert_equal(pcos([1,2,3,5,6,7,8,9,10,11], 4, kind=kind), 30)
# larger numbers
for (kind, result) in [('mean', 35.0),
('strict', 30.0),
('weak', 40.0)]:
assert_equal(
pcos([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 40,
kind=kind), result)
for (kind, result) in [('mean', 45.0),
('strict', 30.0),
('weak', 60.0)]:
assert_equal(
pcos([10, 20, 30, 40, 40, 40, 50, 60, 70, 80],
40, kind=kind), result)
for kind in ('rank', 'mean', 'strict', 'weak'):
assert_equal(
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
40, kind=kind), 30.0)
# boundaries
for (kind, result) in [('rank', 10.0),
('mean', 5.0),
('strict', 0.0),
('weak', 10.0)]:
assert_equal(
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
10, kind=kind), result)
for (kind, result) in [('rank', 100.0),
('mean', 95.0),
('strict', 90.0),
('weak', 100.0)]:
assert_equal(
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
110, kind=kind), result)
# out of bounds
for (kind, score, result) in [('rank', 200, 100.0),
('mean', 200, 100.0),
('mean', 0, 0.0)]:
assert_equal(
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
score, kind=kind), result)
assert_raises(ValueError, pcos, [1, 2, 3, 3, 4], 3, kind='unrecognized')
PowerDivCase = namedtuple('Case', ['f_obs', 'f_exp', 'ddof', 'axis',
'chi2', # Pearson's
'log', # G-test (log-likelihood)
'mod_log', # Modified log-likelihood
'cr', # Cressie-Read (lambda=2/3)
])
# The details of the first two elements in power_div_1d_cases are used
# in a test in TestPowerDivergence. Check that code before making
# any changes here.
power_div_1d_cases = [
# Use the default f_exp.
PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=None, ddof=0, axis=None,
chi2=4,
log=2*(4*np.log(4/8) + 12*np.log(12/8)),
mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)),
cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)),
# Give a non-uniform f_exp.
PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=[2, 16, 12, 2], ddof=0, axis=None,
chi2=24,
log=2*(4*np.log(4/2) + 8*np.log(8/16) + 8*np.log(8/2)),
mod_log=2*(2*np.log(2/4) + 16*np.log(16/8) + 2*np.log(2/8)),
cr=(4*((4/2)**(2/3) - 1) + 8*((8/16)**(2/3) - 1) +
8*((8/2)**(2/3) - 1))/(5/9)),
# f_exp is a scalar.
PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=8, ddof=0, axis=None,
chi2=4,
log=2*(4*np.log(4/8) + 12*np.log(12/8)),
mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)),
cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)),
# f_exp equal to f_obs.
PowerDivCase(f_obs=[3, 5, 7, 9], f_exp=[3, 5, 7, 9], ddof=0, axis=0,
chi2=0, log=0, mod_log=0, cr=0),
]
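# For reference, the expected statistics above follow the Cressie-Read power
# divergence family.  A minimal sketch of the general formula (assuming the
# standard parametrisation, not copied from the implementation):
#
#     stat = 2 * np.sum(f_obs * ((f_obs / f_exp)**lambda_ - 1)) / (lambda_ * (lambda_ + 1))
#
# with the conventional limits lambda_=1 (Pearson chi-square), lambda_->0
# (log-likelihood / G-test), lambda_->-1 (modified log-likelihood) and
# lambda_=2/3 (the Cressie-Read recommendation).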
power_div_empty_cases = [
# Shape is (0,)--a data set with length 0. The computed
# test statistic should be 0.
PowerDivCase(f_obs=[],
f_exp=None, ddof=0, axis=0,
chi2=0, log=0, mod_log=0, cr=0),
# Shape is (0, 3). This is 3 data sets, but each data set has
# length 0, so the computed test statistic should be [0, 0, 0].
PowerDivCase(f_obs=np.array([[],[],[]]).T,
f_exp=None, ddof=0, axis=0,
chi2=[0, 0, 0],
log=[0, 0, 0],
mod_log=[0, 0, 0],
cr=[0, 0, 0]),
# Shape is (3, 0). This represents an empty collection of
# data sets in which each data set has length 3. The test
# statistic should be an empty array.
PowerDivCase(f_obs=np.array([[],[],[]]),
f_exp=None, ddof=0, axis=0,
chi2=[],
log=[],
mod_log=[],
cr=[]),
]
class TestPowerDivergence(object):
def check_power_divergence(self, f_obs, f_exp, ddof, axis, lambda_,
expected_stat):
f_obs = np.asarray(f_obs)
if axis is None:
num_obs = f_obs.size
else:
b = np.broadcast(f_obs, f_exp)
num_obs = b.shape[axis]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Mean of empty slice")
stat, p = stats.power_divergence(
f_obs=f_obs, f_exp=f_exp, ddof=ddof,
axis=axis, lambda_=lambda_)
assert_allclose(stat, expected_stat)
if lambda_ == 1 or lambda_ == "pearson":
# Also test stats.chisquare.
stat, p = stats.chisquare(f_obs=f_obs, f_exp=f_exp, ddof=ddof,
axis=axis)
assert_allclose(stat, expected_stat)
ddof = np.asarray(ddof)
expected_p = stats.distributions.chi2.sf(expected_stat,
num_obs - 1 - ddof)
assert_allclose(p, expected_p)
def test_basic(self):
for case in power_div_1d_cases:
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
None, case.chi2)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"pearson", case.chi2)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
1, case.chi2)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"log-likelihood", case.log)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"mod-log-likelihood", case.mod_log)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"cressie-read", case.cr)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
2/3, case.cr)
def test_basic_masked(self):
for case in power_div_1d_cases:
mobs = np.ma.array(case.f_obs)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
None, case.chi2)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
"pearson", case.chi2)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
1, case.chi2)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
"log-likelihood", case.log)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
"mod-log-likelihood", case.mod_log)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
"cressie-read", case.cr)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
2/3, case.cr)
def test_axis(self):
case0 = power_div_1d_cases[0]
case1 = power_div_1d_cases[1]
f_obs = np.vstack((case0.f_obs, case1.f_obs))
f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),
case1.f_exp))
# Check the four computational code paths in power_divergence
# using a 2D array with axis=1.
self.check_power_divergence(
f_obs, f_exp, 0, 1,
"pearson", [case0.chi2, case1.chi2])
self.check_power_divergence(
f_obs, f_exp, 0, 1,
"log-likelihood", [case0.log, case1.log])
self.check_power_divergence(
f_obs, f_exp, 0, 1,
"mod-log-likelihood", [case0.mod_log, case1.mod_log])
self.check_power_divergence(
f_obs, f_exp, 0, 1,
"cressie-read", [case0.cr, case1.cr])
# Reshape case0.f_obs to shape (2,2), and use axis=None.
# The result should be the same.
self.check_power_divergence(
np.array(case0.f_obs).reshape(2, 2), None, 0, None,
"pearson", case0.chi2)
def test_ddof_broadcasting(self):
# Test that ddof broadcasts correctly.
# ddof does not affect the test statistic. It is broadcast
# with the computed test statistic for the computation of
# the p value.
case0 = power_div_1d_cases[0]
case1 = power_div_1d_cases[1]
# Create 4x2 arrays of observed and expected frequencies.
f_obs = np.vstack((case0.f_obs, case1.f_obs)).T
f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),
case1.f_exp)).T
expected_chi2 = [case0.chi2, case1.chi2]
# ddof has shape (2, 1). This is broadcast with the computed
# statistic, so p will have shape (2,2).
ddof = np.array([[0], [1]])
stat, p = stats.power_divergence(f_obs, f_exp, ddof=ddof)
assert_allclose(stat, expected_chi2)
# Compute the p values separately, passing in scalars for ddof.
stat0, p0 = stats.power_divergence(f_obs, f_exp, ddof=ddof[0,0])
stat1, p1 = stats.power_divergence(f_obs, f_exp, ddof=ddof[1,0])
assert_array_equal(p, np.vstack((p0, p1)))
def test_empty_cases(self):
with warnings.catch_warnings():
for case in power_div_empty_cases:
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"pearson", case.chi2)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"log-likelihood", case.log)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"mod-log-likelihood", case.mod_log)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"cressie-read", case.cr)
def test_power_divergence_result_attributes(self):
f_obs = power_div_1d_cases[0].f_obs
f_exp = power_div_1d_cases[0].f_exp
ddof = power_div_1d_cases[0].ddof
axis = power_div_1d_cases[0].axis
res = stats.power_divergence(f_obs=f_obs, f_exp=f_exp, ddof=ddof,
axis=axis, lambda_="pearson")
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_chisquare_masked_arrays():
# Test masked arrays.
obs = np.array([[8, 8, 16, 32, -1], [-1, -1, 3, 4, 5]]).T
mask = np.array([[0, 0, 0, 0, 1], [1, 1, 0, 0, 0]]).T
mobs = np.ma.masked_array(obs, mask)
expected_chisq = np.array([24.0, 0.5])
expected_g = np.array([2*(2*8*np.log(0.5) + 32*np.log(2.0)),
2*(3*np.log(0.75) + 5*np.log(1.25))])
chi2 = stats.distributions.chi2
chisq, p = stats.chisquare(mobs)
mat.assert_array_equal(chisq, expected_chisq)
mat.assert_array_almost_equal(p, chi2.sf(expected_chisq,
mobs.count(axis=0) - 1))
g, p = stats.power_divergence(mobs, lambda_='log-likelihood')
mat.assert_array_almost_equal(g, expected_g, decimal=15)
mat.assert_array_almost_equal(p, chi2.sf(expected_g,
mobs.count(axis=0) - 1))
chisq, p = stats.chisquare(mobs.T, axis=1)
mat.assert_array_equal(chisq, expected_chisq)
mat.assert_array_almost_equal(p, chi2.sf(expected_chisq,
mobs.T.count(axis=1) - 1))
g, p = stats.power_divergence(mobs.T, axis=1, lambda_="log-likelihood")
mat.assert_array_almost_equal(g, expected_g, decimal=15)
mat.assert_array_almost_equal(p, chi2.sf(expected_g,
mobs.count(axis=0) - 1))
obs1 = np.ma.array([3, 5, 6, 99, 10], mask=[0, 0, 0, 1, 0])
exp1 = np.ma.array([2, 4, 8, 10, 99], mask=[0, 0, 0, 0, 1])
chi2, p = stats.chisquare(obs1, f_exp=exp1)
# Because of the mask at index 3 of obs1 and at index 4 of exp1,
# only the first three elements are included in the calculation
# of the statistic.
mat.assert_array_equal(chi2, 1/2 + 1/4 + 4/8)
# When axis=None, the two values should have type np.float64.
chisq, p = stats.chisquare(np.ma.array([1,2,3]), axis=None)
assert_(isinstance(chisq, np.float64))
assert_(isinstance(p, np.float64))
assert_equal(chisq, 1.0)
assert_almost_equal(p, stats.distributions.chi2.sf(1.0, 2))
# Empty arrays:
# A data set with length 0 returns a masked scalar.
with np.errstate(invalid='ignore'):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Mean of empty slice")
chisq, p = stats.chisquare(np.ma.array([]))
assert_(isinstance(chisq, np.ma.MaskedArray))
assert_equal(chisq.shape, ())
assert_(chisq.mask)
empty3 = np.ma.array([[],[],[]])
# empty3 is a collection of 0 data sets (whose lengths would be 3, if
# there were any), so the return value is an array with length 0.
chisq, p = stats.chisquare(empty3)
assert_(isinstance(chisq, np.ma.MaskedArray))
mat.assert_array_equal(chisq, [])
# empty3.T is an array containing 3 data sets, each with length 0,
# so an array of size (3,) is returned, with all values masked.
with np.errstate(invalid='ignore'):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Mean of empty slice")
chisq, p = stats.chisquare(empty3.T)
assert_(isinstance(chisq, np.ma.MaskedArray))
assert_equal(chisq.shape, (3,))
assert_(np.all(chisq.mask))
def test_power_divergence_against_cressie_read_data():
# Test stats.power_divergence against tables 4 and 5 from
    # Cressie and Read, "Multinomial Goodness-of-Fit Tests",
# J. R. Statist. Soc. B (1984), Vol 46, No. 3, pp. 440-464.
# This tests the calculation for several values of lambda.
# `table4` holds just the second and third columns from Table 4.
table4 = np.array([
# observed, expected,
15, 15.171,
11, 13.952,
14, 12.831,
17, 11.800,
5, 10.852,
11, 9.9796,
10, 9.1777,
4, 8.4402,
8, 7.7620,
10, 7.1383,
7, 6.5647,
9, 6.0371,
11, 5.5520,
3, 5.1059,
6, 4.6956,
1, 4.3183,
1, 3.9713,
4, 3.6522,
]).reshape(-1, 2)
table5 = np.array([
# lambda, statistic
-10.0, 72.2e3,
-5.0, 28.9e1,
-3.0, 65.6,
-2.0, 40.6,
-1.5, 34.0,
-1.0, 29.5,
-0.5, 26.5,
0.0, 24.6,
0.5, 23.4,
0.67, 23.1,
1.0, 22.7,
1.5, 22.6,
2.0, 22.9,
3.0, 24.8,
5.0, 35.5,
10.0, 21.4e1,
]).reshape(-1, 2)
for lambda_, expected_stat in table5:
stat, p = stats.power_divergence(table4[:,0], table4[:,1],
lambda_=lambda_)
assert_allclose(stat, expected_stat, rtol=5e-3)
def test_friedmanchisquare():
# see ticket:113
# verified with matlab and R
# From Demsar "Statistical Comparisons of Classifiers over Multiple Data Sets"
# 2006, Xf=9.28 (no tie handling, tie corrected Xf >=9.28)
x1 = [array([0.763, 0.599, 0.954, 0.628, 0.882, 0.936, 0.661, 0.583,
0.775, 1.0, 0.94, 0.619, 0.972, 0.957]),
array([0.768, 0.591, 0.971, 0.661, 0.888, 0.931, 0.668, 0.583,
0.838, 1.0, 0.962, 0.666, 0.981, 0.978]),
array([0.771, 0.590, 0.968, 0.654, 0.886, 0.916, 0.609, 0.563,
0.866, 1.0, 0.965, 0.614, 0.9751, 0.946]),
array([0.798, 0.569, 0.967, 0.657, 0.898, 0.931, 0.685, 0.625,
0.875, 1.0, 0.962, 0.669, 0.975, 0.970])]
# From "Bioestadistica para las ciencias de la salud" Xf=18.95 p<0.001:
x2 = [array([4,3,5,3,5,3,2,5,4,4,4,3]),
array([2,2,1,2,3,1,2,3,2,1,1,3]),
array([2,4,3,3,4,3,3,4,4,1,2,1]),
array([3,5,4,3,4,4,3,3,3,4,4,4])]
    # From Jerrold H. Zar, "Biostatistical Analysis" (example 12.6), Xf=10.68, 0.005 < p < 0.01:
    # The probability for this example is inexact because the chi-square
    # approximation of the Friedman statistic is used.
x3 = [array([7.0,9.9,8.5,5.1,10.3]),
array([5.3,5.7,4.7,3.5,7.7]),
array([4.9,7.6,5.5,2.8,8.4]),
array([8.8,8.9,8.1,3.3,9.1])]
assert_array_almost_equal(stats.friedmanchisquare(x1[0],x1[1],x1[2],x1[3]),
(10.2283464566929, 0.0167215803284414))
assert_array_almost_equal(stats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),
(18.9428571428571, 0.000280938375189499))
assert_array_almost_equal(stats.friedmanchisquare(x3[0],x3[1],x3[2],x3[3]),
(10.68, 0.0135882729582176))
assert_raises(ValueError, stats.friedmanchisquare,x3[0],x3[1])
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.friedmanchisquare(*x1)
check_named_results(res, attributes)
# test using mstats
assert_array_almost_equal(mstats.friedmanchisquare(x1[0], x1[1],
x1[2], x1[3]),
(10.2283464566929, 0.0167215803284414))
# the following fails
# assert_array_almost_equal(mstats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),
# (18.9428571428571, 0.000280938375189499))
assert_array_almost_equal(mstats.friedmanchisquare(x3[0], x3[1],
x3[2], x3[3]),
(10.68, 0.0135882729582176))
assert_raises(ValueError, mstats.friedmanchisquare,x3[0],x3[1])
def test_kstest():
# comparing with values from R
x = np.linspace(-1,1,9)
D,p = stats.kstest(x,'norm')
assert_almost_equal(D, 0.15865525393145705, 12)
assert_almost_equal(p, 0.95164069201518386, 1)
x = np.linspace(-15,15,9)
D,p = stats.kstest(x,'norm')
assert_almost_equal(D, 0.44435602715924361, 15)
assert_almost_equal(p, 0.038850140086788665, 8)
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.kstest(x, 'norm')
check_named_results(res, attributes)
    # the following tests rely on deterministically replicated rvs
np.random.seed(987654321)
x = stats.norm.rvs(loc=0.2, size=100)
D,p = stats.kstest(x, 'norm', mode='asymp')
assert_almost_equal(D, 0.12464329735846891, 15)
assert_almost_equal(p, 0.089444888711820769, 15)
assert_almost_equal(np.array(stats.kstest(x, 'norm', mode='asymp')),
np.array((0.12464329735846891, 0.089444888711820769)), 15)
assert_almost_equal(np.array(stats.kstest(x,'norm', alternative='less')),
np.array((0.12464329735846891, 0.040989164077641749)), 15)
# this 'greater' test fails with precision of decimal=14
assert_almost_equal(np.array(stats.kstest(x,'norm', alternative='greater')),
np.array((0.0072115233216310994, 0.98531158590396228)), 12)
# missing: no test that uses *args
class TestKSTwoSamples(object):
def _testOne(self, x1, x2, alternative, expected_statistic, expected_prob, mode='auto'):
result = stats.ks_2samp(x1, x2, alternative, mode=mode)
expected = np.array([expected_statistic, expected_prob])
assert_array_almost_equal(np.array(result), expected)
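    # The `mode` argument forwarded above selects how the p-value is computed:
    # 'exact' uses the exact null distribution of the statistic, 'asymp' uses
    # the Kolmogorov-Smirnov asymptotic approximation, and 'auto' (the default)
    # chooses between them based on the sample sizes.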
def testSmall(self):
self._testOne([0], [1], 'two-sided', 1.0/1, 1.0)
self._testOne([0], [1], 'greater', 1.0/1, 0.5)
self._testOne([0], [1], 'less', 0.0/1, 1.0)
self._testOne([1], [0], 'two-sided', 1.0/1, 1.0)
self._testOne([1], [0], 'greater', 0.0/1, 1.0)
self._testOne([1], [0], 'less', 1.0/1, 0.5)
def testTwoVsThree(self):
data1 = np.array([1.0, 2.0])
data1p = data1 + 0.01
data1m = data1 - 0.01
data2 = np.array([1.0, 2.0, 3.0])
self._testOne(data1p, data2, 'two-sided', 1.0 / 3, 1.0)
self._testOne(data1p, data2, 'greater', 1.0 / 3, 0.7)
self._testOne(data1p, data2, 'less', 1.0 / 3, 0.7)
self._testOne(data1m, data2, 'two-sided', 2.0 / 3, 0.6)
self._testOne(data1m, data2, 'greater', 2.0 / 3, 0.3)
self._testOne(data1m, data2, 'less', 0, 1.0)
def testTwoVsFour(self):
data1 = np.array([1.0, 2.0])
data1p = data1 + 0.01
data1m = data1 - 0.01
data2 = np.array([1.0, 2.0, 3.0, 4.0])
self._testOne(data1p, data2, 'two-sided', 2.0 / 4, 14.0/15)
self._testOne(data1p, data2, 'greater', 2.0 / 4, 8.0/15)
self._testOne(data1p, data2, 'less', 1.0 / 4, 12.0/15)
self._testOne(data1m, data2, 'two-sided', 3.0 / 4, 6.0/15)
self._testOne(data1m, data2, 'greater', 3.0 / 4, 3.0/15)
self._testOne(data1m, data2, 'less', 0, 1.0)
def test100_100(self):
x100 = np.linspace(1, 100, 100)
x100_2_p1 = x100 + 2 + 0.1
x100_2_m1 = x100 + 2 - 0.1
self._testOne(x100, x100_2_p1, 'two-sided', 3.0 / 100, 0.9999999999962055)
self._testOne(x100, x100_2_p1, 'greater', 3.0 / 100, 0.9143290114276248)
self._testOne(x100, x100_2_p1, 'less', 0, 1.0)
self._testOne(x100, x100_2_m1, 'two-sided', 2.0 / 100, 1.0)
self._testOne(x100, x100_2_m1, 'greater', 2.0 / 100, 0.960978450786184)
self._testOne(x100, x100_2_m1, 'less', 0, 1.0)
def test100_110(self):
x100 = np.linspace(1, 100, 100)
x110 = np.linspace(1, 100, 110)
x110_20_p1 = x110 + 20 + 0.1
x110_20_m1 = x110 + 20 - 0.1
# 100, 110
self._testOne(x100, x110_20_p1, 'two-sided', 232.0 / 1100, 0.015739183865607353)
self._testOne(x100, x110_20_p1, 'greater', 232.0 / 1100, 0.007869594319053203)
self._testOne(x100, x110_20_p1, 'less', 0, 1)
self._testOne(x100, x110_20_m1, 'two-sided', 229.0 / 1100, 0.017803803861026313)
self._testOne(x100, x110_20_m1, 'greater', 229.0 / 1100, 0.008901905958245056)
self._testOne(x100, x110_20_m1, 'less', 0.0, 1.0)
def testRepeatedValues(self):
x2233 = np.array([2] * 3 + [3] * 4 + [5] * 5 + [6] * 4, dtype=int)
x3344 = x2233 + 1
x2356 = np.array([2] * 3 + [3] * 4 + [5] * 10 + [6] * 4, dtype=int)
x3467 = np.array([3] * 10 + [4] * 2 + [6] * 10 + [7] * 4, dtype=int)
self._testOne(x2233, x3344, 'two-sided', 5.0/16, 0.4262934613454952)
self._testOne(x2233, x3344, 'greater', 5.0/16, 0.21465428276573786)
self._testOne(x2233, x3344, 'less', 0.0/16, 1.0)
self._testOne(x2356, x3467, 'two-sided', 190.0/21/26, 0.0919245790168125)
self._testOne(x2356, x3467, 'greater', 190.0/21/26, 0.0459633806858544)
self._testOne(x2356, x3467, 'less', 70.0/21/26, 0.6121593130022775)
def testEqualSizes(self):
data2 = np.array([1.0, 2.0, 3.0])
self._testOne(data2, data2+1, 'two-sided', 1.0/3, 1.0)
self._testOne(data2, data2+1, 'greater', 1.0/3, 0.75)
self._testOne(data2, data2+1, 'less', 0.0/3, 1.)
self._testOne(data2, data2+0.5, 'two-sided', 1.0/3, 1.0)
self._testOne(data2, data2+0.5, 'greater', 1.0/3, 0.75)
self._testOne(data2, data2+0.5, 'less', 0.0/3, 1.)
self._testOne(data2, data2-0.5, 'two-sided', 1.0/3, 1.0)
self._testOne(data2, data2-0.5, 'greater', 0.0/3, 1.0)
self._testOne(data2, data2-0.5, 'less', 1.0/3, 0.75)
def testMiddlingBoth(self):
# 500, 600
n1, n2 = 500, 600
delta = 1.0/n1/n2/2/2
x = np.linspace(1, 200, n1) - delta
y = np.linspace(2, 200, n2)
self._testOne(x, y, 'two-sided', 2000.0 / n1 / n2, 1.0, mode='auto')
self._testOne(x, y, 'two-sided', 2000.0 / n1 / n2, 1.0, mode='asymp')
self._testOne(x, y, 'greater', 2000.0 / n1 / n2, 0.9697596024683929, mode='asymp')
self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, mode='asymp')
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "ks_2samp: Exact calculation overflowed. Switching to mode=asymp")
self._testOne(x, y, 'greater', 2000.0 / n1 / n2, 0.9697596024683929, mode='exact')
self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, mode='exact')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, mode='exact')
_check_warnings(w, RuntimeWarning, 1)
def testMediumBoth(self):
# 1000, 1100
n1, n2 = 1000, 1100
delta = 1.0/n1/n2/2/2
x = np.linspace(1, 200, n1) - delta
y = np.linspace(2, 200, n2)
self._testOne(x, y, 'two-sided', 6600.0 / n1 / n2, 1.0, mode='asymp')
self._testOne(x, y, 'two-sided', 6600.0 / n1 / n2, 1.0, mode='auto')
self._testOne(x, y, 'greater', 6600.0 / n1 / n2, 0.9573185808092622, mode='asymp')
self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, mode='asymp')
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "ks_2samp: Exact calculation overflowed. Switching to mode=asymp")
self._testOne(x, y, 'greater', 6600.0 / n1 / n2, 0.9573185808092622, mode='exact')
self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, mode='exact')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, mode='exact')
_check_warnings(w, RuntimeWarning, 1)
def testLarge(self):
# 10000, 110
n1, n2 = 10000, 110
lcm = n1*11.0
delta = 1.0/n1/n2/2/2
x = np.linspace(1, 200, n1) - delta
y = np.linspace(2, 100, n2)
self._testOne(x, y, 'two-sided', 55275.0 / lcm, 4.2188474935755949e-15)
self._testOne(x, y, 'greater', 561.0 / lcm, 0.99115454582047591)
self._testOne(x, y, 'less', 55275.0 / lcm, 3.1317328311518713e-26)
@pytest.mark.slow
def testLargeBoth(self):
# 10000, 11000
n1, n2 = 10000, 11000
lcm = n1*11.0
delta = 1.0/n1/n2/2/2
x = np.linspace(1, 200, n1) - delta
y = np.linspace(2, 200, n2)
self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.99915729949018561, mode='asymp')
self._testOne(x, y, 'two-sided', 563.0 / lcm, 1.0, mode='exact')
self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.99915729949018561, mode='auto')
self._testOne(x, y, 'greater', 563.0 / lcm, 0.7561851877420673)
self._testOne(x, y, 'less', 10.0 / lcm, 0.9998239693191724)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "ks_2samp: Exact calculation overflowed. Switching to mode=asymp")
self._testOne(x, y, 'greater', 563.0 / lcm, 0.7561851877420673, mode='exact')
self._testOne(x, y, 'less', 10.0 / lcm, 0.9998239693191724, mode='exact')
def testNamedAttributes(self):
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.ks_2samp([1, 2], [3])
check_named_results(res, attributes)
def test_some_code_paths(self):
# Check that some code paths are executed
from scipy.stats.stats import _count_paths_outside_method, _compute_prob_inside_method
_compute_prob_inside_method(1, 1, 1, 1)
_count_paths_outside_method(1000, 1, 1, 1001)
assert_raises(FloatingPointError, _count_paths_outside_method, 1100, 1099, 1, 1)
assert_raises(FloatingPointError, _count_paths_outside_method, 2000, 1000, 1, 1)
def test_argument_checking(self):
# Check that an empty array causes a ValueError
assert_raises(ValueError, stats.ks_2samp, [], [1])
assert_raises(ValueError, stats.ks_2samp, [1], [])
assert_raises(ValueError, stats.ks_2samp, [], [])
def test_ttest_rel():
# regression test
tr,pr = 0.81248591389165692, 0.41846234511362157
tpr = ([tr,-tr],[pr,pr])
rvs1 = np.linspace(1,100,100)
rvs2 = np.linspace(1.01,99.989,100)
rvs1_2D = np.array([np.linspace(1,100,100), np.linspace(1.01,99.989,100)])
rvs2_2D = np.array([np.linspace(1.01,99.989,100), np.linspace(1,100,100)])
t,p = stats.ttest_rel(rvs1, rvs2, axis=0)
assert_array_almost_equal([t,p],(tr,pr))
t,p = stats.ttest_rel(rvs1_2D.T, rvs2_2D.T, axis=0)
assert_array_almost_equal([t,p],tpr)
t,p = stats.ttest_rel(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal([t,p],tpr)
# test scalars
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
t, p = stats.ttest_rel(4., 3.)
assert_(np.isnan(t))
assert_(np.isnan(p))
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.ttest_rel(rvs1, rvs2, axis=0)
check_named_results(res, attributes)
# test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_rel(rvs1_3D, rvs2_3D, axis=1)
assert_array_almost_equal(np.abs(t), tr)
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t,p = stats.ttest_rel(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2)
assert_array_almost_equal(np.abs(t), tr)
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
# check nan policy
np.random.seed(12345678)
x = stats.norm.rvs(loc=5, scale=10, size=501)
x[500] = np.nan
y = (stats.norm.rvs(loc=5, scale=10, size=501) +
stats.norm.rvs(scale=0.2, size=501))
y[500] = np.nan
with np.errstate(invalid="ignore"):
assert_array_equal(stats.ttest_rel(x, x), (np.nan, np.nan))
assert_array_almost_equal(stats.ttest_rel(x, y, nan_policy='omit'),
(0.25299925303978066, 0.8003729814201519))
assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='raise')
assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='foobar')
# test zero division problem
t, p = stats.ttest_rel([0, 0, 0], [1, 1, 1])
assert_equal((np.abs(t), p), (np.inf, 0))
with np.errstate(invalid="ignore"):
assert_equal(stats.ttest_rel([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))
# check that nan in input array result in nan output
anan = np.array([[1, np.nan], [-1, 1]])
assert_equal(stats.ttest_rel(anan, np.zeros((2, 2))),
([0, np.nan], [1, np.nan]))
# test incorrect input shape raise an error
x = np.arange(24)
assert_raises(ValueError, stats.ttest_rel, x.reshape((8, 3)),
x.reshape((2, 3, 4)))
def test_ttest_rel_nan_2nd_arg():
# regression test for gh-6134: nans in the second arg were not handled
x = [np.nan, 2.0, 3.0, 4.0]
y = [1.0, 2.0, 1.0, 2.0]
r1 = stats.ttest_rel(x, y, nan_policy='omit')
r2 = stats.ttest_rel(y, x, nan_policy='omit')
assert_allclose(r2.statistic, -r1.statistic, atol=1e-15)
assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15)
# NB: arguments are paired when NaNs are dropped
r3 = stats.ttest_rel(y[1:], x[1:])
assert_allclose(r2, r3, atol=1e-15)
# .. and this is consistent with R. R code:
# x = c(NA, 2.0, 3.0, 4.0)
# y = c(1.0, 2.0, 1.0, 2.0)
# t.test(x, y, paired=TRUE)
assert_allclose(r2, (-2, 0.1835), atol=1e-4)
def _desc_stats(x1, x2, axis=0):
def _stats(x, axis=0):
x = np.asarray(x)
mu = np.mean(x, axis=axis)
std = np.std(x, axis=axis, ddof=1)
nobs = x.shape[axis]
return mu, std, nobs
return _stats(x1, axis) + _stats(x2, axis)
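# _desc_stats packs (mean1, std1, nobs1, mean2, std2, nobs2) in exactly the
# positional order expected by stats.ttest_ind_from_stats, so the tests below
# can simply unpack it, e.g.:
#     stats.ttest_ind_from_stats(*_desc_stats(rvs1, rvs2), equal_var=False)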
def test_ttest_ind():
# regression test
tr = 1.0912746897927283
pr = 0.27647818616351882
tpr = ([tr,-tr],[pr,pr])
rvs2 = np.linspace(1,100,100)
rvs1 = np.linspace(5,105,100)
rvs1_2D = np.array([rvs1, rvs2])
rvs2_2D = np.array([rvs2, rvs1])
t,p = stats.ttest_ind(rvs1, rvs2, axis=0)
assert_array_almost_equal([t,p],(tr,pr))
# test from_stats API
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
rvs2)),
[t, p])
t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args),
[t, p])
t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args),
[t, p])
# test scalars
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
t, p = stats.ttest_ind(4., 3.)
assert_(np.isnan(t))
assert_(np.isnan(p))
# test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1)
assert_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2)
assert_array_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
# check nan policy
np.random.seed(12345678)
x = stats.norm.rvs(loc=5, scale=10, size=501)
x[500] = np.nan
y = stats.norm.rvs(loc=5, scale=10, size=500)
with np.errstate(invalid="ignore"):
assert_array_equal(stats.ttest_ind(x, y), (np.nan, np.nan))
assert_array_almost_equal(stats.ttest_ind(x, y, nan_policy='omit'),
(0.24779670949091914, 0.80434267337517906))
assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='raise')
assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='foobar')
# test zero division problem
t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1])
assert_equal((np.abs(t), p), (np.inf, 0))
with np.errstate(invalid="ignore"):
assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))
# check that nan in input array result in nan output
anan = np.array([[1, np.nan], [-1, 1]])
assert_equal(stats.ttest_ind(anan, np.zeros((2, 2))),
([0, np.nan], [1, np.nan]))
def test_ttest_ind_with_uneq_var():
# check vs. R
a = (1, 2, 3)
b = (1.1, 2.9, 4.2)
pr = 0.53619490753126731
tr = -0.68649512735572582
t, p = stats.ttest_ind(a, b, equal_var=False)
assert_array_almost_equal([t,p], [tr, pr])
# test from desc stats API
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b),
equal_var=False),
[t, p])
a = (1, 2, 3, 4)
pr = 0.84354139131608286
tr = -0.2108663315950719
t, p = stats.ttest_ind(a, b, equal_var=False)
assert_array_almost_equal([t,p], [tr, pr])
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b),
equal_var=False),
[t, p])
# regression test
tr = 1.0912746897927283
tr_uneq_n = 0.66745638708050492
pr = 0.27647831993021388
pr_uneq_n = 0.50873585065616544
tpr = ([tr,-tr],[pr,pr])
rvs3 = np.linspace(1,100, 25)
rvs2 = np.linspace(1,100,100)
rvs1 = np.linspace(5,105,100)
rvs1_2D = np.array([rvs1, rvs2])
rvs2_2D = np.array([rvs2, rvs1])
t,p = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False)
assert_array_almost_equal([t,p],(tr,pr))
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
rvs2),
equal_var=False),
(t, p))
t,p = stats.ttest_ind(rvs1, rvs3, axis=0, equal_var=False)
assert_array_almost_equal([t,p], (tr_uneq_n, pr_uneq_n))
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
rvs3),
equal_var=False),
(t, p))
t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, equal_var=False)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args,
equal_var=False),
(t, p))
t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1, equal_var=False)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args,
equal_var=False),
(t, p))
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False)
check_named_results(res, attributes)
# test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1, equal_var=False)
assert_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
args = _desc_stats(rvs1_3D, rvs2_3D, axis=1)
t, p = stats.ttest_ind_from_stats(*args, equal_var=False)
assert_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2),
axis=2, equal_var=False)
assert_array_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
args = _desc_stats(np.rollaxis(rvs1_3D, 2),
np.rollaxis(rvs2_3D, 2), axis=2)
t, p = stats.ttest_ind_from_stats(*args, equal_var=False)
assert_array_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
# test zero division problem
t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)
assert_equal((np.abs(t), p), (np.inf, 0))
with np.errstate(all='ignore'):
assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0], equal_var=False),
(np.nan, np.nan))
# check that nan in input array result in nan output
anan = np.array([[1, np.nan], [-1, 1]])
assert_equal(stats.ttest_ind(anan, np.zeros((2, 2)), equal_var=False),
([0, np.nan], [1, np.nan]))
def test_ttest_ind_nan_2nd_arg():
# regression test for gh-6134: nans in the second arg were not handled
x = [np.nan, 2.0, 3.0, 4.0]
y = [1.0, 2.0, 1.0, 2.0]
r1 = stats.ttest_ind(x, y, nan_policy='omit')
r2 = stats.ttest_ind(y, x, nan_policy='omit')
assert_allclose(r2.statistic, -r1.statistic, atol=1e-15)
assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15)
# NB: arguments are not paired when NaNs are dropped
r3 = stats.ttest_ind(y, x[1:])
assert_allclose(r2, r3, atol=1e-15)
# .. and this is consistent with R. R code:
# x = c(NA, 2.0, 3.0, 4.0)
# y = c(1.0, 2.0, 1.0, 2.0)
# t.test(x, y, var.equal=TRUE)
assert_allclose(r2, (-2.5354627641855498, 0.052181400457057901), atol=1e-15)
def test_gh5686():
mean1, mean2 = np.array([1, 2]), np.array([3, 4])
std1, std2 = np.array([5, 3]), np.array([4, 5])
nobs1, nobs2 = np.array([130, 140]), np.array([100, 150])
# This will raise a TypeError unless gh-5686 is fixed.
stats.ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2)
def test_ttest_1samp_new():
n1, n2, n3 = (10,15,20)
rvn1 = stats.norm.rvs(loc=5,scale=10,size=(n1,n2,n3))
# check multidimensional array and correct axis handling
# deterministic rvn1 and rvn2 would be better as in test_ttest_rel
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n2,n3)),axis=0)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=0)
t3,p3 = stats.ttest_1samp(rvn1[:,0,0], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n2,n3))
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n3)),axis=1)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=1)
t3,p3 = stats.ttest_1samp(rvn1[0,:,0], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n1,n3))
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n2)),axis=2)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=2)
t3,p3 = stats.ttest_1samp(rvn1[0,0,:], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n1,n2))
# test zero division problem
t, p = stats.ttest_1samp([0, 0, 0], 1)
assert_equal((np.abs(t), p), (np.inf, 0))
with np.errstate(all='ignore'):
assert_equal(stats.ttest_1samp([0, 0, 0], 0), (np.nan, np.nan))
# check that nan in input array result in nan output
anan = np.array([[1, np.nan],[-1, 1]])
assert_equal(stats.ttest_1samp(anan, 0), ([0, np.nan], [1, np.nan]))
class TestDescribe(object):
def test_describe_scalar(self):
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
n, mm, m, v, sk, kurt = stats.describe(4.)
assert_equal(n, 1)
assert_equal(mm, (4.0, 4.0))
assert_equal(m, 4.0)
assert_(np.isnan(v))
assert_array_almost_equal(sk, 0.0, decimal=13)
assert_array_almost_equal(kurt, -3.0, decimal=13)
def test_describe_numbers(self):
x = np.vstack((np.ones((3,4)), np.full((2, 4), 2)))
nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.]))
mc = np.array([1.4, 1.4, 1.4, 1.4])
vc = np.array([0.3, 0.3, 0.3, 0.3])
skc = [0.40824829046386357] * 4
kurtc = [-1.833333333333333] * 4
n, mm, m, v, sk, kurt = stats.describe(x)
assert_equal(n, nc)
assert_equal(mm, mmc)
assert_equal(m, mc)
assert_equal(v, vc)
assert_array_almost_equal(sk, skc, decimal=13)
assert_array_almost_equal(kurt, kurtc, decimal=13)
n, mm, m, v, sk, kurt = stats.describe(x.T, axis=1)
assert_equal(n, nc)
assert_equal(mm, mmc)
assert_equal(m, mc)
assert_equal(v, vc)
assert_array_almost_equal(sk, skc, decimal=13)
assert_array_almost_equal(kurt, kurtc, decimal=13)
x = np.arange(10.)
x[9] = np.nan
nc, mmc = (9, (0.0, 8.0))
mc = 4.0
vc = 7.5
skc = 0.0
kurtc = -1.2300000000000002
n, mm, m, v, sk, kurt = stats.describe(x, nan_policy='omit')
assert_equal(n, nc)
assert_equal(mm, mmc)
assert_equal(m, mc)
assert_equal(v, vc)
assert_array_almost_equal(sk, skc)
assert_array_almost_equal(kurt, kurtc, decimal=13)
assert_raises(ValueError, stats.describe, x, nan_policy='raise')
assert_raises(ValueError, stats.describe, x, nan_policy='foobar')
def test_describe_result_attributes(self):
actual = stats.describe(np.arange(5))
attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis')
check_named_results(actual, attributes)
def test_describe_ddof(self):
x = np.vstack((np.ones((3, 4)), np.full((2, 4), 2)))
nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.]))
mc = np.array([1.4, 1.4, 1.4, 1.4])
vc = np.array([0.24, 0.24, 0.24, 0.24])
skc = [0.40824829046386357] * 4
kurtc = [-1.833333333333333] * 4
n, mm, m, v, sk, kurt = stats.describe(x, ddof=0)
assert_equal(n, nc)
assert_allclose(mm, mmc, rtol=1e-15)
assert_allclose(m, mc, rtol=1e-15)
assert_allclose(v, vc, rtol=1e-15)
assert_array_almost_equal(sk, skc, decimal=13)
assert_array_almost_equal(kurt, kurtc, decimal=13)
def test_describe_axis_none(self):
x = np.vstack((np.ones((3, 4)), np.full((2, 4), 2)))
# expected values
e_nobs, e_minmax = (20, (1.0, 2.0))
e_mean = 1.3999999999999999
e_var = 0.25263157894736848
e_skew = 0.4082482904638634
e_kurt = -1.8333333333333333
# actual values
a = stats.describe(x, axis=None)
assert_equal(a.nobs, e_nobs)
assert_almost_equal(a.minmax, e_minmax)
assert_almost_equal(a.mean, e_mean)
assert_almost_equal(a.variance, e_var)
assert_array_almost_equal(a.skewness, e_skew, decimal=13)
assert_array_almost_equal(a.kurtosis, e_kurt, decimal=13)
def test_describe_empty(self):
assert_raises(ValueError, stats.describe, [])
def test_normalitytests():
assert_raises(ValueError, stats.skewtest, 4.)
assert_raises(ValueError, stats.kurtosistest, 4.)
assert_raises(ValueError, stats.normaltest, 4.)
# numbers verified with R: dagoTest in package fBasics
st_normal, st_skew, st_kurt = (3.92371918, 1.98078826, -0.01403734)
pv_normal, pv_skew, pv_kurt = (0.14059673, 0.04761502, 0.98880019)
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
attributes = ('statistic', 'pvalue')
assert_array_almost_equal(stats.normaltest(x), (st_normal, pv_normal))
check_named_results(stats.normaltest(x), attributes)
assert_array_almost_equal(stats.skewtest(x), (st_skew, pv_skew))
check_named_results(stats.skewtest(x), attributes)
assert_array_almost_equal(stats.kurtosistest(x), (st_kurt, pv_kurt))
check_named_results(stats.kurtosistest(x), attributes)
# Test axis=None (equal to axis=0 for 1-D input)
assert_array_almost_equal(stats.normaltest(x, axis=None),
(st_normal, pv_normal))
assert_array_almost_equal(stats.skewtest(x, axis=None),
(st_skew, pv_skew))
assert_array_almost_equal(stats.kurtosistest(x, axis=None),
(st_kurt, pv_kurt))
x = np.arange(10.)
x[9] = np.nan
with np.errstate(invalid="ignore"):
assert_array_equal(stats.skewtest(x), (np.nan, np.nan))
expected = (1.0184643553962129, 0.30845733195153502)
assert_array_almost_equal(stats.skewtest(x, nan_policy='omit'), expected)
with np.errstate(all='ignore'):
assert_raises(ValueError, stats.skewtest, x, nan_policy='raise')
assert_raises(ValueError, stats.skewtest, x, nan_policy='foobar')
x = np.arange(30.)
x[29] = np.nan
with np.errstate(all='ignore'):
assert_array_equal(stats.kurtosistest(x), (np.nan, np.nan))
expected = (-2.2683547379505273, 0.023307594135872967)
assert_array_almost_equal(stats.kurtosistest(x, nan_policy='omit'),
expected)
assert_raises(ValueError, stats.kurtosistest, x, nan_policy='raise')
assert_raises(ValueError, stats.kurtosistest, x, nan_policy='foobar')
with np.errstate(all='ignore'):
assert_array_equal(stats.normaltest(x), (np.nan, np.nan))
expected = (6.2260409514287449, 0.04446644248650191)
assert_array_almost_equal(stats.normaltest(x, nan_policy='omit'), expected)
assert_raises(ValueError, stats.normaltest, x, nan_policy='raise')
assert_raises(ValueError, stats.normaltest, x, nan_policy='foobar')
    # regression test for issue gh-9033: x is clearly non-normal, but the power
    # of a negative denominator needs to be handled correctly to reject normality
counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167]
x = np.hstack([np.full(c, i) for i, c in enumerate(counts)])
assert_equal(stats.kurtosistest(x)[1] < 0.01, True)
class TestRankSums(object):
def test_ranksums_result_attributes(self):
res = stats.ranksums(np.arange(5), np.arange(25))
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestJarqueBera(object):
def test_jarque_bera_stats(self):
np.random.seed(987654321)
x = np.random.normal(0, 1, 100000)
y = np.random.chisquare(10000, 100000)
z = np.random.rayleigh(1, 100000)
assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(y)[1])
assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(z)[1])
assert_(stats.jarque_bera(y)[1] > stats.jarque_bera(z)[1])
def test_jarque_bera_array_like(self):
np.random.seed(987654321)
x = np.random.normal(0, 1, 100000)
JB1, p1 = stats.jarque_bera(list(x))
JB2, p2 = stats.jarque_bera(tuple(x))
JB3, p3 = stats.jarque_bera(x.reshape(2, 50000))
assert_(JB1 == JB2 == JB3)
assert_(p1 == p2 == p3)
def test_jarque_bera_size(self):
assert_raises(ValueError, stats.jarque_bera, [])
def test_skewtest_too_few_samples():
# Regression test for ticket #1492.
# skewtest requires at least 8 samples; 7 should raise a ValueError.
x = np.arange(7.0)
assert_raises(ValueError, stats.skewtest, x)
def test_kurtosistest_too_few_samples():
# Regression test for ticket #1425.
# kurtosistest requires at least 5 samples; 4 should raise a ValueError.
x = np.arange(4.0)
assert_raises(ValueError, stats.kurtosistest, x)
class TestMannWhitneyU(object):
X = [19.8958398126694, 19.5452691647182, 19.0577309166425, 21.716543054589,
20.3269502208702, 20.0009273294025, 19.3440043632957, 20.4216806548105,
19.0649894736528, 18.7808043120398, 19.3680942943298, 19.4848044069953,
20.7514611265663, 19.0894948874598, 19.4975522356628, 18.9971170734274,
20.3239606288208, 20.6921298083835, 19.0724259532507, 18.9825187935021,
19.5144462609601, 19.8256857844223, 20.5174677102032, 21.1122407995892,
17.9490854922535, 18.2847521114727, 20.1072217648826, 18.6439891962179,
20.4970638083542, 19.5567594734914]
Y = [19.2790668029091, 16.993808441865, 18.5416338448258, 17.2634018833575,
19.1577183624616, 18.5119655377495, 18.6068455037221, 18.8358343362655,
19.0366413269742, 18.1135025515417, 19.2201873866958, 17.8344909022841,
18.2894380745856, 18.6661374133922, 19.9688601693252, 16.0672254617636,
19.00596360572, 19.201561539032, 19.0487501090183, 19.0847908674356]
significant = 14
def test_mannwhitneyu_one_sided(self):
u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='less')
u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='greater')
u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative='greater')
u4, p4 = stats.mannwhitneyu(self.Y, self.X, alternative='less')
assert_equal(p1, p2)
assert_equal(p3, p4)
assert_(p1 != p3)
assert_equal(u1, 498)
assert_equal(u2, 102)
assert_equal(u3, 498)
assert_equal(u4, 102)
assert_approx_equal(p1, 0.999957683256589, significant=self.significant)
assert_approx_equal(p3, 4.5941632666275e-05, significant=self.significant)
def test_mannwhitneyu_two_sided(self):
u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='two-sided')
u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='two-sided')
assert_equal(p1, p2)
assert_equal(u1, 498)
assert_equal(u2, 102)
assert_approx_equal(p1, 9.188326533255e-05,
significant=self.significant)
def test_mannwhitneyu_default(self):
# The default value for alternative is None
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
"Calling `mannwhitneyu` without .*`alternative`")
u1, p1 = stats.mannwhitneyu(self.X, self.Y)
u2, p2 = stats.mannwhitneyu(self.Y, self.X)
u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative=None)
assert_equal(p1, p2)
assert_equal(p1, p3)
assert_equal(u1, 102)
assert_equal(u2, 102)
assert_equal(u3, 102)
assert_approx_equal(p1, 4.5941632666275e-05,
significant=self.significant)
def test_mannwhitneyu_no_correct_one_sided(self):
u1, p1 = stats.mannwhitneyu(self.X, self.Y, False,
alternative='less')
u2, p2 = stats.mannwhitneyu(self.Y, self.X, False,
alternative='greater')
u3, p3 = stats.mannwhitneyu(self.X, self.Y, False,
alternative='greater')
u4, p4 = stats.mannwhitneyu(self.Y, self.X, False,
alternative='less')
assert_equal(p1, p2)
assert_equal(p3, p4)
assert_(p1 != p3)
assert_equal(u1, 498)
assert_equal(u2, 102)
assert_equal(u3, 498)
assert_equal(u4, 102)
assert_approx_equal(p1, 0.999955905990004, significant=self.significant)
assert_approx_equal(p3, 4.40940099958089e-05, significant=self.significant)
def test_mannwhitneyu_no_correct_two_sided(self):
u1, p1 = stats.mannwhitneyu(self.X, self.Y, False,
alternative='two-sided')
u2, p2 = stats.mannwhitneyu(self.Y, self.X, False,
alternative='two-sided')
assert_equal(p1, p2)
assert_equal(u1, 498)
assert_equal(u2, 102)
assert_approx_equal(p1, 8.81880199916178e-05,
significant=self.significant)
def test_mannwhitneyu_no_correct_default(self):
# The default value for alternative is None
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
"Calling `mannwhitneyu` without .*`alternative`")
u1, p1 = stats.mannwhitneyu(self.X, self.Y, False)
u2, p2 = stats.mannwhitneyu(self.Y, self.X, False)
u3, p3 = stats.mannwhitneyu(self.X, self.Y, False,
alternative=None)
assert_equal(p1, p2)
assert_equal(p1, p3)
assert_equal(u1, 102)
assert_equal(u2, 102)
assert_equal(u3, 102)
assert_approx_equal(p1, 4.40940099958089e-05,
significant=self.significant)
def test_mannwhitneyu_ones(self):
x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1.])
y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
1., 1., 1., 1.])
# p-value verified with matlab and R to 5 significant digits
assert_array_almost_equal(stats.stats.mannwhitneyu(x, y,
alternative='less'),
(16980.5, 2.8214327656317373e-005),
decimal=12)
def test_mannwhitneyu_result_attributes(self):
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.mannwhitneyu(self.X, self.Y, alternative="less")
check_named_results(res, attributes)
def test_pointbiserial():
# same as mstats test except for the nan
# Test data: https://web.archive.org/web/20060504220742/https://support.sas.com/ctx/samples/index.jsp?sid=490&tab=output
x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
0,0,0,0,1]
y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
0.8,0.7,0.6,0.5,0.2,0.2,0.1]
assert_almost_equal(stats.pointbiserialr(x, y)[0], 0.36149, 5)
# test for namedtuple attribute results
attributes = ('correlation', 'pvalue')
res = stats.pointbiserialr(x, y)
check_named_results(res, attributes)
def test_obrientransform():
# A couple tests calculated by hand.
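    # For reference, the hand calculation assumes the standard O'Brien (1979)
    # transform of x_i in a sample of size n with mean m and ddof=1 variance v:
    #     ((n - 1.5) * n * (x_i - m)**2 - 0.5 * v * (n - 1)) / ((n - 1) * (n - 2))
    # e.g. for x1 = [0, 2, 4]: n=3, m=2, v=4, giving [7, -2, 7] as asserted below.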
x1 = np.array([0, 2, 4])
t1 = stats.obrientransform(x1)
expected = [7, -2, 7]
assert_allclose(t1[0], expected)
x2 = np.array([0, 3, 6, 9])
t2 = stats.obrientransform(x2)
expected = np.array([30, 0, 0, 30])
assert_allclose(t2[0], expected)
# Test two arguments.
a, b = stats.obrientransform(x1, x2)
assert_equal(a, t1[0])
assert_equal(b, t2[0])
# Test three arguments.
a, b, c = stats.obrientransform(x1, x2, x1)
assert_equal(a, t1[0])
assert_equal(b, t2[0])
assert_equal(c, t1[0])
# This is a regression test to check np.var replacement.
# The author of this test didn't separately verify the numbers.
x1 = np.arange(5)
result = np.array(
[[5.41666667, 1.04166667, -0.41666667, 1.04166667, 5.41666667],
[21.66666667, 4.16666667, -1.66666667, 4.16666667, 21.66666667]])
assert_array_almost_equal(stats.obrientransform(x1, 2*x1), result, decimal=8)
# Example from "O'Brien Test for Homogeneity of Variance"
# by Herve Abdi.
values = range(5, 11)
reps = np.array([5, 11, 9, 3, 2, 2])
data = np.repeat(values, reps)
transformed_values = np.array([3.1828, 0.5591, 0.0344,
1.6086, 5.2817, 11.0538])
expected = np.repeat(transformed_values, reps)
result = stats.obrientransform(data)
assert_array_almost_equal(result[0], expected, decimal=4)
def check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
# Note this doesn't test when axis is not specified
x = stats.gmean(array_like, axis=axis, dtype=dtype)
assert_allclose(x, desired, rtol=rtol)
assert_equal(x.dtype, dtype)
def check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
x = stats.hmean(array_like, axis=axis, dtype=dtype)
assert_allclose(x, desired, rtol=rtol)
assert_equal(x.dtype, dtype)
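# The expected values in the mean tests below follow the usual definitions
# (a sketch for reference, not taken from the implementation): for a sample
# `a` of size n,
#     gmean(a) = prod(a) ** (1.0 / n)      # equivalently exp(mean(log(a)))
#     hmean(a) = n / sum(1.0 / a)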
class TestHarMean(object):
def test_1d_list(self):
# Test a 1d list
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
desired = 34.1417152147
check_equal_hmean(a, desired)
a = [1, 2, 3, 4]
desired = 4. / (1. / 1 + 1. / 2 + 1. / 3 + 1. / 4)
check_equal_hmean(a, desired)
def test_1d_array(self):
# Test a 1d array
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
desired = 34.1417152147
check_equal_hmean(a, desired)
# Note the next tests use axis=None as default, not axis=0
def test_2d_list(self):
# Test a 2d list
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = 38.6696271841
check_equal_hmean(a, desired)
def test_2d_array(self):
# Test a 2d array
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = 38.6696271841
check_equal_hmean(np.array(a), desired)
def test_2d_axis0(self):
# Test a 2d list with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545])
check_equal_hmean(a, desired, axis=0)
def test_2d_axis1(self):
# Test a 2d list with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = np.array([19.2, 63.03939962, 103.80078637])
check_equal_hmean(a, desired, axis=1)
def test_2d_matrix_axis0(self):
# Test a 2d list with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = matrix([[22.88135593, 39.13043478, 52.90076336, 65.45454545]])
check_equal_hmean(matrix(a), desired, axis=0)
def test_2d_matrix_axis1(self):
# Test a 2d list with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = matrix([[19.2, 63.03939962, 103.80078637]]).T
check_equal_hmean(matrix(a), desired, axis=1)
class TestGeoMean(object):
def test_1d_list(self):
# Test a 1d list
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
desired = 45.2872868812
check_equal_gmean(a, desired)
a = [1, 2, 3, 4]
desired = power(1 * 2 * 3 * 4, 1. / 4.)
check_equal_gmean(a, desired, rtol=1e-14)
def test_1d_array(self):
# Test a 1d array
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
desired = 45.2872868812
check_equal_gmean(a, desired)
a = array([1, 2, 3, 4], float32)
desired = power(1 * 2 * 3 * 4, 1. / 4.)
check_equal_gmean(a, desired, dtype=float32)
# Note the next tests use axis=None as default, not axis=0
def test_2d_list(self):
# Test a 2d list
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = 52.8885199
check_equal_gmean(a, desired)
def test_2d_array(self):
# Test a 2d array
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = 52.8885199
check_equal_gmean(array(a), desired)
def test_2d_axis0(self):
# Test a 2d list with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371])
check_equal_gmean(a, desired, axis=0)
a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
desired = array([1, 2, 3, 4])
check_equal_gmean(a, desired, axis=0, rtol=1e-14)
def test_2d_axis1(self):
# Test a 2d list with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = np.array([22.13363839, 64.02171746, 104.40086817])
check_equal_gmean(a, desired, axis=1)
a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
v = power(1 * 2 * 3 * 4, 1. / 4.)
desired = array([v, v, v])
check_equal_gmean(a, desired, axis=1, rtol=1e-14)
def test_2d_matrix_axis0(self):
# Test a 2d list with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = matrix([[35.56893304, 49.32424149, 61.3579244, 72.68482371]])
check_equal_gmean(matrix(a), desired, axis=0)
a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
desired = matrix([1, 2, 3, 4])
check_equal_gmean(matrix(a), desired, axis=0, rtol=1e-14)
a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
desired = matrix(stats.gmean(a, axis=0))
check_equal_gmean(matrix(a), desired, axis=0, rtol=1e-14)
def test_2d_matrix_axis1(self):
# Test a 2d list with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = matrix([[22.13363839, 64.02171746, 104.40086817]]).T
check_equal_gmean(matrix(a), desired, axis=1)
a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
v = power(1 * 2 * 3 * 4, 1. / 4.)
desired = matrix([[v], [v], [v]])
check_equal_gmean(matrix(a), desired, axis=1, rtol=1e-14)
def test_large_values(self):
a = array([1e100, 1e200, 1e300])
desired = 1e200
check_equal_gmean(a, desired, rtol=1e-13)
def test_1d_list0(self):
# Test a 1d list with zero element
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 0]
desired = 0.0 # due to exp(-inf)=0
olderr = np.seterr(all='ignore')
try:
check_equal_gmean(a, desired)
finally:
np.seterr(**olderr)
def test_1d_array0(self):
# Test a 1d array with zero element
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
desired = 0.0 # due to exp(-inf)=0
olderr = np.seterr(all='ignore')
try:
check_equal_gmean(a, desired)
finally:
np.seterr(**olderr)
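# Worked note (added for clarity, not an upstream test): the behaviour checked
# above -- 1e100..1e300 staying finite and a single zero collapsing the result
# to 0 -- is what you get from computing the geometric mean in log space,
#     gmean(a) = exp(mean(log(a))),
# because log(0) = -inf and exp(-inf) = 0.  A minimal sketch:
def _gmean_by_hand(a):
    with np.errstate(divide='ignore'):
        return np.exp(np.mean(np.log(np.asarray(a, dtype=float))))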
class TestGeometricStandardDeviation(object):
# must add 1 as `gstd` is only defined for positive values
array_1d = np.arange(2 * 3 * 4) + 1
gstd_array_1d = 2.294407613602
array_3d = array_1d.reshape(2, 3, 4)
def test_1d_array(self):
gstd_actual = stats.gstd(self.array_1d)
assert_allclose(gstd_actual, self.gstd_array_1d)
def test_1d_numeric_array_like_input(self):
gstd_actual = stats.gstd(tuple(self.array_1d))
assert_allclose(gstd_actual, self.gstd_array_1d)
def test_raises_value_error_non_array_like_input(self):
with pytest.raises(ValueError, match='Invalid array input'):
stats.gstd('This should fail as it can not be cast to an array.')
def test_raises_value_error_zero_entry(self):
with pytest.raises(ValueError, match='Non positive value'):
stats.gstd(np.append(self.array_1d, [0]))
def test_raises_value_error_negative_entry(self):
with pytest.raises(ValueError, match='Non positive value'):
stats.gstd(np.append(self.array_1d, [-1]))
def test_raises_value_error_inf_entry(self):
with pytest.raises(ValueError, match='Infinite value'):
stats.gstd(np.append(self.array_1d, [np.inf]))
    def test_propagates_nan_values(self):
a = array([[1, 1, 1, 16], [np.nan, 1, 2, 3]])
gstd_actual = stats.gstd(a, axis=1)
assert_allclose(gstd_actual, np.array([4, np.nan]))
def test_ddof_equal_to_number_of_observations(self):
with pytest.raises(ValueError, match='Degrees of freedom <= 0'):
stats.gstd(self.array_1d, ddof=self.array_1d.size)
def test_3d_array(self):
gstd_actual = stats.gstd(self.array_3d, axis=None)
assert_allclose(gstd_actual, self.gstd_array_1d)
def test_3d_array_axis_type_tuple(self):
gstd_actual = stats.gstd(self.array_3d, axis=(1,2))
assert_allclose(gstd_actual, [2.12939215, 1.22120169])
def test_3d_array_axis_0(self):
gstd_actual = stats.gstd(self.array_3d, axis=0)
gstd_desired = np.array([
[6.1330555493918, 3.958900210120, 3.1206598248344, 2.6651441426902],
[2.3758135028411, 2.174581428192, 2.0260062829505, 1.9115518327308],
[1.8205343606803, 1.746342404566, 1.6846557065742, 1.6325269194382]
])
assert_allclose(gstd_actual, gstd_desired)
def test_3d_array_axis_1(self):
gstd_actual = stats.gstd(self.array_3d, axis=1)
gstd_desired = np.array([
[3.118993630946, 2.275985934063, 1.933995977619, 1.742896469724],
[1.271693593916, 1.254158641801, 1.238774141609, 1.225164057869]
])
assert_allclose(gstd_actual, gstd_desired)
def test_3d_array_axis_2(self):
gstd_actual = stats.gstd(self.array_3d, axis=2)
gstd_desired = np.array([
[1.8242475707664, 1.2243686572447, 1.1318311657788],
[1.0934830582351, 1.0724479791887, 1.0591498540749]
])
assert_allclose(gstd_actual, gstd_desired)
def test_masked_3d_array(self):
ma = np.ma.masked_where(self.array_3d > 16, self.array_3d)
gstd_actual = stats.gstd(ma, axis=2)
gstd_desired = stats.gstd(self.array_3d, axis=2)
mask = [[0, 0, 0], [0, 1, 1]]
assert_allclose(gstd_actual, gstd_desired)
assert_equal(gstd_actual.mask, mask)
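# Hedged note (added for clarity): `gstd` is the exponential of the standard
# deviation of the log-transformed data; with the default ddof=1 suggested by
# test_ddof_equal_to_number_of_observations above,
#     gstd(a) = exp(std(log(a), ddof=1))
# reproduces the 2.2944... reference value used for np.arange(24) + 1.  Treat
# the exact ddof convention as an assumption here.
def _gstd_by_hand(a, ddof=1):
    return np.exp(np.std(np.log(np.asarray(a, dtype=float)), ddof=ddof))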
def test_binomtest():
# precision tests compared to R for ticket:986
pp = np.concatenate((np.linspace(0.1,0.2,5), np.linspace(0.45,0.65,5),
np.linspace(0.85,0.95,5)))
n = 501
x = 450
results = [0.0, 0.0, 1.0159969301994141e-304,
2.9752418572150531e-275, 7.7668382922535275e-250,
2.3381250925167094e-099, 7.8284591587323951e-081,
9.9155947819961383e-065, 2.8729390725176308e-050,
1.7175066298388421e-037, 0.0021070691951093692,
0.12044570587262322, 0.88154763174802508, 0.027120993063129286,
2.6102587134694721e-006]
for p, res in zip(pp,results):
assert_approx_equal(stats.binom_test(x, n, p), res,
                            significant=12, err_msg='fail for p=%f' % p)
assert_approx_equal(stats.binom_test(50,100,0.1), 5.8320387857343647e-024,
                        significant=12, err_msg='fail for p=%f' % p)
def test_binomtest2():
# test added for issue #2384
res2 = [
[1.0, 1.0],
[0.5,1.0,0.5],
[0.25,1.00,1.00,0.25],
[0.125,0.625,1.000,0.625,0.125],
[0.0625,0.3750,1.0000,1.0000,0.3750,0.0625],
[0.03125,0.21875,0.68750,1.00000,0.68750,0.21875,0.03125],
[0.015625,0.125000,0.453125,1.000000,1.000000,0.453125,0.125000,0.015625],
[0.0078125,0.0703125,0.2890625,0.7265625,1.0000000,0.7265625,0.2890625,
0.0703125,0.0078125],
[0.00390625,0.03906250,0.17968750,0.50781250,1.00000000,1.00000000,
0.50781250,0.17968750,0.03906250,0.00390625],
[0.001953125,0.021484375,0.109375000,0.343750000,0.753906250,1.000000000,
0.753906250,0.343750000,0.109375000,0.021484375,0.001953125]
]
for k in range(1, 11):
res1 = [stats.binom_test(v, k, 0.5) for v in range(k + 1)]
assert_almost_equal(res1, res2[k-1], decimal=10)
def test_binomtest3():
# test added for issue #2384
# test when x == n*p and neighbors
res3 = [stats.binom_test(v, v*k, 1./k) for v in range(1, 11)
for k in range(2, 11)]
assert_equal(res3, np.ones(len(res3), int))
#> bt=c()
#> for(i in as.single(1:10)){for(k in as.single(2:10)){bt = c(bt, binom.test(i-1, k*i,(1/k))$p.value); print(c(i+1, k*i,(1/k)))}}
binom_testm1 = np.array([
0.5, 0.5555555555555556, 0.578125, 0.5904000000000003,
0.5981224279835393, 0.603430543396034, 0.607304096221924,
0.610255656871054, 0.612579511000001, 0.625, 0.670781893004115,
0.68853759765625, 0.6980101120000006, 0.703906431368616,
0.70793209416498, 0.7108561134173507, 0.713076544331419,
0.714820192935702, 0.6875, 0.7268709038256367, 0.7418963909149174,
0.74986110468096, 0.7548015520398076, 0.7581671424768577,
0.760607984787832, 0.762459425024199, 0.7639120677676575, 0.7265625,
0.761553963657302, 0.774800934828818, 0.7818005980538996,
0.78613491480358, 0.789084353140195, 0.7912217659828884,
0.79284214559524, 0.794112956558801, 0.75390625, 0.7856929451142176,
0.7976688481430754, 0.8039848974727624, 0.807891868948366,
0.8105487660137676, 0.812473307174702, 0.8139318233591120,
0.815075399104785, 0.7744140625, 0.8037322594985427,
0.814742863657656, 0.8205425178645808, 0.8241275984172285,
0.8265645374416, 0.8283292196088257, 0.829666291102775,
0.8307144686362666, 0.7905273437499996, 0.8178712053954738,
0.828116983756619, 0.833508948940494, 0.8368403871552892,
0.839104213210105, 0.840743186196171, 0.84198481438049,
0.8429580531563676, 0.803619384765625, 0.829338573944648,
0.8389591907548646, 0.84401876783902, 0.84714369697889,
0.8492667010581667, 0.850803474598719, 0.851967542858308,
0.8528799045949524, 0.8145294189453126, 0.838881732845347,
0.847979024541911, 0.852760894015685, 0.8557134656773457,
0.8577190131799202, 0.85917058278431, 0.860270010472127,
0.861131648404582, 0.823802947998047, 0.846984756807511,
0.855635653643743, 0.860180994825685, 0.86298688573253,
0.864892525675245, 0.866271647085603, 0.867316125625004,
0.8681346531755114
])
# > bt=c()
# > for(i in as.single(1:10)){for(k in as.single(2:10)){bt = c(bt, binom.test(i+1, k*i,(1/k))$p.value); print(c(i+1, k*i,(1/k)))}}
binom_testp1 = np.array([
0.5, 0.259259259259259, 0.26171875, 0.26272, 0.2632244513031551,
0.2635138663069203, 0.2636951804161073, 0.2638162407564354,
0.2639010709000002, 0.625, 0.4074074074074074, 0.42156982421875,
0.4295746560000003, 0.43473045988554, 0.4383309503172684,
0.4409884859402103, 0.4430309389962837, 0.444649849401104, 0.6875,
0.4927602499618962, 0.5096031427383425, 0.5189636628480,
0.5249280070771274, 0.5290623300865124, 0.5320974248125793,
0.5344204730474308, 0.536255847400756, 0.7265625, 0.5496019313526808,
0.5669248746708034, 0.576436455045805, 0.5824538812831795,
0.5866053321547824, 0.589642781414643, 0.5919618019300193,
0.593790427805202, 0.75390625, 0.590868349763505, 0.607983393277209,
0.617303847446822, 0.623172512167948, 0.627208862156123,
0.6301556891501057, 0.632401894928977, 0.6341708982290303,
0.7744140625, 0.622562037497196, 0.639236102912278, 0.648263335014579,
0.65392850011132, 0.657816519817211, 0.660650782947676,
0.662808780346311, 0.6645068560246006, 0.7905273437499996,
0.6478843304312477, 0.6640468318879372, 0.6727589686071775,
0.6782129857784873, 0.681950188903695, 0.684671508668418,
0.686741824999918, 0.688369886732168, 0.803619384765625,
0.668716055304315, 0.684360013879534, 0.6927642396829181,
0.6980155964704895, 0.701609591890657, 0.7042244320992127,
0.7062125081341817, 0.707775152962577, 0.8145294189453126,
0.686243374488305, 0.7013873696358975, 0.709501223328243,
0.714563595144314, 0.718024953392931, 0.7205416252126137,
0.722454130389843, 0.723956813292035, 0.823802947998047,
0.701255953767043, 0.715928221686075, 0.723772209289768,
0.7286603031173616, 0.7319999279787631, 0.7344267920995765,
0.736270323773157, 0.737718376096348
])
res4_p1 = [stats.binom_test(v+1, v*k, 1./k) for v in range(1, 11)
for k in range(2, 11)]
res4_m1 = [stats.binom_test(v-1, v*k, 1./k) for v in range(1, 11)
for k in range(2, 11)]
assert_almost_equal(res4_p1, binom_testp1, decimal=13)
assert_almost_equal(res4_m1, binom_testm1, decimal=13)
class TestTrim(object):
# test trim functions
def test_trim1(self):
a = np.arange(11)
assert_equal(np.sort(stats.trim1(a, 0.1)), np.arange(10))
assert_equal(np.sort(stats.trim1(a, 0.2)), np.arange(9))
assert_equal(np.sort(stats.trim1(a, 0.2, tail='left')),
np.arange(2, 11))
assert_equal(np.sort(stats.trim1(a, 3/11., tail='left')),
np.arange(3, 11))
assert_equal(stats.trim1(a, 1.0), [])
assert_equal(stats.trim1(a, 1.0, tail='left'), [])
# empty input
assert_equal(stats.trim1([], 0.1), [])
assert_equal(stats.trim1([], 3/11., tail='left'), [])
assert_equal(stats.trim1([], 4/6.), [])
def test_trimboth(self):
a = np.arange(11)
assert_equal(np.sort(stats.trimboth(a, 3/11.)), np.arange(3, 8))
assert_equal(np.sort(stats.trimboth(a, 0.2)),
np.array([2, 3, 4, 5, 6, 7, 8]))
assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(6, 4), 0.2)),
np.arange(4, 20).reshape(4, 4))
assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(4, 6).T,
2/6.)),
np.array([[2, 8, 14, 20], [3, 9, 15, 21]]))
assert_raises(ValueError, stats.trimboth,
np.arange(24).reshape(4, 6).T, 4/6.)
# empty input
assert_equal(stats.trimboth([], 0.1), [])
assert_equal(stats.trimboth([], 3/11.), [])
assert_equal(stats.trimboth([], 4/6.), [])
def test_trim_mean(self):
# don't use pre-sorted arrays
a = np.array([4, 8, 2, 0, 9, 5, 10, 1, 7, 3, 6])
idx = np.array([3, 5, 0, 1, 2, 4])
a2 = np.arange(24).reshape(6, 4)[idx, :]
a3 = np.arange(24).reshape(6, 4, order='F')[idx, :]
assert_equal(stats.trim_mean(a3, 2/6.),
np.array([2.5, 8.5, 14.5, 20.5]))
assert_equal(stats.trim_mean(a2, 2/6.),
np.array([10., 11., 12., 13.]))
idx4 = np.array([1, 0, 3, 2])
a4 = np.arange(24).reshape(4, 6)[idx4, :]
assert_equal(stats.trim_mean(a4, 2/6.),
np.array([9., 10., 11., 12., 13., 14.]))
# shuffled arange(24) as array_like
a = [7, 11, 12, 21, 16, 6, 22, 1, 5, 0, 18, 10, 17, 9, 19, 15, 23,
20, 2, 14, 4, 13, 8, 3]
assert_equal(stats.trim_mean(a, 2/6.), 11.5)
assert_equal(stats.trim_mean([5,4,3,1,2,0], 2/6.), 2.5)
# check axis argument
np.random.seed(1234)
a = np.random.randint(20, size=(5, 6, 4, 7))
for axis in [0, 1, 2, 3, -1]:
res1 = stats.trim_mean(a, 2/6., axis=axis)
res2 = stats.trim_mean(np.rollaxis(a, axis), 2/6.)
assert_equal(res1, res2)
res1 = stats.trim_mean(a, 2/6., axis=None)
res2 = stats.trim_mean(a.ravel(), 2/6.)
assert_equal(res1, res2)
assert_raises(ValueError, stats.trim_mean, a, 0.6)
# empty input
assert_equal(stats.trim_mean([], 0.0), np.nan)
assert_equal(stats.trim_mean([], 0.6), np.nan)
class TestSigmaClip(object):
def test_sigmaclip1(self):
a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5)))
fact = 4 # default
c, low, upp = stats.sigmaclip(a)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c.size, a.size)
def test_sigmaclip2(self):
a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5)))
fact = 1.5
c, low, upp = stats.sigmaclip(a, fact, fact)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c.size, 4)
assert_equal(a.size, 36) # check original array unchanged
def test_sigmaclip3(self):
a = np.concatenate((np.linspace(9.5, 10.5, 11),
np.linspace(-100, -50, 3)))
fact = 1.8
c, low, upp = stats.sigmaclip(a, fact, fact)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c, np.linspace(9.5, 10.5, 11))
def test_sigmaclip_result_attributes(self):
a = np.concatenate((np.linspace(9.5, 10.5, 11),
np.linspace(-100, -50, 3)))
fact = 1.8
res = stats.sigmaclip(a, fact, fact)
attributes = ('clipped', 'lower', 'upper')
check_named_results(res, attributes)
def test_std_zero(self):
# regression test #8632
x = np.ones(10)
assert_equal(stats.sigmaclip(x)[0], x)
class TestFOneWay(object):
def test_trivial(self):
# A trivial test of stats.f_oneway, with F=0.
F, p = stats.f_oneway([0,2], [0,2])
assert_equal(F, 0.0)
def test_basic(self):
# Despite being a floating point calculation, this data should
# result in F being exactly 2.0.
F, p = stats.f_oneway([0,2], [2,4])
assert_equal(F, 2.0)
def test_large_integer_array(self):
a = np.array([655, 788], dtype=np.uint16)
b = np.array([789, 772], dtype=np.uint16)
F, p = stats.f_oneway(a, b)
assert_almost_equal(F, 0.77450216931805538)
def test_result_attributes(self):
a = np.array([655, 788], dtype=np.uint16)
b = np.array([789, 772], dtype=np.uint16)
res = stats.f_oneway(a, b)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_nist(self):
# These are the nist ANOVA files. They can be found at:
# https://www.itl.nist.gov/div898/strd/anova/anova.html
filenames = ['SiRstv.dat', 'SmLs01.dat', 'SmLs02.dat', 'SmLs03.dat',
'AtmWtAg.dat', 'SmLs04.dat', 'SmLs05.dat', 'SmLs06.dat',
'SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat']
for test_case in filenames:
rtol = 1e-7
fname = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/nist_anova', test_case))
with open(fname, 'r') as f:
content = f.read().split('\n')
certified = [line.split() for line in content[40:48]
if line.strip()]
dataf = np.loadtxt(fname, skiprows=60)
y, x = dataf.T
y = y.astype(int)
caty = np.unique(y)
f = float(certified[0][-1])
xlist = [x[y == i] for i in caty]
res = stats.f_oneway(*xlist)
# With the hard test cases we relax the tolerance a bit.
hard_tc = ('SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat')
if test_case in hard_tc:
rtol = 1e-4
assert_allclose(res[0], f, rtol=rtol,
err_msg='Failing testcase: %s' % test_case)
class TestKruskal(object):
def test_simple(self):
x = [1]
y = [2]
h, p = stats.kruskal(x, y)
assert_equal(h, 1.0)
assert_approx_equal(p, stats.distributions.chi2.sf(h, 1))
h, p = stats.kruskal(np.array(x), np.array(y))
assert_equal(h, 1.0)
assert_approx_equal(p, stats.distributions.chi2.sf(h, 1))
def test_basic(self):
x = [1, 3, 5, 7, 9]
y = [2, 4, 6, 8, 10]
h, p = stats.kruskal(x, y)
assert_approx_equal(h, 3./11, significant=10)
assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1))
h, p = stats.kruskal(np.array(x), np.array(y))
assert_approx_equal(h, 3./11, significant=10)
assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1))
def test_simple_tie(self):
x = [1]
y = [1, 2]
h_uncorr = 1.5**2 + 2*2.25**2 - 12
corr = 0.75
expected = h_uncorr / corr # 0.5
h, p = stats.kruskal(x, y)
# Since the expression is simple and the exact answer is 0.5, it
# should be safe to use assert_equal().
assert_equal(h, expected)
def test_another_tie(self):
x = [1, 1, 1, 2]
y = [2, 2, 2, 2]
h_uncorr = (12. / 8. / 9.) * 4 * (3**2 + 6**2) - 3 * 9
corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8)
expected = h_uncorr / corr
h, p = stats.kruskal(x, y)
assert_approx_equal(h, expected)
def test_three_groups(self):
# A test of stats.kruskal with three groups, with ties.
x = [1, 1, 1]
y = [2, 2, 2]
z = [2, 2]
h_uncorr = (12. / 8. / 9.) * (3*2**2 + 3*6**2 + 2*6**2) - 3 * 9 # 5.0
corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8)
expected = h_uncorr / corr # 7.0
h, p = stats.kruskal(x, y, z)
assert_approx_equal(h, expected)
assert_approx_equal(p, stats.distributions.chi2.sf(h, 2))
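    # Worked note (added for clarity; mirrors the hand computations above): the
    # uncorrected Kruskal-Wallis statistic is
    #     H = 12 / (N * (N + 1)) * sum(n_i * rbar_i**2) - 3 * (N + 1)
    # and ties divide H by C = 1 - sum(t**3 - t) / (N**3 - N) over tied groups
    # of size t.  In test_three_groups, N = 8 with a 3-way and a 5-way tie, so
    # C = 1 - (24 + 120) / 504 = 5/7 and H = 5.0 / (5/7) = 7.0, matching the
    # commented values.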
def test_empty(self):
# A test of stats.kruskal with three groups, with ties.
x = [1, 1, 1]
y = [2, 2, 2]
z = []
assert_equal(stats.kruskal(x, y, z), (np.nan, np.nan))
def test_kruskal_result_attributes(self):
x = [1, 3, 5, 7, 9]
y = [2, 4, 6, 8, 10]
res = stats.kruskal(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_nan_policy(self):
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.kruskal(x, x), (np.nan, np.nan))
assert_almost_equal(stats.kruskal(x, x, nan_policy='omit'), (0.0, 1.0))
assert_raises(ValueError, stats.kruskal, x, x, nan_policy='raise')
assert_raises(ValueError, stats.kruskal, x, x, nan_policy='foobar')
class TestCombinePvalues(object):
def test_fisher(self):
# Example taken from https://en.wikipedia.org/wiki/Fisher%27s_exact_test#Example
xsq, p = stats.combine_pvalues([.01, .2, .3], method='fisher')
assert_approx_equal(p, 0.02156, significant=4)
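    # Worked note (added for clarity, not an upstream test): Fisher's method
    # combines k p-values through X = -2 * sum(log(p_i)), which is chi-squared
    # with 2k degrees of freedom under the null.  For [.01, .2, .3] this gives
    # X ~= 14.84 and stats.chi2.sf(14.84, 6) ~= 0.0216, consistent with the
    # 0.02156 asserted above.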
def test_stouffer(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer')
assert_approx_equal(p, 0.01651, significant=4)
def test_stouffer2(self):
Z, p = stats.combine_pvalues([.5, .5, .5], method='stouffer')
assert_approx_equal(p, 0.5, significant=4)
def test_weighted_stouffer(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer',
weights=np.ones(3))
assert_approx_equal(p, 0.01651, significant=4)
def test_weighted_stouffer2(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer',
weights=np.array((1, 4, 9)))
assert_approx_equal(p, 0.1464, significant=4)
def test_pearson(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='pearson')
assert_approx_equal(p, 0.97787, significant=4)
def test_tippett(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='tippett')
assert_approx_equal(p, 0.970299, significant=4)
def test_mudholkar_george(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='mudholkar_george')
assert_approx_equal(p, 3.7191571041915e-07, significant=4)
def test_mudholkar_george_equal_fisher_minus_pearson(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='mudholkar_george')
Z_f, p_f = stats.combine_pvalues([.01, .2, .3], method='fisher')
Z_p, p_p = stats.combine_pvalues([.01, .2, .3], method='pearson')
# 0.5 here is because logistic = log(u) - log(1-u), i.e. no 2 factors
assert_approx_equal(0.5 * (Z_f-Z_p), Z, significant=4)
class TestCdfDistanceValidation(object):
"""
Test that _cdf_distance() (via wasserstein_distance()) raises ValueErrors
for bad inputs.
"""
def test_distinct_value_and_weight_lengths(self):
# When the number of weights does not match the number of values,
# a ValueError should be raised.
assert_raises(ValueError, stats.wasserstein_distance,
[1], [2], [4], [3, 1])
assert_raises(ValueError, stats.wasserstein_distance, [1], [2], [1, 0])
def test_zero_weight(self):
# When a distribution is given zero weight, a ValueError should be
# raised.
assert_raises(ValueError, stats.wasserstein_distance,
[0, 1], [2], [0, 0])
assert_raises(ValueError, stats.wasserstein_distance,
[0, 1], [2], [3, 1], [0])
def test_negative_weights(self):
# A ValueError should be raised if there are any negative weights.
assert_raises(ValueError, stats.wasserstein_distance,
[0, 1], [2, 2], [1, 1], [3, -1])
def test_empty_distribution(self):
# A ValueError should be raised when trying to measure the distance
# between something and nothing.
assert_raises(ValueError, stats.wasserstein_distance, [], [2, 2])
assert_raises(ValueError, stats.wasserstein_distance, [1], [])
def test_inf_weight(self):
# An inf weight is not valid.
assert_raises(ValueError, stats.wasserstein_distance,
[1, 2, 1], [1, 1], [1, np.inf, 1], [1, 1])
class TestWassersteinDistance(object):
""" Tests for wasserstein_distance() output values.
"""
def test_simple(self):
# For basic distributions, the value of the Wasserstein distance is
# straightforward.
assert_almost_equal(
stats.wasserstein_distance([0, 1], [0], [1, 1], [1]),
.5)
assert_almost_equal(stats.wasserstein_distance(
[0, 1], [0], [3, 1], [1]),
.25)
assert_almost_equal(stats.wasserstein_distance(
[0, 2], [0], [1, 1], [1]),
1)
assert_almost_equal(stats.wasserstein_distance(
[0, 1, 2], [1, 2, 3]),
1)
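    # Worked note (added for clarity): in 1-d the Wasserstein-1 distance equals
    # the integral of |F(x) - G(x)| between the two empirical CDFs.  For values
    # [0, 1] with weights [1, 1] against a point mass at 0, |F - G| = 0.5 on
    # [0, 1), giving the 0.5 above; with weights [3, 1] the gap is 0.25.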
def test_same_distribution(self):
# Any distribution moved to itself should have a Wasserstein distance of
# zero.
assert_equal(stats.wasserstein_distance([1, 2, 3], [2, 1, 3]), 0)
assert_equal(
stats.wasserstein_distance([1, 1, 1, 4], [4, 1],
[1, 1, 1, 1], [1, 3]),
0)
def test_shift(self):
# If the whole distribution is shifted by x, then the Wasserstein
# distance should be x.
assert_almost_equal(stats.wasserstein_distance([0], [1]), 1)
assert_almost_equal(stats.wasserstein_distance([-5], [5]), 10)
assert_almost_equal(
stats.wasserstein_distance([1, 2, 3, 4, 5], [11, 12, 13, 14, 15]),
10)
assert_almost_equal(
stats.wasserstein_distance([4.5, 6.7, 2.1], [4.6, 7, 9.2],
[3, 1, 1], [1, 3, 1]),
2.5)
def test_combine_weights(self):
# Assigning a weight w to a value is equivalent to including that value
# w times in the value array with weight of 1.
assert_almost_equal(
stats.wasserstein_distance(
[0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4],
[1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]),
stats.wasserstein_distance([5, 0, 1], [0, 4, 3],
[1, 2, 4], [1, 2, 4]))
def test_collapse(self):
# Collapsing a distribution to a point distribution at zero is
# equivalent to taking the average of the absolute values of the values.
u = np.arange(-10, 30, 0.3)
v = np.zeros_like(u)
assert_almost_equal(
stats.wasserstein_distance(u, v),
np.mean(np.abs(u)))
u_weights = np.arange(len(u))
v_weights = u_weights[::-1]
assert_almost_equal(
stats.wasserstein_distance(u, v, u_weights, v_weights),
np.average(np.abs(u), weights=u_weights))
def test_zero_weight(self):
# Values with zero weight have no impact on the Wasserstein distance.
assert_almost_equal(
stats.wasserstein_distance([1, 2, 100000], [1, 1],
[1, 1, 0], [1, 1]),
stats.wasserstein_distance([1, 2], [1, 1], [1, 1], [1, 1]))
def test_inf_values(self):
# Inf values can lead to an inf distance or trigger a RuntimeWarning
# (and return NaN) if the distance is undefined.
assert_equal(
stats.wasserstein_distance([1, 2, np.inf], [1, 1]),
np.inf)
assert_equal(
stats.wasserstein_distance([1, 2, np.inf], [-np.inf, 1]),
np.inf)
assert_equal(
stats.wasserstein_distance([1, -np.inf, np.inf], [1, 1]),
np.inf)
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, "invalid value*")
assert_equal(
stats.wasserstein_distance([1, 2, np.inf], [np.inf, 1]),
np.nan)
class TestEnergyDistance(object):
""" Tests for energy_distance() output values.
"""
def test_simple(self):
# For basic distributions, the value of the energy distance is
# straightforward.
assert_almost_equal(
stats.energy_distance([0, 1], [0], [1, 1], [1]),
np.sqrt(2) * .5)
assert_almost_equal(stats.energy_distance(
[0, 1], [0], [3, 1], [1]),
np.sqrt(2) * .25)
assert_almost_equal(stats.energy_distance(
[0, 2], [0], [1, 1], [1]),
2 * .5)
assert_almost_equal(
stats.energy_distance([0, 1, 2], [1, 2, 3]),
np.sqrt(2) * (3*(1./3**2))**.5)
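    # Worked note (added for clarity): the energy distance used here can be
    # written as sqrt(2 * integral of (F(x) - G(x))**2 dx), which is where the
    # sqrt(2) factors above come from and why shifting a point mass by x gives
    # sqrt(2) * sqrt(x) in test_shift below.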
def test_same_distribution(self):
        # Any distribution moved to itself should have an energy distance of
# zero.
assert_equal(stats.energy_distance([1, 2, 3], [2, 1, 3]), 0)
assert_equal(
stats.energy_distance([1, 1, 1, 4], [4, 1], [1, 1, 1, 1], [1, 3]),
0)
def test_shift(self):
# If a single-point distribution is shifted by x, then the energy
# distance should be sqrt(2) * sqrt(x).
assert_almost_equal(stats.energy_distance([0], [1]), np.sqrt(2))
assert_almost_equal(
stats.energy_distance([-5], [5]),
np.sqrt(2) * 10**.5)
def test_combine_weights(self):
# Assigning a weight w to a value is equivalent to including that value
# w times in the value array with weight of 1.
assert_almost_equal(
stats.energy_distance([0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4],
[1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]),
stats.energy_distance([5, 0, 1], [0, 4, 3], [1, 2, 4], [1, 2, 4]))
def test_zero_weight(self):
# Values with zero weight have no impact on the energy distance.
assert_almost_equal(
stats.energy_distance([1, 2, 100000], [1, 1], [1, 1, 0], [1, 1]),
stats.energy_distance([1, 2], [1, 1], [1, 1], [1, 1]))
def test_inf_values(self):
# Inf values can lead to an inf distance or trigger a RuntimeWarning
# (and return NaN) if the distance is undefined.
assert_equal(stats.energy_distance([1, 2, np.inf], [1, 1]), np.inf)
assert_equal(
stats.energy_distance([1, 2, np.inf], [-np.inf, 1]),
np.inf)
assert_equal(
stats.energy_distance([1, -np.inf, np.inf], [1, 1]),
np.inf)
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, "invalid value*")
assert_equal(
stats.energy_distance([1, 2, np.inf], [np.inf, 1]),
np.nan)
class TestBrunnerMunzel(object):
# Data from (Lumley, 1996)
X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1]
Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
significant = 13
def test_brunnermunzel_one_sided(self):
# Results are compared with R's lawstat package.
u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='less')
u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='greater')
u3, p3 = stats.brunnermunzel(self.X, self.Y, alternative='greater')
u4, p4 = stats.brunnermunzel(self.Y, self.X, alternative='less')
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(p3, p4, significant=self.significant)
assert_(p1 != p3)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(u3, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u4, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0028931043330757342,
significant=self.significant)
assert_approx_equal(p3, 0.99710689566692423,
significant=self.significant)
def test_brunnermunzel_two_sided(self):
# Results are compared with R's lawstat package.
u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='two-sided')
u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='two-sided')
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0057862086661515377,
significant=self.significant)
def test_brunnermunzel_default(self):
# The default value for alternative is two-sided
u1, p1 = stats.brunnermunzel(self.X, self.Y)
u2, p2 = stats.brunnermunzel(self.Y, self.X)
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0057862086661515377,
significant=self.significant)
def test_brunnermunzel_alternative_error(self):
alternative = "error"
distribution = "t"
nan_policy = "propagate"
assert_(alternative not in ["two-sided", "greater", "less"])
assert_raises(ValueError,
stats.brunnermunzel,
self.X,
self.Y,
alternative,
distribution,
nan_policy)
def test_brunnermunzel_distribution_norm(self):
u1, p1 = stats.brunnermunzel(self.X, self.Y, distribution="normal")
u2, p2 = stats.brunnermunzel(self.Y, self.X, distribution="normal")
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0017041417600383024,
significant=self.significant)
def test_brunnermunzel_distribution_error(self):
alternative = "two-sided"
distribution = "error"
nan_policy = "propagate"
        assert_(distribution not in ["t", "normal"])
assert_raises(ValueError,
stats.brunnermunzel,
self.X,
self.Y,
alternative,
distribution,
nan_policy)
    def test_brunnermunzel_empty_input(self):
u1, p1 = stats.brunnermunzel(self.X, [])
u2, p2 = stats.brunnermunzel([], self.Y)
u3, p3 = stats.brunnermunzel([], [])
assert_equal(u1, np.nan)
assert_equal(p1, np.nan)
assert_equal(u2, np.nan)
assert_equal(p2, np.nan)
assert_equal(u3, np.nan)
assert_equal(p3, np.nan)
def test_brunnermunzel_nan_input_propagate(self):
X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]
Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
u1, p1 = stats.brunnermunzel(X, Y, nan_policy="propagate")
u2, p2 = stats.brunnermunzel(Y, X, nan_policy="propagate")
assert_equal(u1, np.nan)
assert_equal(p1, np.nan)
assert_equal(u2, np.nan)
assert_equal(p2, np.nan)
def test_brunnermunzel_nan_input_raise(self):
X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]
Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
alternative = "two-sided"
distribution = "t"
nan_policy = "raise"
assert_raises(ValueError,
stats.brunnermunzel,
X,
Y,
alternative,
distribution,
nan_policy)
assert_raises(ValueError,
stats.brunnermunzel,
Y,
X,
alternative,
distribution,
nan_policy)
def test_brunnermunzel_nan_input_omit(self):
X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]
Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
u1, p1 = stats.brunnermunzel(X, Y, nan_policy="omit")
u2, p2 = stats.brunnermunzel(Y, X, nan_policy="omit")
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0057862086661515377,
significant=self.significant)
class TestRatioUniforms(object):
""" Tests for rvs_ratio_uniforms.
"""
def test_rv_generation(self):
# use KS test to check distribution of rvs
# normal distribution
f = stats.norm.pdf
v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=2500,
random_state=12345)
assert_equal(stats.kstest(rvs, 'norm')[1] > 0.25, True)
# exponential distribution
rvs = stats.rvs_ratio_uniforms(lambda x: np.exp(-x), umax=1,
vmin=0, vmax=2*np.exp(-1),
size=1000, random_state=12345)
assert_equal(stats.kstest(rvs, 'expon')[1] > 0.25, True)
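    # Worked note (added for clarity): the ratio-of-uniforms bounds above come
    # from umax = sup sqrt(f(x)) and |v| <= sup |x| * sqrt(f(x)).  For the
    # standard normal pdf, x * sqrt(f(x)) peaks at x = +-sqrt(2), which is
    # exactly the v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2) used in these
    # tests.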
def test_shape(self):
# test shape of return value depending on size parameter
f = stats.norm.pdf
v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
r1 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=3,
random_state=1234)
r2 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3,),
random_state=1234)
r3 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 1),
random_state=1234)
assert_equal(r1, r2)
assert_equal(r2, r3.flatten())
assert_equal(r1.shape, (3,))
assert_equal(r3.shape, (3, 1))
r4 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 3, 3),
random_state=12)
r5 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=27,
random_state=12)
assert_equal(r4.flatten(), r5)
assert_equal(r4.shape, (3, 3, 3))
r6 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, random_state=1234)
r7 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=1,
random_state=1234)
r8 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(1, ),
random_state=1234)
assert_equal(r6, r7)
assert_equal(r7, r8)
def test_random_state(self):
f = stats.norm.pdf
v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
np.random.seed(1234)
r1 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 4))
r2 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 4),
random_state=1234)
assert_equal(r1, r2)
def test_exceptions(self):
f = stats.norm.pdf
# need vmin < vmax
assert_raises(ValueError,
stats.rvs_ratio_uniforms, pdf=f, umax=1, vmin=3, vmax=1)
assert_raises(ValueError,
stats.rvs_ratio_uniforms, pdf=f, umax=1, vmin=1, vmax=1)
# need umax > 0
assert_raises(ValueError,
stats.rvs_ratio_uniforms, pdf=f, umax=-1, vmin=1, vmax=1)
assert_raises(ValueError,
stats.rvs_ratio_uniforms, pdf=f, umax=0, vmin=1, vmax=1)
def test_gig(self):
# test generalized inverse gaussian distribution
p, b = 0.5, 0.75
def gig_mode(p, b):
return b / (np.sqrt((p - 1)**2 + b**2) + 1 - p)
def gig_pdf(x, p, b):
c = 1/(2 * kv(p, b))
return c * x**(p - 1) * np.exp(- b * (x + 1/x) / 2)
def gig_cdf(x, p, b):
x = np.atleast_1d(x)
cdf = [quad(gig_pdf, 0, xi, args=(p, b))[0] for xi in x]
return np.array(cdf)
s = kv(p+2, b) / kv(p, b)
vmax = np.sqrt(gig_pdf(gig_mode(p + 2, b), p + 2, b) * s)
umax = np.sqrt(gig_pdf(gig_mode(p, b), p, b))
rvs = stats.rvs_ratio_uniforms(lambda x: gig_pdf(x, p, b), umax,
0, vmax, random_state=1234, size=1500)
assert_equal(stats.kstest(rvs, lambda x: gig_cdf(x, p, b))[1] > 0.25,
True)
class TestEppsSingleton(object):
def test_statistic_1(self):
# first example in Goerg & Kaiser, also in original paper of
# Epps & Singleton. Note: values do not match exactly, the
# value of the interquartile range varies depending on how
# quantiles are computed
x = np.array([-0.35, 2.55, 1.73, 0.73, 0.35, 2.69, 0.46, -0.94, -0.37, 12.07])
y = np.array([-1.15, -0.15, 2.48, 3.25, 3.71, 4.29, 5.00, 7.74, 8.38, 8.60])
w, p = stats.epps_singleton_2samp(x, y)
assert_almost_equal(w, 15.14, decimal=1)
assert_almost_equal(p, 0.00442, decimal=3)
def test_statistic_2(self):
# second example in Goerg & Kaiser, again not a perfect match
x = np.array((0, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 5, 5, 5, 6, 10,
10, 10, 10))
y = np.array((10, 4, 0, 5, 10, 10, 0, 5, 6, 7, 10, 3, 1, 7, 0, 8, 1,
5, 8, 10))
w, p = stats.epps_singleton_2samp(x, y)
assert_allclose(w, 8.900, atol=0.001)
assert_almost_equal(p, 0.06364, decimal=3)
def test_epps_singleton_array_like(self):
np.random.seed(1234)
x, y = np.arange(30), np.arange(28)
w1, p1 = stats.epps_singleton_2samp(list(x), list(y))
w2, p2 = stats.epps_singleton_2samp(tuple(x), tuple(y))
w3, p3 = stats.epps_singleton_2samp(x, y)
assert_(w1 == w2 == w3)
assert_(p1 == p2 == p3)
def test_epps_singleton_size(self):
# raise error if less than 5 elements
x, y = (1, 2, 3, 4), np.arange(10)
assert_raises(ValueError, stats.epps_singleton_2samp, x, y)
def test_epps_singleton_nonfinite(self):
# raise error if there are non-finite values
x, y = (1, 2, 3, 4, 5, np.inf), np.arange(10)
assert_raises(ValueError, stats.epps_singleton_2samp, x, y)
x, y = np.arange(10), (1, 2, 3, 4, 5, np.nan)
assert_raises(ValueError, stats.epps_singleton_2samp, x, y)
def test_epps_singleton_1d_input(self):
x = np.arange(100).reshape(-1, 1)
assert_raises(ValueError, stats.epps_singleton_2samp, x, x)
def test_names(self):
x, y = np.arange(20), np.arange(30)
res = stats.epps_singleton_2samp(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
|
gertingold/scipy
|
scipy/stats/tests/test_stats.py
|
Python
|
bsd-3-clause
| 206,871
|
[
"Gaussian"
] |
d7790b0be7f5bef3c3c83e9de7742edce1627c397d4813ea63b1c7bbd7bf8bfc
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains the classes to build a ConversionElectrode.
"""
from typing import Iterable, Dict
from dataclasses import dataclass
from monty.dev import deprecated
from scipy.constants import N_A
from pymatgen.analysis.phase_diagram import PhaseDiagram
from pymatgen.analysis.reaction_calculator import BalancedReaction
from pymatgen.apps.battery.battery_abc import AbstractElectrode, AbstractVoltagePair
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.core.units import Charge, Time
from pymatgen.entries.computed_entries import ComputedEntry
@dataclass
class ConversionElectrode(AbstractElectrode):
"""
    Class representing a ConversionElectrode. Since it is a dataclass,
    this object can be constructed directly from its attributes.
    However, it is usually easier to construct a ConversionElectrode using one of the
    classmethod constructors provided.
    Attributes:
voltage_pairs: The voltage pairs making up the Conversion Electrode.
working_ion_entry: A single ComputedEntry or PDEntry
representing the element that carries charge across the
battery, e.g. Li.
initial_comp_formula: Starting composition for ConversionElectrode represented
as a string/formula.
"""
initial_comp_formula: str
@property
def initial_comp(self) -> Composition:
"""
The pymatgen Composition representation of the initial composition
"""
return Composition(self.initial_comp_formula)
@classmethod
def from_composition_and_pd(cls, comp, pd, working_ion_symbol="Li", allow_unstable=False):
"""
Convenience constructor to make a ConversionElectrode from a
composition and a phase diagram.
Args:
comp:
Starting composition for ConversionElectrode, e.g.,
Composition("FeF3")
pd:
A PhaseDiagram of the relevant system (e.g., Li-Fe-F)
working_ion_symbol:
Element symbol of working ion. Defaults to Li.
allow_unstable:
Allow compositions that are unstable
"""
working_ion = Element(working_ion_symbol)
entry = None
working_ion_entry = None
for e in pd.stable_entries:
if e.composition.reduced_formula == comp.reduced_formula:
entry = e
elif e.is_element and e.composition.reduced_formula == working_ion_symbol:
working_ion_entry = e
if not allow_unstable and not entry:
raise ValueError(f"Not stable compound found at composition {comp}.")
profile = pd.get_element_profile(working_ion, comp)
        # Need to reverse because voltage goes from most charged to most
# discharged.
profile.reverse()
if len(profile) < 2:
return None
working_ion_entry = working_ion_entry
working_ion = working_ion_entry.composition.elements[0].symbol
normalization_els = {}
for el, amt in comp.items():
if el != Element(working_ion):
normalization_els[el] = amt
framework = comp.as_dict()
if working_ion in framework:
framework.pop(working_ion)
framework = Composition(framework)
vpairs = [
ConversionVoltagePair.from_steps(
profile[i],
profile[i + 1],
normalization_els,
framework_formula=framework.reduced_formula,
)
for i in range(len(profile) - 1)
]
return ConversionElectrode( # pylint: disable=E1123
voltage_pairs=vpairs,
working_ion_entry=working_ion_entry,
initial_comp_formula=comp.reduced_formula,
framework_formula=framework.reduced_formula,
)
@classmethod
def from_composition_and_entries(cls, comp, entries_in_chemsys, working_ion_symbol="Li", allow_unstable=False):
"""
Convenience constructor to make a ConversionElectrode from a
composition and all entries in a chemical system.
Args:
comp: Starting composition for ConversionElectrode, e.g.,
Composition("FeF3")
entries_in_chemsys: Sequence containing all entries in a
chemical system. E.g., all Li-Fe-F containing entries.
working_ion_symbol: Element symbol of working ion. Defaults to Li.
allow_unstable: If True, allow any composition to be used as the
starting point of a conversion voltage curve, this is useful
for comparing with insertion electrodes
"""
pd = PhaseDiagram(entries_in_chemsys)
return ConversionElectrode.from_composition_and_pd(comp, pd, working_ion_symbol, allow_unstable)
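    # Hedged usage sketch (illustrative only, not part of the library API docs):
    # given an iterable `entries` covering the Li-Fe-F chemical system (how the
    # entries are obtained is left open here -- the variable name is an
    # assumption), a conversion electrode for FeF3 vs. Li could be built as
    #
    #     electrode = ConversionElectrode.from_composition_and_entries(
    #         Composition("FeF3"), entries, working_ion_symbol="Li")
    #     print(electrode.get_average_voltage(), electrode.get_capacity_grav())
    #
    # get_average_voltage()/get_capacity_grav() are the same accessors used by
    # __repr__ further down.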
def get_sub_electrodes(self, adjacent_only=True):
"""
If this electrode contains multiple voltage steps, then it is possible
to use only a subset of the voltage steps to define other electrodes.
For example, an LiTiO2 electrode might contain three subelectrodes:
[LiTiO2 --> TiO2, LiTiO2 --> Li0.5TiO2, Li0.5TiO2 --> TiO2]
This method can be used to return all the subelectrodes with some
options
Args:
adjacent_only: Only return electrodes from compounds that are
adjacent on the convex hull, i.e. no electrodes returned
will have multiple voltage steps if this is set true
Returns:
A list of ConversionElectrode objects
"""
# voltage_pairs = vpairs, working_ion_entry = working_ion_entry,
# _initial_comp_formula = comp.reduced_formula, framework_formula = framework.reduced_formula
if adjacent_only:
return [
ConversionElectrode( # pylint: disable=E1123
voltage_pairs=self.voltage_pairs[i : i + 1],
working_ion_entry=self.working_ion_entry,
initial_comp_formula=self.initial_comp_formula,
framework_formula=self.framework_formula,
)
for i in range(len(self.voltage_pairs))
]
sub_electrodes = []
for i in range(len(self.voltage_pairs)):
for j in range(i, len(self.voltage_pairs)):
sub_electrodes.append(
ConversionElectrode( # pylint: disable=E1123
voltage_pairs=self.voltage_pairs[i : j + 1],
working_ion_entry=self.working_ion_entry,
initial_comp_formula=self.initial_comp_formula,
framework_formula=self.framework_formula,
)
)
return sub_electrodes
def is_super_electrode(self, conversion_electrode):
"""
Checks if a particular conversion electrode is a sub electrode of the
current electrode. Starting from a more lithiated state may result in
a subelectrode that is essentially on the same path. For example, a
ConversionElectrode formed by starting from an FePO4 composition would
be a super_electrode of a ConversionElectrode formed from an LiFePO4
composition.
"""
for pair1 in conversion_electrode:
rxn1 = pair1.rxn
all_formulas1 = {
rxn1.all_comp[i].reduced_formula for i in range(len(rxn1.all_comp)) if abs(rxn1.coeffs[i]) > 1e-5
}
for pair2 in self:
rxn2 = pair2.rxn
all_formulas2 = {
rxn2.all_comp[i].reduced_formula for i in range(len(rxn2.all_comp)) if abs(rxn2.coeffs[i]) > 1e-5
}
if all_formulas1 == all_formulas2:
break
else:
return False
return True
def __eq__(self, conversion_electrode):
"""
Check if two electrodes are exactly the same:
"""
if len(self) != len(conversion_electrode):
return False
for pair1 in conversion_electrode:
rxn1 = pair1.rxn
all_formulas1 = {
rxn1.all_comp[i].reduced_formula for i in range(len(rxn1.all_comp)) if abs(rxn1.coeffs[i]) > 1e-5
}
for pair2 in self:
rxn2 = pair2.rxn
all_formulas2 = {
rxn2.all_comp[i].reduced_formula for i in range(len(rxn2.all_comp)) if abs(rxn2.coeffs[i]) > 1e-5
}
if all_formulas1 == all_formulas2:
break
else:
return False
return True
def __hash__(self):
return 7
def __str__(self):
return self.__repr__()
def __repr__(self):
output = [
"Conversion electrode with formula {} and nsteps {}".format(
self.initial_comp.reduced_formula, self.num_steps
),
"Avg voltage {} V, min voltage {} V, max voltage {} V".format(
self.get_average_voltage(), self.min_voltage, self.max_voltage
),
"Capacity (grav.) {} mAh/g, capacity (vol.) {} Ah/l".format(
self.get_capacity_grav(), self.get_capacity_vol()
),
"Specific energy {} Wh/kg, energy density {} Wh/l".format(
self.get_specific_energy(), self.get_energy_density()
),
]
return "\n".join(output)
def get_summary_dict(self, print_subelectrodes=True) -> Dict:
"""
Generate a summary dict.
        Populates the summary dict with the basic information from the parent method, then adds
        conversion-specific information. Since the parent method calls
        self.get_summary_dict(print_subelectrodes=True) for the subelectrodes, the current method
        is also called from within super().get_summary_dict.
Args:
print_subelectrodes: Also print data on all the possible
subelectrodes.
Returns:
            A summary of this electrode's properties in dict format.
"""
d = super().get_summary_dict(print_subelectrodes=print_subelectrodes)
d["reactions"] = []
d["reactant_compositions"] = []
comps = []
frac = []
for pair in self.voltage_pairs:
rxn = pair.rxn
frac.append(pair.frac_charge)
frac.append(pair.frac_discharge)
d["reactions"].append(str(rxn))
for i, v in enumerate(rxn.coeffs):
if abs(v) > 1e-5 and rxn.all_comp[i] not in comps:
comps.append(rxn.all_comp[i])
if abs(v) > 1e-5 and rxn.all_comp[i].reduced_formula != d["working_ion"]:
reduced_comp = rxn.all_comp[i].reduced_composition
comp_dict = reduced_comp.as_dict()
d["reactant_compositions"].append(comp_dict)
return d
@deprecated(
replacement=get_summary_dict,
message="Name and logic changed, will be as_dict_summary will be removed in the futurn.",
)
def as_dict_summary(self, print_subelectrodes=True):
"""
Args:
print_subelectrodes:
Also print data on all the possible subelectrodes
Returns:
            a summary of this electrode's properties in dictionary format
"""
d = {}
framework_comp = Composition(
{k: v for k, v in self.initial_comp.items() if k.symbol != self.working_ion.symbol}
)
d["framework"] = framework_comp.to_data_dict
d["framework_pretty"] = framework_comp.reduced_formula
d["average_voltage"] = self.get_average_voltage()
d["max_voltage"] = self.max_voltage
d["min_voltage"] = self.min_voltage
d["max_delta_volume"] = self.max_delta_volume
d["max_instability"] = 0
d["max_voltage_step"] = self.max_voltage_step
d["nsteps"] = self.num_steps
d["capacity_grav"] = self.get_capacity_grav()
d["capacity_vol"] = self.get_capacity_vol()
d["energy_grav"] = self.get_specific_energy()
d["energy_vol"] = self.get_energy_density()
d["working_ion"] = self.working_ion.symbol
d["reactions"] = []
d["reactant_compositions"] = []
comps = []
frac = []
for pair in self.voltage_pairs:
rxn = pair.rxn
frac.append(pair.frac_charge)
frac.append(pair.frac_discharge)
d["reactions"].append(str(rxn))
for i, v in enumerate(rxn.coeffs):
if abs(v) > 1e-5 and rxn.all_comp[i] not in comps:
comps.append(rxn.all_comp[i])
if abs(v) > 1e-5 and rxn.all_comp[i].reduced_formula != d["working_ion"]:
reduced_comp = rxn.all_comp[i].reduced_composition
comp_dict = reduced_comp.as_dict()
d["reactant_compositions"].append(comp_dict)
d["fracA_charge"] = min(frac)
d["fracA_discharge"] = max(frac)
d["nsteps"] = self.num_steps
if print_subelectrodes:
def f_dict(c):
return c.get_summary_dict(print_subelectrodes=False)
d["adj_pairs"] = list(map(f_dict, self.get_sub_electrodes(adjacent_only=True)))
d["all_pairs"] = list(map(f_dict, self.get_sub_electrodes(adjacent_only=False)))
return d
@dataclass
class ConversionVoltagePair(AbstractVoltagePair):
"""
A VoltagePair representing a Conversion Reaction with a defined voltage.
Typically not initialized directly but rather used by ConversionElectrode.
Attributes:
rxn (BalancedReaction): BalancedReaction for the step
voltage (float): Voltage for the step
mAh (float): Capacity of the step
vol_charge (float): Volume of charged state
vol_discharge (float): Volume of discharged state
mass_charge (float): Mass of charged state
mass_discharge (float): Mass of discharged state
frac_charge (float): Fraction of working ion in the charged state
frac_discharge (float): Fraction of working ion in the discharged state
entries_charge ([ComputedEntry]): Entries in the charged state
entries_discharge ([ComputedEntry]): Entries in discharged state
working_ion_entry (ComputedEntry): Entry of the working ion.
"""
rxn: BalancedReaction
entries_charge: Iterable[ComputedEntry]
entries_discharge: Iterable[ComputedEntry]
@classmethod
def from_steps(cls, step1, step2, normalization_els, framework_formula=None):
"""
Creates a ConversionVoltagePair from two steps in the element profile
from a PD analysis.
Args:
step1: Starting step
step2: Ending step
            normalization_els: Elements to normalize the reaction by, to
                ensure correct capacities.
"""
working_ion_entry = step1["element_reference"]
working_ion = working_ion_entry.composition.elements[0].symbol
working_ion_valence = max(Element(working_ion).oxidation_states)
voltage = (-step1["chempot"] + working_ion_entry.energy_per_atom) / working_ion_valence
mAh = (
(step2["evolution"] - step1["evolution"])
* Charge(1, "e").to("C")
* Time(1, "s").to("h")
* N_A
* 1000
* working_ion_valence
)
licomp = Composition(working_ion)
prev_rxn = step1["reaction"]
reactants = {comp: abs(prev_rxn.get_coeff(comp)) for comp in prev_rxn.products if comp != licomp}
curr_rxn = step2["reaction"]
products = {comp: abs(curr_rxn.get_coeff(comp)) for comp in curr_rxn.products if comp != licomp}
reactants[licomp] = step2["evolution"] - step1["evolution"]
rxn = BalancedReaction(reactants, products)
for el, amt in normalization_els.items():
if rxn.get_el_amount(el) > 1e-6:
rxn.normalize_to_element(el, amt)
break
prev_mass_dischg = (
sum(prev_rxn.all_comp[i].weight * abs(prev_rxn.coeffs[i]) for i in range(len(prev_rxn.all_comp))) / 2
)
vol_charge = sum(
abs(prev_rxn.get_coeff(e.composition)) * e.structure.volume
for e in step1["entries"]
if e.composition.reduced_formula != working_ion
)
mass_discharge = (
sum(curr_rxn.all_comp[i].weight * abs(curr_rxn.coeffs[i]) for i in range(len(curr_rxn.all_comp))) / 2
)
mass_charge = prev_mass_dischg
mass_discharge = mass_discharge
vol_discharge = sum(
abs(curr_rxn.get_coeff(e.composition)) * e.structure.volume
for e in step2["entries"]
if e.composition.reduced_formula != working_ion
)
totalcomp = Composition({})
for comp in prev_rxn.products:
if comp.reduced_formula != working_ion:
totalcomp += comp * abs(prev_rxn.get_coeff(comp))
frac_charge = totalcomp.get_atomic_fraction(Element(working_ion))
totalcomp = Composition({})
for comp in curr_rxn.products:
if comp.reduced_formula != working_ion:
totalcomp += comp * abs(curr_rxn.get_coeff(comp))
frac_discharge = totalcomp.get_atomic_fraction(Element(working_ion))
rxn = rxn
entries_charge = step2["entries"]
entries_discharge = step1["entries"]
return ConversionVoltagePair( # pylint: disable=E1123
rxn=rxn,
voltage=voltage,
mAh=mAh,
vol_charge=vol_charge,
vol_discharge=vol_discharge,
mass_charge=mass_charge,
mass_discharge=mass_discharge,
frac_charge=frac_charge,
frac_discharge=frac_discharge,
entries_charge=entries_charge,
entries_discharge=entries_discharge,
working_ion_entry=working_ion_entry,
framework_formula=framework_formula,
)
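    # Note on the formulas in from_steps (added for clarity, hedged): the step
    # voltage is the negative change in working-ion chemical potential per unit
    # charge, V = (mu_ref - mu_step) / z with z the working-ion valence, and the
    # capacity converts the change in ion count to mAh through the elementary
    # charge, Avogadro's number and z -- which is what the Charge/Time/N_A
    # product above implements.  Sign and normalization conventions follow the
    # element profile returned by PhaseDiagram.get_element_profile.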
def __repr__(self):
output = [
f"Conversion voltage pair with working ion {self.working_ion_entry.composition.reduced_formula}",
f"Reaction : {self.rxn}",
f"V = {self.voltage}, mAh = {self.mAh}",
f"frac_charge = {self.frac_charge}, frac_discharge = {self.frac_discharge}",
f"mass_charge = {self.mass_charge}, mass_discharge = {self.mass_discharge}",
f"vol_charge = {self.vol_charge}, vol_discharge = {self.vol_discharge}",
]
return "\n".join(output)
def __str__(self):
return self.__repr__()
|
vorwerkc/pymatgen
|
pymatgen/apps/battery/conversion_battery.py
|
Python
|
mit
| 18,974
|
[
"pymatgen"
] |
30e067310e934ae1d0db32d6d3c187b71ac746bda3b93652ca755b0853104642
|
# This is a very simple implementation of the UCT Monte Carlo Tree Search algorithm in Python 2.7.
# The function uct(root_state, __iter_max) is towards the bottom of the code.
# It aims to have the clearest and simplest possible code, and for the sake of clarity, the code
# is orders of magnitude less efficient than it could be made, particularly by using a
# state.GetRandomMove() or state.DoRandomRollout() function.
#
# Example GameState classes for Nim, Gobang, and Othello are included to give some idea of how you
# can write your own GameState and use uct in your 2-player game. Change the game to be played in
# the uct_play_game() function at the bottom of the code.
#
# Written by Peter Cowling, Ed Powley, Daniel Whitehouse (University of York, UK) September 2012.
#
# Licence is granted to freely use and distribute for any sensible/legal purpose so long as this comment
# remains in any distributed code.
#
# For more information about Monte Carlo Tree Search check out our web site at www.mcts.ai
import optparse
import random
import sets
import math
ITER_MAX = 100
try:
import java.lang
PARALLEL_COUNT = java.lang.Runtime.getRuntime().availableProcessors()
except ImportError:
import multiprocessing
PARALLEL_COUNT = multiprocessing.cpu_count()
class GameState:
""" A state of the game, i.e. the game __board. These are the only functions which are
absolutely necessary to implement uct in any 2-player complete information deterministic
zero-sum game, although they can be enhanced and made quicker, for example by using a
GetRandomMove() function to generate a random move during rollout.
By convention the players are numbered 1 and 2.
"""
def __init__(self):
self.player_just_moved = 2 # At the root pretend the player just moved is player 2 - player 1 has the first move
def clone(self):
""" Create a deep clone of this game state.
"""
st = GameState()
st.player_just_moved = self.player_just_moved
return st
def do_move(self, move):
""" update a state by carrying out the given move.
Must update player_just_moved.
"""
self.player_just_moved = 3 - self.player_just_moved
def get_moves(self):
""" Get all possible moves from this state.
"""
def get_result(self, playerjm):
""" Get the game result from the viewpoint of playerjm.
"""
def __repr__(self):
""" Don't need this - but good style.
"""
pass
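# Hedged usage sketch (illustrative; `uct` and `uct_play_game` live further down
# in this file per the header comment, so their exact signatures are assumptions
# here): a self-play loop over any GameState subclass looks roughly like
#
#     state = NimState(15)                      # or OthelloState(8), etc.
#     while state.get_moves():
#         move = uct(state, ITER_MAX)           # pick a move by UCT search
#         state.do_move(move)
#     print(state.get_result(state.player_just_moved))
#
# i.e. only clone/do_move/get_moves/get_result from the template above need to
# be provided, as the GameState docstring says.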
class NimState:
""" A state of the game Nim. In Nim, players alternately take 1,2 or 3 __chips with the
winner being the player to take the last chip.
In Nim any initial state of the form 4n+k for k = 1,2,3 is a win for player 1
        (by choosing k chips).
Any initial state of the form 4n is a win for player 2.
"""
def __init__(self, chips):
self.player_just_moved = 2 # At the root pretend the player just moved is p2 - p1 has the first move
self.__chips = chips
def clone(self):
""" Create a deep clone of this game state.
"""
st = NimState(self.__chips)
st.player_just_moved = self.player_just_moved
return st
def do_move(self, move):
""" update a state by carrying out the given move.
Must update player_just_moved.
"""
        assert move >= 1 and move <= self.__chips and move == int(move)
self.__chips -= move
self.player_just_moved = 3 - self.player_just_moved
def get_moves(self):
""" Get all possible moves from this state.
"""
return range(1, min([4, self.__chips + 1]))
def get_result(self, playerjm):
""" Get the game result from the viewpoint of playerjm.
"""
assert self.__chips == 0
return 1.0 if self.player_just_moved == playerjm else 0.0
def __repr__(self):
s = "Chips:" + str(self.__chips) + " JustPlayed:" + str(self.player_just_moved)
return s
class OthelloState:
""" A state of the game of Othello, i.e. the game __board.
The __board is a 2D array where 0 = empty (.), 1 = player 1 (X), 2 = player 2 (O).
In Othello players alternately place pieces on a square __board - each piece played
has to sandwich opponent pieces between the piece played and pieces already on the
__board. Sandwiched pieces are flipped.
This implementation modifies the rules to allow variable sized square boards and
terminates the game as soon as the player about to move cannot make a move (whereas
the standard game allows for a pass move).
"""
__positions = [[(x, y) for x in range(s) for y in range(s)] for s in range(32)]
def __init__(self, size = 8):
assert size == int(size) and size % 2 == 0 # __size must be integral and even
self.player_just_moved = 2 # At the root pretend the player just moved is p2 - p1 has the first move
self.__board = [] # 0 = empty, 1 = player 1, 2 = player 2
self.__size = size
for y in range(size):
self.__board.append([0]*size)
self.__board[size/2][size/2] = self.__board[size/2-1][size/2-1] = 1
self.__board[size/2][size/2-1] = self.__board[size/2-1][size/2] = 2
def clone(self):
""" Create a deep clone of this game state.
"""
st = OthelloState()
st.player_just_moved = self.player_just_moved
st.__board = [self.__board[i][:] for i in range(self.__size)]
st.__size = self.__size
return st
def do_move(self, move):
""" update a state by carrying out the given move.
Must update playerToMove.
"""
(x,y) = (move[0],move[1])
assert x == int(x) and y == int(y) and self.is_on_board(x,y) and self.__board[x][y] == 0
m = self.get_all_sandwiched_counters(x,y)
self.player_just_moved = 3 - self.player_just_moved
self.__board[x][y] = self.player_just_moved
for (a,b) in m:
self.__board[a][b] = self.player_just_moved
def get_moves(self):
""" Get all possible moves from this state.
"""
return [(x,y) for (x, y) in self.__positions[self.__size] if self.__board[x][y] == 0 and self.exists_sandwiched_counter(x,y)]
def adjacent_enemy_directions(self,x,y):
""" Speeds up get_moves by only considering squares which are adjacent to an enemy-occupied square.
"""
return [(dx, dy) for (dx, dy) in [(0,+1),(+1,+1),(+1,0),(+1,-1),(0,-1),(-1,-1),(-1,0),(-1,+1)] if self.is_on_board(x+dx,y+dy) and self.__board[x+dx][y+dy] == self.player_just_moved]
def exists_sandwiched_counter(self,x,y):
""" Does there exist at least one counter which would be flipped if my counter was placed at (x,y)?
"""
for (dx,dy) in self.adjacent_enemy_directions(x,y):
if self.sandwiched_counters(x,y,dx,dy):
return True
return False
def get_all_sandwiched_counters(self, x, y):
""" Is (x,y) a possible move (i.e. opponent counters are sandwiched between (x,y) and my counter in some direction)?
"""
sandwiched = []
for (dx,dy) in self.adjacent_enemy_directions(x,y):
sandwiched.extend(self.sandwiched_counters(x,y,dx,dy))
return sandwiched
def sandwiched_counters(self, x, y, dx, dy):
""" Return the coordinates of all opponent counters sandwiched between (x,y) and my counter.
"""
x += dx
y += dy
sandwiched = []
while self.is_on_board(x,y) and self.__board[x][y] == self.player_just_moved:
sandwiched.append((x,y))
x += dx
y += dy
if self.is_on_board(x,y) and self.__board[x][y] == 3 - self.player_just_moved:
return sandwiched
else:
return [] # nothing sandwiched
def is_on_board(self, x, y):
return x >= 0 and x < self.__size and y >= 0 and y < self.__size
def get_result(self, playerjm):
""" Get the game result from the viewpoint of playerjm.
"""
jmcount = 0
notjmcount = 0
for (x, y) in self.__positions[self.__size]:
jmcount += self.__board[x][y] == playerjm
notjmcount += self.__board[x][y] == 3 - playerjm
if jmcount > notjmcount: return 1.0
elif notjmcount > jmcount: return 0.0
else: return 0.5 # draw
def __repr__(self):
Xs = 0
Os = 0
s = "JustPlayed:" + str(self.player_just_moved) + "\n"
for (x, y) in self.__positions[self.__size]:
s += ".XO"[self.__board[x][y]]
s += " "
s += ("\n" if y == self.__size - 1 else "")
Xs += self.__board[x][y] == 1
Os += self.__board[x][y] == 2
s += "Xs:" + str(Xs) + " Os:" + str(Os) + "\n"
return s
class GobangState:
""" A state of the game of Gobang, i.e. the game __board.
The __board is a 2D array where 0 = empty (.), 1 = player 1 (X), 2 = player 2 (O).
"""
__positions = [[(x, y) for x in range(s) for y in range(s)] for s in range(32)]
def __init__(self, size = 8, inrow = 5):
assert size == int(size) # __size must be integral
self.player_just_moved = 2 # At the root pretend the player just moved is p2 - p1 has the first move
self.__board = [] # 0 = empty, 1 = player 1, 2 = player 2
self.__size = size
self.__inrow = inrow
self.__terminated = False
for y in range(size):
self.__board.append([0]*size)
def clone(self):
""" Create a deep clone of this game state.
"""
st = GobangState()
st.player_just_moved = self.player_just_moved
st.__board = [self.__board[i][:] for i in range(self.__size)]
st.__size = self.__size
st.__inrow = self.__inrow
st.__terminated = self.__terminated
return st
def do_move(self, move):
""" update a state by carrying out the given move.
Must update playerToMove.
"""
(x,y) = (move[0],move[1])
assert x == int(x) and y == int(y) and self.is_on_board(x,y) and self.__board[x][y] == 0
self.player_just_moved = 3 - self.player_just_moved
self.__board[x][y] = self.player_just_moved
self.__terminated = self.check_termination(x, y)
def check_termination(self, x, y):
assert self.__board[x][y] == self.player_just_moved
for (dx, dy) in [(0,+1),(+1,+1),(+1,0),(+1,-1)]:
if self.count_stones_in_direction(x, y, dx, dy) + self.count_stones_in_direction(x, y, -dx, -dy) + 1 >= self.__inrow:
return True
return False
def count_stones_in_direction(self, x, y, dx, dy):
ret = 0
x += dx
y += dy
while self.is_on_board(x,y) and self.__board[x][y] == self.player_just_moved:
ret += 1
x += dx
y += dy
return ret
def get_moves(self):
""" Get all possible moves from this state.
"""
return [(x,y) for (x, y) in self.__positions[self.__size] if self.__board[x][y] == 0] if not self.__terminated else []
def is_on_board(self, x, y):
return x >= 0 and x < self.__size and y >= 0 and y < self.__size
def get_result(self, playerjm):
""" Get the game result from the viewpoint of playerjm.
"""
if self.__terminated:
return 1.0 if self.player_just_moved == playerjm else 0.0
else:
return 0.5
def __repr__(self):
s = "JustPlayed:" + str(self.player_just_moved) + "\n"
for (x, y) in self.__positions[self.__size]:
s += ".XO"[self.__board[x][y]]
s += " "
s += ("\n" if y == self.__size - 1 else "")
return s
def uct_play_game(uct, search_tree):
""" Play a sample game between two uct players
"""
state = [NimState(15), OthelloState(8), GobangState(8, 5)][1]
while state.get_moves():
print str(state)
print
if search_tree is not None:
m = uct(state, ITER_MAX, search_tree)
else:
m = uct(state, ITER_MAX)
print ">> Best move: " + str(m) + "\n"
state.do_move(m)
print "Game finished!\n\n" + str(state)
if state.get_result(state.player_just_moved) == 1.0:
print "Player " + str(state.player_just_moved) + " wins!"
elif state.get_result(state.player_just_moved) == 0.0:
print "Player " + str(3 - state.player_just_moved) + " wins!"
else: print "Nobody wins!"
class TreeNode:
"""
    A tree node is stored persistently in the SearchTree pool while the process runs,
    so its win/visit statistics can be reused across searches.
"""
def __init__(self, state):
self.__wins = 0.0
        self.__visits = 1.0
self.__state = state.clone()
self.__child_nodes = {}
self.__untried_moves = state.get_moves() # future child nodes
def state(self):
return self.__state
def child_nodes(self):
return self.__child_nodes
def untried_moves(self):
return self.__untried_moves
def player_just_moved(self):
return self.__state.player_just_moved
def value(self):
return self.__wins / self.__visits
def ucb(self, parent, constant):
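        # UCB1: mean reward (exploitation) plus an exploration bonus that grows with the
        # parent's visit count and shrinks with this node's own visits; `constant` scales
        # the bonus, and constant == 0.0 reduces this to pure exploitation (final move pick).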
return self.value() + constant * math.sqrt(2 * math.log(parent.__visits) / self.__visits)
def update(self, get_result):
self.__visits += 1.0
self.__wins += float(get_result)
def add_child(self, fm, n):
if fm in self.__untried_moves:
self.__untried_moves.remove(fm)
if fm not in self.__child_nodes:
self.__child_nodes[fm] = n
def traverse(self, fun):
for c in self.child_nodes().values():
c.traverse(fun)
fun(self)
def tree2string(self, indent):
s = self.indent_string(indent) + str(self)
for c in self.child_nodes().values():
s += c.tree2string(indent+1)
return s
def indent_string(self, indent):
s = "\n"
for i in range (1, indent+1):
s += "| "
return s
def __repr__(self):
return "W/V:" + str(self.__wins) + "/" + str(self.__visits) + "(" + str(int(1000 * self.value()) / 1000.0) + ")" + " U:" + str(self.__untried_moves)
class SearchTree:
def __init__(self):
self.__pool = {}
def get_node(self, state, tree_node_creator=None):
key = str(state)
creator = tree_node_creator if tree_node_creator is not None else TreeNode
if key not in self.__pool:
self.__pool[key] = creator(state)
return self.__pool[key]
def clean_sub_tree(self, root_node, ignored_node):
ignore_set = sets.Set()
ignored_node.traverse(lambda n: ignore_set.add(n))
for (k, n) in self.__pool.items():
if n not in ignore_set:
del self.__pool[k]
ignore_set.clear()
def size(self):
return len(self.__pool)
class SearchNode:
""" A node in the game tree. Note wins is always from the viewpoint of player_just_moved.
        Crashes if tree_node is not specified.
"""
def __init__(self, move=None, parent=None, tree_node=None):
self.move = move # the move that got us to this node - "None" for the root node
self.parent_node = parent # "None" for the root node
if parent:
self.depth = parent.depth + 1
else:
self.depth = 0
self.__tree_node = tree_node
def player_just_moved(self):
return self.__tree_node.player_just_moved()
def state(self):
return self.__tree_node.state()
def untried_moves(self):
return self.__tree_node.untried_moves()
def child_nodes(self):
return self.__tree_node.child_nodes()
def clean_sub_tree(self, ignored_node, tree):
tree.clean_sub_tree(self.__tree_node, ignored_node.__tree_node)
def uct_select_child(self, constant, search_node_creator=None):
""" Use the UCB1 formula to select a child node. Often a constant UCTK is applied so we have
lambda c: c.wins/c.visits + UCTK * sqrt(2*log(self.visits)/c.visits to vary the amount of
exploration versus exploitation.
"""
assert self.child_nodes()
creator = search_node_creator if search_node_creator is not None else SearchNode
(move, child) = max(self.child_nodes().items(), key=lambda (m, n): n.ucb(self.__tree_node, constant))
node = creator(move, self, child)
return node
def add_child(self, move, tree_node, search_node_creator=None):
""" Remove move from __untried_moves and add a new child node for this move.
Return the added child node
"""
creator = search_node_creator if search_node_creator is not None else SearchNode
self.__tree_node.add_child(move, tree_node)
node = creator(move, self, tree_node)
return node
def update(self, get_result):
""" update this node - one additional visit and get_result additional wins. get_result must be from the viewpoint of playerJustmoved.
"""
self.__tree_node.update(get_result)
def __repr__(self):
return "[M:" + str(self.move) + " " + str(self.__tree_node) + "]"
def tree2string(self, indent):
return self.__tree_node.tree2string(indent)
def children2string(self):
s = ""
for (k, v) in self.child_nodes().items():
s += "[M:" + str(k) + " " + str(v) + "]\n"
return s
def uct(root_state, iter_max, search_tree=None, verbose=True):
""" Conduct a uct search for __iter_max iterations starting from root_state.
Return the best move from the root_state.
Assumes 2 alternating players (player 1 starts), with game results in the range [0.0, 1.0]."""
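    # Note: search_tree.get_node() keys TreeNodes on str(state), so transpositions
    # (identical positions reached via different move orders) share one TreeNode and
    # its statistics, both within a search and across calls that reuse the same tree.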
should_clean = True
if search_tree is None:
search_tree = SearchTree()
should_clean = False
max_depth = 0
node_count = search_tree.size()
root_node = SearchNode(tree_node=search_tree.get_node(root_state))
for i in range(iter_max):
node = root_node
# Select
while not node.untried_moves() and node.child_nodes(): # node is fully expanded and non-terminal
node = node.uct_select_child(1.0)
state = node.state().clone()
# Expand
m = random.choice(node.untried_moves()) if node.untried_moves() else None
if m is not None: # if we can expand (i.e. state/node is non-terminal)
state.do_move(m)
node = node.add_child(m, search_tree.get_node(state)) # add child and descend search_tree
max_depth = max(node.depth, max_depth)
# Rollout - this can often be made orders of magnitude quicker using a state.GetRandomMove() function
moves = state.get_moves()
while moves: # while state is non-terminal
state.do_move(random.choice(moves))
moves = state.get_moves()
# Backpropagate
while node != None: # backpropagate from the expanded node and work back to the root node
node.update(state.get_result(node.player_just_moved())) # state is terminal. update node with get_result from POV of node.player_just_moved
node = node.parent_node
selected_node = root_node.uct_select_child(0.0)
if verbose:
print "Max search depth:", max_depth
print "Nodes generated:", str(search_tree.size() - node_count)
print
print root_node.children2string()
if should_clean:
root_node.clean_sub_tree(selected_node, search_tree)
if verbose:
print "Nodes remainning:", str(search_tree.size())
if verbose:
print
return selected_node.move
def main(uct, search_tree=None):
""" Play a single game to the end using uct for both players.
"""
global ITER_MAX
global PARALLEL_COUNT
usage = "Usage: %prog [options]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-i", "--itermax", type="int", dest="__iter_max", help="max iteration times")
parser.add_option("-p", "--parallel", type="int", dest="parallel_count", help="parallel count")
(options, args) = parser.parse_args()
ITER_MAX = options.__iter_max if options.__iter_max is not None else ITER_MAX
PARALLEL_COUNT = options.parallel_count if options.parallel_count is not None else PARALLEL_COUNT
print "Max iterations:", ITER_MAX
print "Parallel count:", PARALLEL_COUNT
print
uct_play_game(uct, search_tree)
|
aijunbai/uct
|
common.py
|
Python
|
bsd-2-clause
| 21,228
|
[
"VisIt"
] |
25a0e276bc73f64c818ed3c50226e7d0bf8eeb499a9860dd6862c0095be0fda5
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Truhlar) of non-hydrogen-transfer barrier height reactions.
| Geometries and Reaction energies from Truhlar and coworkers at site http://t1.chem.umn.edu/misc/database_group/database_therm_bh/non_H.htm.
- **cp** ``'off'``
- **rlxd** ``'off'``
- **subset**
- ``'small'``
- ``'large'``
"""
import re
import qcdb
# <<< NHTBH Database Module >>>
dbse = 'NHTBH'
isOS = 'true'
# <<< Database Members >>>
HRXN = range(1, 39)
HRXN_SM = [3, 4, 31, 32]
HRXN_LG = [36]
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
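# Example of the encoding used below: for reaction 1 (H + N2O going over the [HN2O] transition
# state), ACTV lists the three reagents involved and RXNM assigns them stoichiometric weights
# of -1, -1 and +1, so the forward barrier is E(N2OHts) - E(H) - E(N2O), with the reference
# value stored in BIND['NHTBH-1'].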
ACTV['%s-%s' % (dbse, 1)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'N2O' ),
'%s-%s-reagent' % (dbse, 'N2OHts') ]
RXNM['%s-%s' % (dbse, 1)] = dict(zip(ACTV['%s-%s' % (dbse, 1)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 2)] = ['%s-%s-reagent' % (dbse, 'OH' ),
'%s-%s-reagent' % (dbse, 'N2' ),
'%s-%s-reagent' % (dbse, 'N2OHts') ]
RXNM['%s-%s' % (dbse, 2)] = dict(zip(ACTV['%s-%s' % (dbse, 2)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 3)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'HF' ),
'%s-%s-reagent' % (dbse, 'HFHts') ]
RXNM['%s-%s' % (dbse, 3)] = dict(zip(ACTV['%s-%s' % (dbse, 3)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 4)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'HF' ),
'%s-%s-reagent' % (dbse, 'HFHts') ]
RXNM['%s-%s' % (dbse, 4)] = dict(zip(ACTV['%s-%s' % (dbse, 4)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 5)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'HCl' ),
'%s-%s-reagent' % (dbse, 'HClHts') ]
RXNM['%s-%s' % (dbse, 5)] = dict(zip(ACTV['%s-%s' % (dbse, 5)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 6)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'HCl' ),
'%s-%s-reagent' % (dbse, 'HClHts') ]
RXNM['%s-%s' % (dbse, 6)] = dict(zip(ACTV['%s-%s' % (dbse, 6)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 7)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'CH3F' ),
'%s-%s-reagent' % (dbse, 'HFCH3ts') ]
RXNM['%s-%s' % (dbse, 7)] = dict(zip(ACTV['%s-%s' % (dbse, 7)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 8)] = ['%s-%s-reagent' % (dbse, 'HF' ),
'%s-%s-reagent' % (dbse, 'CH3' ),
'%s-%s-reagent' % (dbse, 'HFCH3ts') ]
RXNM['%s-%s' % (dbse, 8)] = dict(zip(ACTV['%s-%s' % (dbse, 8)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 9)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'F2' ),
'%s-%s-reagent' % (dbse, 'HF2ts') ]
RXNM['%s-%s' % (dbse, 9)] = dict(zip(ACTV['%s-%s' % (dbse, 9)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 10)] = ['%s-%s-reagent' % (dbse, 'HF' ),
'%s-%s-reagent' % (dbse, 'F' ),
'%s-%s-reagent' % (dbse, 'HF2ts') ]
RXNM['%s-%s' % (dbse, 10)] = dict(zip(ACTV['%s-%s' % (dbse, 10)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 11)] = ['%s-%s-reagent' % (dbse, 'CH3' ),
'%s-%s-reagent' % (dbse, 'ClF' ),
'%s-%s-reagent' % (dbse, 'CH3FClts') ]
RXNM['%s-%s' % (dbse, 11)] = dict(zip(ACTV['%s-%s' % (dbse, 11)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 12)] = ['%s-%s-reagent' % (dbse, 'CH3F' ),
'%s-%s-reagent' % (dbse, 'Cl' ),
'%s-%s-reagent' % (dbse, 'CH3FClts') ]
RXNM['%s-%s' % (dbse, 12)] = dict(zip(ACTV['%s-%s' % (dbse, 12)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 13)] = ['%s-%s-reagent' % (dbse, 'F_anion'),
'%s-%s-reagent' % (dbse, 'CH3F' ),
'%s-%s-reagent' % (dbse, 'FCH3Fts') ]
RXNM['%s-%s' % (dbse, 13)] = dict(zip(ACTV['%s-%s' % (dbse, 13)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 14)] = ['%s-%s-reagent' % (dbse, 'F_anion'),
'%s-%s-reagent' % (dbse, 'CH3F' ),
'%s-%s-reagent' % (dbse, 'FCH3Fts') ]
RXNM['%s-%s' % (dbse, 14)] = dict(zip(ACTV['%s-%s' % (dbse, 14)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 15)] = ['%s-%s-reagent' % (dbse, 'FCH3Fcomp'),
'%s-%s-reagent' % (dbse, 'FCH3Fts' ) ]
RXNM['%s-%s' % (dbse, 15)] = dict(zip(ACTV['%s-%s' % (dbse, 15)], [-1, +1]))
ACTV['%s-%s' % (dbse, 16)] = ['%s-%s-reagent' % (dbse, 'FCH3Fcomp'),
'%s-%s-reagent' % (dbse, 'FCH3Fts' ) ]
RXNM['%s-%s' % (dbse, 16)] = dict(zip(ACTV['%s-%s' % (dbse, 16)], [-1, +1]))
ACTV['%s-%s' % (dbse, 17)] = ['%s-%s-reagent' % (dbse, 'Cl_anion' ),
'%s-%s-reagent' % (dbse, 'CH3Cl' ),
'%s-%s-reagent' % (dbse, 'ClCH3Clts') ]
RXNM['%s-%s' % (dbse, 17)] = dict(zip(ACTV['%s-%s' % (dbse, 17)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 18)] = ['%s-%s-reagent' % (dbse, 'Cl_anion' ),
'%s-%s-reagent' % (dbse, 'CH3Cl' ),
'%s-%s-reagent' % (dbse, 'ClCH3Clts') ]
RXNM['%s-%s' % (dbse, 18)] = dict(zip(ACTV['%s-%s' % (dbse, 18)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 19)] = ['%s-%s-reagent' % (dbse, 'ClCH3Clcomp'),
'%s-%s-reagent' % (dbse, 'ClCH3Clts' ) ]
RXNM['%s-%s' % (dbse, 19)] = dict(zip(ACTV['%s-%s' % (dbse, 19)], [-1, +1]))
ACTV['%s-%s' % (dbse, 20)] = ['%s-%s-reagent' % (dbse, 'ClCH3Clcomp'),
'%s-%s-reagent' % (dbse, 'ClCH3Clts' ) ]
RXNM['%s-%s' % (dbse, 20)] = dict(zip(ACTV['%s-%s' % (dbse, 20)], [-1, +1]))
ACTV['%s-%s' % (dbse, 21)] = ['%s-%s-reagent' % (dbse, 'F_anion' ),
'%s-%s-reagent' % (dbse, 'CH3Cl' ),
'%s-%s-reagent' % (dbse, 'FCH3Clts') ]
RXNM['%s-%s' % (dbse, 21)] = dict(zip(ACTV['%s-%s' % (dbse, 21)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 22)] = ['%s-%s-reagent' % (dbse, 'CH3F'),
'%s-%s-reagent' % (dbse, 'Cl_anion'),
'%s-%s-reagent' % (dbse, 'FCH3Clts') ]
RXNM['%s-%s' % (dbse, 22)] = dict(zip(ACTV['%s-%s' % (dbse, 22)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 23)] = ['%s-%s-reagent' % (dbse, 'FCH3Clcomp1'),
'%s-%s-reagent' % (dbse, 'FCH3Clts' ) ]
RXNM['%s-%s' % (dbse, 23)] = dict(zip(ACTV['%s-%s' % (dbse, 23)], [-1, +1]))
ACTV['%s-%s' % (dbse, 24)] = ['%s-%s-reagent' % (dbse, 'FCH3Clcomp2'),
'%s-%s-reagent' % (dbse, 'FCH3Clts' ) ]
RXNM['%s-%s' % (dbse, 24)] = dict(zip(ACTV['%s-%s' % (dbse, 24)], [-1, +1]))
ACTV['%s-%s' % (dbse, 25)] = ['%s-%s-reagent' % (dbse, 'OH_anion'),
'%s-%s-reagent' % (dbse, 'CH3F' ),
'%s-%s-reagent' % (dbse, 'HOCH3Fts') ]
RXNM['%s-%s' % (dbse, 25)] = dict(zip(ACTV['%s-%s' % (dbse, 25)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 26)] = ['%s-%s-reagent' % (dbse, 'CH3OH' ),
'%s-%s-reagent' % (dbse, 'F_anion' ),
'%s-%s-reagent' % (dbse, 'HOCH3Fts') ]
RXNM['%s-%s' % (dbse, 26)] = dict(zip(ACTV['%s-%s' % (dbse, 26)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 27)] = ['%s-%s-reagent' % (dbse, 'HOCH3Fcomp2'),
'%s-%s-reagent' % (dbse, 'HOCH3Fts' ) ]
RXNM['%s-%s' % (dbse, 27)] = dict(zip(ACTV['%s-%s' % (dbse, 27)], [-1, +1]))
ACTV['%s-%s' % (dbse, 28)] = ['%s-%s-reagent' % (dbse, 'HOCH3Fcomp1'),
'%s-%s-reagent' % (dbse, 'HOCH3Fts' ) ]
RXNM['%s-%s' % (dbse, 28)] = dict(zip(ACTV['%s-%s' % (dbse, 28)], [-1, +1]))
ACTV['%s-%s' % (dbse, 29)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'N2' ),
'%s-%s-reagent' % (dbse, 'HN2ts') ]
RXNM['%s-%s' % (dbse, 29)] = dict(zip(ACTV['%s-%s' % (dbse, 29)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 30)] = ['%s-%s-reagent' % (dbse, 'HN2' ),
'%s-%s-reagent' % (dbse, 'HN2ts') ]
RXNM['%s-%s' % (dbse, 30)] = dict(zip(ACTV['%s-%s' % (dbse, 30)], [-1, +1]))
ACTV['%s-%s' % (dbse, 31)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'CO' ),
'%s-%s-reagent' % (dbse, 'HCOts') ]
RXNM['%s-%s' % (dbse, 31)] = dict(zip(ACTV['%s-%s' % (dbse, 31)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 32)] = ['%s-%s-reagent' % (dbse, 'HCO' ),
'%s-%s-reagent' % (dbse, 'HCOts') ]
RXNM['%s-%s' % (dbse, 32)] = dict(zip(ACTV['%s-%s' % (dbse, 32)], [-1, +1]))
ACTV['%s-%s' % (dbse, 33)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'C2H4' ),
'%s-%s-reagent' % (dbse, 'C2H5ts') ]
RXNM['%s-%s' % (dbse, 33)] = dict(zip(ACTV['%s-%s' % (dbse, 33)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 34)] = ['%s-%s-reagent' % (dbse, 'C2H5' ),
'%s-%s-reagent' % (dbse, 'C2H5ts') ]
RXNM['%s-%s' % (dbse, 34)] = dict(zip(ACTV['%s-%s' % (dbse, 34)], [-1, +1]))
ACTV['%s-%s' % (dbse, 35)] = ['%s-%s-reagent' % (dbse, 'CH3' ),
'%s-%s-reagent' % (dbse, 'C2H4' ),
'%s-%s-reagent' % (dbse, 'C3H7ts') ]
RXNM['%s-%s' % (dbse, 35)] = dict(zip(ACTV['%s-%s' % (dbse, 35)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 36)] = ['%s-%s-reagent' % (dbse, 'C3H7' ),
'%s-%s-reagent' % (dbse, 'C3H7ts') ]
RXNM['%s-%s' % (dbse, 36)] = dict(zip(ACTV['%s-%s' % (dbse, 36)], [-1, +1]))
ACTV['%s-%s' % (dbse, 37)] = ['%s-%s-reagent' % (dbse, 'HCN' ),
'%s-%s-reagent' % (dbse, 'HCNts') ]
RXNM['%s-%s' % (dbse, 37)] = dict(zip(ACTV['%s-%s' % (dbse, 37)], [-1, +1]))
ACTV['%s-%s' % (dbse, 38)] = ['%s-%s-reagent' % (dbse, 'HNC' ),
'%s-%s-reagent' % (dbse, 'HCNts') ]
RXNM['%s-%s' % (dbse, 38)] = dict(zip(ACTV['%s-%s' % (dbse, 38)], [-1, +1]))
# <<< Reference Values >>>
BIND = {}
BIND['%s-%s' % (dbse, 1)] = 18.14
BIND['%s-%s' % (dbse, 2)] = 83.22
BIND['%s-%s' % (dbse, 3)] = 42.18
BIND['%s-%s' % (dbse, 4)] = 42.18
BIND['%s-%s' % (dbse, 5)] = 18.00
BIND['%s-%s' % (dbse, 6)] = 18.00
BIND['%s-%s' % (dbse, 7)] = 30.38
BIND['%s-%s' % (dbse, 8)] = 57.02
BIND['%s-%s' % (dbse, 9)] = 2.27
BIND['%s-%s' % (dbse, 10)] = 106.18
BIND['%s-%s' % (dbse, 11)] = 7.43
BIND['%s-%s' % (dbse, 12)] = 60.17
BIND['%s-%s' % (dbse, 13)] = -0.34
BIND['%s-%s' % (dbse, 14)] = -0.34
BIND['%s-%s' % (dbse, 15)] = 13.38
BIND['%s-%s' % (dbse, 16)] = 13.38
BIND['%s-%s' % (dbse, 17)] = 3.10
BIND['%s-%s' % (dbse, 18)] = 3.10
BIND['%s-%s' % (dbse, 19)] = 13.61
BIND['%s-%s' % (dbse, 20)] = 13.61
BIND['%s-%s' % (dbse, 21)] = -12.54
BIND['%s-%s' % (dbse, 22)] = 20.11
BIND['%s-%s' % (dbse, 23)] = 2.89
BIND['%s-%s' % (dbse, 24)] = 29.62
BIND['%s-%s' % (dbse, 25)] = -2.78
BIND['%s-%s' % (dbse, 26)] = 17.33
BIND['%s-%s' % (dbse, 27)] = 10.96
BIND['%s-%s' % (dbse, 28)] = 47.20
BIND['%s-%s' % (dbse, 29)] = 14.69
BIND['%s-%s' % (dbse, 30)] = 10.72
BIND['%s-%s' % (dbse, 31)] = 3.17
BIND['%s-%s' % (dbse, 32)] = 22.68
BIND['%s-%s' % (dbse, 33)] = 1.72
BIND['%s-%s' % (dbse, 34)] = 41.75
BIND['%s-%s' % (dbse, 35)] = 6.85
BIND['%s-%s' % (dbse, 36)] = 32.97
BIND['%s-%s' % (dbse, 37)] = 48.16
BIND['%s-%s' % (dbse, 38)] = 33.11
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 1)] = '{ H + N2O <-- [HN2O] } --> OH + N2'
TAGL['%s-%s' % (dbse, 2)] = 'H + N2O <-- { [HN2O] --> OH + N2 }'
TAGL['%s-%s' % (dbse, 3)] = '{ H + FH <-- [HFH] } --> HF + H'
TAGL['%s-%s' % (dbse, 4)] = 'H + FH <-- { [HFH] --> HF + H }'
TAGL['%s-%s' % (dbse, 5)] = '{ H + ClH <-- [HClH] } --> HCl + H'
TAGL['%s-%s' % (dbse, 6)] = 'H + ClH <-- { [HClH] --> HCl + H }'
TAGL['%s-%s' % (dbse, 7)] = '{ H + FCH3 <-- [HFCH3] } --> HF + CH3'
TAGL['%s-%s' % (dbse, 8)] = 'H + FCH3 <-- { [HFCH3] --> HF + CH3 }'
TAGL['%s-%s' % (dbse, 9)] = '{ H + F2 <-- [HF2] } --> HF + F'
TAGL['%s-%s' % (dbse, 10)] = 'H + F2 <-- { [HF2] --> HF + F }'
TAGL['%s-%s' % (dbse, 11)] = '{ CH3 + FCl <-- [CH3FCl] } --> CH3F + Cl'
TAGL['%s-%s' % (dbse, 12)] = 'CH3 + FCl <-- { [CH3FCl] --> CH3F + Cl }'
TAGL['%s-%s' % (dbse, 13)] = '{ F- + CH3F <-- [FCH3F-] } --> FCH3 + F-'
TAGL['%s-%s' % (dbse, 14)] = 'F- + CH3F <-- { [FCH3F-] --> FCH3 + F- }'
TAGL['%s-%s' % (dbse, 15)] = '{ F- ... CH3F <-- [FCH3F-] } --> FCH3 ... F-'
TAGL['%s-%s' % (dbse, 16)] = 'F- ... CH3F <-- { [FCH3F-] --> FCH3 ... F- }'
TAGL['%s-%s' % (dbse, 17)] = '{ Cl- + CH3Cl <-- [ClCH3Cl-] } --> ClCH3 + Cl-'
TAGL['%s-%s' % (dbse, 18)] = 'Cl- + CH3Cl <-- { [ClCH3Cl-] --> ClCH3 + Cl- }'
TAGL['%s-%s' % (dbse, 19)] = '{ Cl- ... CH3Cl <-- [ClCH3Cl-] } --> ClCH3 ... Cl-'
TAGL['%s-%s' % (dbse, 20)] = 'Cl- ... CH3Cl <-- { [ClCH3Cl-] --> ClCH3 ... Cl- }'
TAGL['%s-%s' % (dbse, 21)] = '{ F- + CH3Cl <-- [FCH3Cl-] } --> FCH3 + Cl-'
TAGL['%s-%s' % (dbse, 22)] = 'F- + CH3Cl <-- { [FCH3Cl-] --> FCH3 + Cl- }'
TAGL['%s-%s' % (dbse, 23)] = '{ F- ... CH3Cl <-- [FCH3Cl-] } --> FCH3 ... Cl-'
TAGL['%s-%s' % (dbse, 24)] = 'F- ... CH3Cl <-- { [FCH3Cl-] --> FCH3 ... Cl- }'
TAGL['%s-%s' % (dbse, 25)] = '{ OH- + CH3F <-- [OHCH3F-] } --> HOCH3 + F-'
TAGL['%s-%s' % (dbse, 26)] = 'OH- + CH3F <-- { [OHCH3F-] --> HOCH3 + F- }'
TAGL['%s-%s' % (dbse, 27)] = '{ OH- ... CH3F <-- [OHCH3F-] } --> HOCH3 ... F-'
TAGL['%s-%s' % (dbse, 28)] = 'OH- ... CH3F <-- { [OHCH3F-] --> HOCH3 ... F- }'
TAGL['%s-%s' % (dbse, 29)] = '{ H + N2 <-- [HN2] } --> HN2'
TAGL['%s-%s' % (dbse, 30)] = 'H + N2 <-- { [HN2] --> HN2 }'
TAGL['%s-%s' % (dbse, 31)] = '{ H + CO <-- [HCO] } --> HCO'
TAGL['%s-%s' % (dbse, 32)] = 'H + CO <-- { [HCO] --> HCO }'
TAGL['%s-%s' % (dbse, 33)] = '{ H + C2H4 <-- [HC2H4] } --> CH3CH2'
TAGL['%s-%s' % (dbse, 34)] = 'H + C2H4 <-- { [HC2H4] --> CH3CH2 }'
TAGL['%s-%s' % (dbse, 35)] = '{ CH3 + C2H4 <-- [CH3C2H4] } --> CH3CH2CH2'
TAGL['%s-%s' % (dbse, 36)] = 'CH3 + C2H4 <-- { [CH3C2H4] --> CH3CH2CH2 }'
TAGL['%s-%s' % (dbse, 37)] = '{ HCN <-- [HCN] } --> HNC'
TAGL['%s-%s' % (dbse, 38)] = 'HCN <-- { [HCN] --> HNC }'
TAGL['%s-%s-reagent' % (dbse, 'C2H4' )] = 'Ethene'
TAGL['%s-%s-reagent' % (dbse, 'C2H5ts' )] = 'Transition State of H + C2H4 <--> CH3CH2'
TAGL['%s-%s-reagent' % (dbse, 'C2H5' )] = 'C2H5'
TAGL['%s-%s-reagent' % (dbse, 'C3H7ts' )] = 'Transition State of CH3 + C2H4 <--> CH3CH2CH2'
TAGL['%s-%s-reagent' % (dbse, 'C3H7' )] = 'C3H7'
TAGL['%s-%s-reagent' % (dbse, 'CH3Cl' )] = 'CH3Cl'
TAGL['%s-%s-reagent' % (dbse, 'CH3FClts'  )] = 'Transition State of CH3 + FCl <--> CH3F + Cl'
TAGL['%s-%s-reagent' % (dbse, 'CH3F' )] = 'CH3F'
TAGL['%s-%s-reagent' % (dbse, 'CH3OH' )] = 'Methanol'
TAGL['%s-%s-reagent' % (dbse, 'CH3' )] = 'CH3'
TAGL['%s-%s-reagent' % (dbse, 'ClCH3Clcomp')] = 'Complex of Cl- + CH3Cl'
TAGL['%s-%s-reagent' % (dbse, 'ClCH3Clts' )] = 'Transition State of Cl- + CH3Cl <--> ClCH3 + Cl-'
TAGL['%s-%s-reagent' % (dbse, 'ClF' )] = 'ClF'
TAGL['%s-%s-reagent' % (dbse, 'Cl_anion' )] = 'Chloride Anion'
TAGL['%s-%s-reagent' % (dbse, 'Cl' )] = 'Chlorine Atom'
TAGL['%s-%s-reagent' % (dbse, 'CO' )] = 'Carbon Monoxide'
TAGL['%s-%s-reagent' % (dbse, 'F2' )] = 'Fluorine Molecule'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Clcomp1')] = 'Complex of F- + CH3Cl'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Clcomp2')] = 'Complex of FCH3 + Cl-'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Clts' )] = 'Transition State of F- + CH3Cl <--> FCH3 + Cl-'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Fcomp' )] = 'Complex of F- + CH3F'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Fts'   )] = 'Transition State of F- + CH3F <--> FCH3 + F-'
TAGL['%s-%s-reagent' % (dbse, 'F_anion' )] = 'Fluoride Anion'
TAGL['%s-%s-reagent' % (dbse, 'F' )] = 'Fluorine Atom'
TAGL['%s-%s-reagent' % (dbse, 'HClHts' )] = 'Transition State of H + ClH <--> HCl + H'
TAGL['%s-%s-reagent' % (dbse, 'HCl' )] = 'Hydrogen Chloride'
TAGL['%s-%s-reagent' % (dbse, 'HCNts' )] = 'Transition State of HCN <--> HNC'
TAGL['%s-%s-reagent' % (dbse, 'HCN' )] = 'Hydrogen Cyanide'
TAGL['%s-%s-reagent' % (dbse, 'HCOts' )] = 'Transition State of H + CO <--> HCO'
TAGL['%s-%s-reagent' % (dbse, 'HCO' )] = 'HCO'
TAGL['%s-%s-reagent' % (dbse, 'HF2ts' )] = 'Transition State of H + F2 <--> HF + F'
TAGL['%s-%s-reagent' % (dbse, 'HFCH3ts' )] = 'Transition State of H + FCH3 <--> HF + CH3'
TAGL['%s-%s-reagent' % (dbse, 'HFHts' )] = 'Transition State of H + FH <--> HF + H'
TAGL['%s-%s-reagent' % (dbse, 'HF' )] = 'Hydrogen Fluoride'
TAGL['%s-%s-reagent' % (dbse, 'HN2ts' )] = 'Transition State of H + N2 <--> HN2'
TAGL['%s-%s-reagent' % (dbse, 'HN2' )] = 'HN2'
TAGL['%s-%s-reagent' % (dbse, 'HNC' )] = 'HNC'
TAGL['%s-%s-reagent' % (dbse, 'HOCH3Fcomp1')] = 'Complex of HOCH3 + F-'
TAGL['%s-%s-reagent' % (dbse, 'HOCH3Fcomp2')] = 'Complex of OH- + CH3F'
TAGL['%s-%s-reagent' % (dbse, 'HOCH3Fts' )] = 'Transition State of OH- + CH3F <--> HOCH3 + F-'
TAGL['%s-%s-reagent' % (dbse, 'H' )] = 'Hydrogen Atom'
TAGL['%s-%s-reagent' % (dbse, 'N2OHts' )] = 'Transition State of H + N2O <--> OH + N2'
TAGL['%s-%s-reagent' % (dbse, 'N2O' )] = 'N2O'
TAGL['%s-%s-reagent' % (dbse, 'N2' )] = 'Nitrogen Molecule'
TAGL['%s-%s-reagent' % (dbse, 'OH_anion' )] = 'Hydroxide Anion'
TAGL['%s-%s-reagent' % (dbse, 'OH' )] = 'OH'
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-reagent' % (dbse, 'C2H4')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 0.66559300
C 0.00000000 -0.00000000 -0.66559300
H 0.00000000 0.92149500 1.23166800
H 0.00000000 -0.92149500 1.23166800
H 0.00000000 0.92149500 -1.23166800
H 0.00000000 -0.92149500 -1.23166800
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C2H5ts')] = qcdb.Molecule("""
0 2
C -0.56787700 0.00005100 -0.21895800
C 0.75113900 -0.00003600 0.04193200
H -1.49388400 -0.00048800 1.53176500
H -1.10169100 0.92065100 -0.40862600
H -1.10202200 -0.92023400 -0.40911000
H 1.29912800 -0.92234400 0.17376300
H 1.29889900 0.92232500 0.17436300
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C2H5')] = qcdb.Molecule("""
0 2
C -0.25871900 -0.81682900 0.00000000
C -0.25098700 0.67419100 0.00000000
H 0.75883000 -1.22593900 0.00000000
H -0.75883000 -1.21386600 0.88341900
H -0.75883000 -1.21386600 -0.88341900
H -0.17002100 1.22593900 -0.92432000
H -0.17002100 1.22593900 0.92432000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C3H7ts')] = qcdb.Molecule("""
0 2
C -0.47213200 0.64593300 -0.00004300
C -1.38261700 -0.36388500 -0.00000200
H -0.23204400 1.16457500 -0.91726400
H -0.23234200 1.16475900 0.91716900
H -1.72712800 -0.80981000 0.92251900
H -1.72693600 -0.81013100 -0.92243500
C 1.61201500 -0.24218900 0.00003500
H 2.19518200 0.66867100 -0.00126900
H 1.58942300 -0.80961900 -0.91863200
H 1.59024500 -0.80759800 0.91996900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C3H7')] = qcdb.Molecule("""
0 2
C 1.20844000 -0.28718900 0.00005700
C -0.06535900 0.57613200 -0.00005700
C -1.31478700 -0.23951800 -0.00001100
H 1.24136900 -0.92839500 0.88123400
H 1.24139400 -0.92858600 -0.88098000
H 2.10187100 0.33872700 0.00000000
H -0.04821800 1.22685100 -0.87708900
H -0.04827200 1.22703700 0.87683400
H -1.72914600 -0.61577100 0.92443500
H -1.72876300 -0.61641500 -0.92436900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3Cl')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 -1.12588600
Cl 0.00000000 0.00000000 0.65683000
H 0.00000000 1.02799300 -1.47026400
H 0.89026800 -0.51399700 -1.47026400
H -0.89026800 -0.51399700 -1.47026400
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3FClts')] = qcdb.Molecule("""
0 2
Cl 1.45474900 -0.00123700 -0.00004000
F -0.32358700 0.00463100 0.00012400
C -2.38741800 -0.00214700 -0.00007300
H -2.49508600 -0.85536100 -0.64940400
H -2.49731300 -0.13867300 1.06313900
H -2.50153700 0.98626900 -0.41373400
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3F')] = qcdb.Molecule("""
0 1
C -0.63207400 0.00000100 -0.00000000
F 0.74911700 0.00000200 -0.00000200
H -0.98318200 -0.33848900 0.97262500
H -0.98322200 1.01155300 -0.19317200
H -0.98320300 -0.67308400 -0.77943700
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3OH')] = qcdb.Molecule("""
0 1
C -0.04642300 0.66306900 0.00000000
O -0.04642300 -0.75506300 0.00000000
H -1.08695600 0.97593800 0.00000000
H 0.86059200 -1.05703900 0.00000000
H 0.43814500 1.07159400 0.88953900
H 0.43814500 1.07159400 -0.88953900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3')] = qcdb.Molecule("""
0 2
C 0.00000000 0.00000000 0.00000000
H 1.07731727 0.00000000 0.00000000
H -0.53865863 0.93298412 0.00000000
H -0.53865863 -0.93298412 -0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'ClCH3Clcomp')] = qcdb.Molecule("""
-1 1
Cl 0.00000000 0.00000000 -2.38473500
C 0.00000000 0.00000000 -0.56633100
H 0.00000000 1.02506600 -0.22437900
H -0.88773400 -0.51253300 -0.22437900
H 0.88773400 -0.51253300 -0.22437900
Cl 0.00000000 0.00000000 2.62421300
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'ClCH3Clts')] = qcdb.Molecule("""
-1 1
Cl 2.32258100 -0.00013200 0.00014000
C -0.00008500 0.00049100 -0.00050900
H 0.00007700 -0.74429000 -0.76760500
H -0.00032000 -0.29144300 1.02802100
H 0.00008100 1.03721800 -0.26195900
Cl -2.32254200 -0.00012900 0.00013000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'ClF')] = qcdb.Molecule("""
0 1
F 0.00000000 0.00000000 0.00000000
Cl 1.63033021 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'Cl_anion')] = qcdb.Molecule("""
-1 1
Cl 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'Cl')] = qcdb.Molecule("""
0 2
Cl 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CO')] = qcdb.Molecule("""
0 1
O 0.00000000 0.00000000 0.00000000
C 1.12960815 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'F2')] = qcdb.Molecule("""
0 1
F 0.00000000 0.00000000 0.00000000
F 1.39520410 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Clcomp1')] = qcdb.Molecule("""
-1 1
Cl 0.00000000 0.00000000 1.62313800
C 0.00000000 0.00000000 -0.22735800
H 0.00000000 1.02632100 -0.55514100
H 0.88882000 -0.51316000 -0.55514100
H -0.88882000 -0.51316000 -0.55514100
F 0.00000000 0.00000000 -2.72930800
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Clcomp2')] = qcdb.Molecule("""
-1 1
F 0.00000000 0.00000000 -2.64853900
C 0.00000000 0.00000000 -1.24017000
H 0.00000000 1.02471900 -0.88640600
H -0.88743200 -0.51235900 -0.88640600
H 0.88743200 -0.51235900 -0.88640600
Cl 0.00000000 0.00000000 1.99629900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Clts')] = qcdb.Molecule("""
-1 1
F 0.00000000 0.00000000 -2.53792900
C 0.00000000 0.00000000 -0.48837200
H 0.00000000 1.06208700 -0.61497200
H -0.91979500 -0.53104400 -0.61497200
H 0.91979500 -0.53104400 -0.61497200
Cl 0.00000000 0.00000000 1.62450100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Fcomp')] = qcdb.Molecule("""
-1 1
F 0.00000000 0.00000000 -1.84762600
C 0.00000000 0.00000000 -0.42187300
H 0.00000000 1.02358100 -0.07384300
H -0.88644700 -0.51179100 -0.07384300
H 0.88644700 -0.51179100 -0.07384300
F 0.00000000 0.00000000 2.15348900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Fts')] = qcdb.Molecule("""
-1 1
F 0.00309800 -0.01889200 -0.01545600
C -0.00014900 -0.00014000 1.80785700
H 1.06944900 0.00170800 1.80976100
H -0.53660700 0.92513300 1.79693500
H -0.53260100 -0.92778300 1.81705800
F -0.00319100 0.01997400 3.63184500
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'F_anion')] = qcdb.Molecule("""
-1 1
F 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'F')] = qcdb.Molecule("""
0 2
F 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HClHts')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 1.48580000
Cl 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 -1.48580000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCl')] = qcdb.Molecule("""
0 1
Cl 0.00000000 0.00000000 0.00000000
H 1.27444789 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCNts')] = qcdb.Molecule("""
0 1
C 0.08031900 0.62025800 0.00000000
N 0.08031900 -0.56809500 0.00000000
H -1.04414800 0.25512100 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCN')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 -0.50036500
N 0.00000000 0.00000000 0.65264000
H 0.00000000 0.00000000 -1.56629100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCOts')] = qcdb.Molecule("""
0 2
H -1.52086400 1.38882900 0.00000000
C 0.10863300 0.54932900 0.00000000
O 0.10863300 -0.58560100 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCO')] = qcdb.Molecule("""
0 2
H -0.00905700 0.00000000 -0.00708600
C -0.00703500 0.00000000 1.10967800
O 0.95604000 0.00000000 1.78565600
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HF2ts')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 -2.23127300
F 0.00000000 0.00000000 -0.61621800
F 0.00000000 0.00000000 0.86413800
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HFCH3ts')] = qcdb.Molecule("""
0 2
H -0.03976400 0.00000000 0.04410600
F -0.04932100 0.00000000 1.28255400
C -0.06154400 0.00000000 2.95115700
H 0.99049700 0.00000000 3.19427500
H -0.59007000 0.91235500 3.18348100
H -0.59007000 -0.91235500 3.18348100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HFHts')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 1.13721700
F 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 -1.13721700
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HF')] = qcdb.Molecule("""
0 1
F 0.00000000 0.00000000 0.00000000
H 0.91538107 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HN2ts')] = qcdb.Molecule("""
0 2
N 0.00000000 0.00000000 0.00000000
N 1.12281100 0.00000000 0.00000000
H 1.78433286 1.26844651 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HN2')] = qcdb.Molecule("""
0 2
N 0.00000000 0.00000000 0.00000000
N 1.17820000 0.00000000 0.00000000
H 1.64496947 0.93663681 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HNC')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 -0.73724800
N 0.00000000 0.00000000 0.43208900
H 0.00000000 0.00000000 1.42696000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HOCH3Fcomp1')] = qcdb.Molecule("""
-1 1
C -1.29799700 -0.38951800 -0.00003400
O -0.47722300 0.72802100 0.00005400
H -2.35192200 -0.08023200 -0.00863900
H -1.14085300 -1.03582100 -0.87810100
H -1.15317800 -1.02751300 0.88635900
H 0.51058000 0.37116000 0.00024300
F 1.74901600 -0.19051700 -0.00001000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HOCH3Fcomp2')] = qcdb.Molecule("""
-1 1
F 0.00037100 -2.46834000 0.02139000
C -0.27664200 -1.07441800 -0.00269000
H 0.64929000 -0.51650000 -0.00901600
H -0.84198900 -0.84711900 -0.89707500
H -0.85102800 -0.82658900 0.88141700
O -0.30171300 1.58252400 -0.20654400
H -0.60511200 2.49243400 -0.16430500
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HOCH3Fts')] = qcdb.Molecule("""
-1 1
F 0.02253600 -0.00745300 0.00552900
C -0.01842000 0.00503700 1.76492500
H 1.04805000 0.00524000 1.85414600
H -0.54781900 0.93470700 1.79222400
H -0.54895500 -0.92343300 1.80576200
O 0.00126500 0.01920000 3.75059900
H -0.92676300 0.03161500 3.99758100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'H')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'N2OHts')] = qcdb.Molecule("""
0 2
H -0.30328600 -1.93071200 0.00000000
O -0.86100600 -0.62152600 0.00000000
N 0.00000000 0.25702700 0.00000000
N 1.02733300 0.72910400 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'N2O')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 0.00000000
N 1.12056262 0.00000000 0.00000000
O 2.30761092 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'N2')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 0.00000000
N 1.09710935 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'OH_anion')] = qcdb.Molecule("""
-1 1
O 0.00000000 0.00000000 0.00000000
H 0.96204317 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'OH')] = qcdb.Molecule("""
0 2
O 0.00000000 0.00000000 0.00000000
H 0.96889819 0.00000000 0.00000000
units angstrom
""")
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-H-reagent' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-N2O-reagent' ] = 60.94607766
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-N2OHts-reagent' ] = 65.68644495
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-OH-reagent' ] = 4.36931115
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-N2-reagent' ] = 23.63454766
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HF-reagent' ] = 5.20285489
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HFHts-reagent' ] = 8.60854029
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HCl-reagent' ] = 7.05875275
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HClHts-reagent' ] = 12.28739648
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-CH3F-reagent' ] = 37.42304655
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HFCH3ts-reagent' ] = 38.79779200
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-CH3-reagent' ] = 9.69236444
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-F2-reagent' ] = 30.72192369
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HF2ts-reagent' ] = 33.44223409
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-F-reagent' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-ClF-reagent' ] = 49.66117442
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-CH3FClts-reagent' ] = 95.59999471
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-Cl-reagent' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-F_anion-reagent' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-FCH3Fts-reagent' ] = 66.36618410
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-FCH3Fcomp-reagent' ] = 64.36230187
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-Cl_anion-reagent' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-CH3Cl-reagent' ] = 51.37857642
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-ClCH3Clts-reagent' ] = 110.27962403
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-ClCH3Clcomp-reagent' ] = 107.04230687
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-FCH3Clts-reagent' ] = 86.10066616
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-FCH3Clcomp1-reagent' ] = 86.07639241
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-FCH3Clcomp2-reagent' ] = 79.90981772
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-OH_anion-reagent' ] = 4.40044460
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HOCH3Fts-reagent' ] = 69.00558005
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-CH3OH-reagent' ] = 40.39337431
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HOCH3Fcomp2-reagent' ] = 67.43072234
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HOCH3Fcomp1-reagent' ] = 73.17394204
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HN2ts-reagent' ] = 27.37488066
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HN2-reagent' ] = 27.50439999
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-CO-reagent' ] = 22.48612142
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HCOts-reagent' ] = 25.76648888
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HCO-reagent' ] = 26.50985233
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-C2H4-reagent' ] = 33.42351838
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-C2H5ts-reagent' ] = 36.85248528
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-C2H5-reagent' ] = 36.97781691
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-C3H7ts-reagent' ] = 70.26842595
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-C3H7-reagent' ] = 75.86161869
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HCN-reagent' ] = 23.92417344
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HCNts-reagent' ] = 24.04634812
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HNC-reagent' ] = 24.19729155
|
CDSherrill/psi4
|
psi4/share/psi4/databases/NHTBH.py
|
Python
|
lgpl-3.0
| 36,605
|
[
"Psi4"
] |
7509a27b7042c25619a4c41cff7833bb5baabae4fcef6e31e47dac1a28e884d7
|
import os, sys, re, inspect, types, errno, pprint, subprocess, io, shutil, time, copy
import path_tool
path_tool.activate_module('FactorySystem')
path_tool.activate_module('argparse')
from ParseGetPot import ParseGetPot
from socket import gethostname
#from options import *
from util import *
from RunParallel import RunParallel
from CSVDiffer import CSVDiffer
from XMLDiffer import XMLDiffer
from Tester import Tester
from PetscJacobianTester import PetscJacobianTester
from InputParameters import InputParameters
from Factory import Factory
from Parser import Parser
from Warehouse import Warehouse
import argparse
from optparse import OptionParser, OptionGroup, Values
from timeit import default_timer as clock
class TestHarness:
@staticmethod
def buildAndRun(argv, app_name, moose_dir):
if '--store-timing' in argv:
harness = TestTimer(argv, app_name, moose_dir)
else:
harness = TestHarness(argv, app_name, moose_dir)
harness.findAndRunTests()
def __init__(self, argv, app_name, moose_dir):
self.factory = Factory()
# Get dependant applications and load dynamic tester plugins
# If applications have new testers, we expect to find them in <app_dir>/scripts/TestHarness/testers
dirs = [os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))]
sys.path.append(os.path.join(moose_dir, 'framework', 'scripts')) # For find_dep_apps.py
# Use the find_dep_apps script to get the dependant applications for an app
import find_dep_apps
depend_app_dirs = find_dep_apps.findDepApps(app_name)
dirs.extend([os.path.join(my_dir, 'scripts', 'TestHarness') for my_dir in depend_app_dirs.split('\n')])
# Finally load the plugins!
self.factory.loadPlugins(dirs, 'testers', Tester)
self.test_table = []
self.num_passed = 0
self.num_failed = 0
self.num_skipped = 0
self.num_pending = 0
self.host_name = gethostname()
self.moose_dir = moose_dir
self.base_dir = os.getcwd()
self.run_tests_dir = os.path.abspath('.')
self.code = '2d2d6769726c2d6d6f6465'
self.error_code = 0x0
# Assume libmesh is a peer directory to MOOSE if not defined
if os.environ.has_key("LIBMESH_DIR"):
self.libmesh_dir = os.environ['LIBMESH_DIR']
else:
self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')
self.file = None
# Parse arguments
self.parseCLArgs(argv)
self.checks = {}
self.checks['platform'] = getPlatforms()
# The TestHarness doesn't strictly require the existence of libMesh in order to run. Here we allow the user
# to select whether they want to probe for libMesh configuration options.
if self.options.skip_config_checks:
self.checks['compiler'] = set(['ALL'])
self.checks['petsc_version'] = 'N/A'
self.checks['library_mode'] = set(['ALL'])
self.checks['mesh_mode'] = set(['ALL'])
self.checks['dtk'] = set(['ALL'])
self.checks['unique_ids'] = set(['ALL'])
self.checks['vtk'] = set(['ALL'])
self.checks['tecplot'] = set(['ALL'])
self.checks['dof_id_bytes'] = set(['ALL'])
self.checks['petsc_debug'] = set(['ALL'])
self.checks['curl'] = set(['ALL'])
self.checks['tbb'] = set(['ALL'])
else:
self.checks['compiler'] = getCompilers(self.libmesh_dir)
self.checks['petsc_version'] = getPetscVersion(self.libmesh_dir)
self.checks['library_mode'] = getSharedOption(self.libmesh_dir)
self.checks['mesh_mode'] = getLibMeshConfigOption(self.libmesh_dir, 'mesh_mode')
self.checks['dtk'] = getLibMeshConfigOption(self.libmesh_dir, 'dtk')
self.checks['unique_ids'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_ids')
self.checks['vtk'] = getLibMeshConfigOption(self.libmesh_dir, 'vtk')
self.checks['tecplot'] = getLibMeshConfigOption(self.libmesh_dir, 'tecplot')
self.checks['dof_id_bytes'] = getLibMeshConfigOption(self.libmesh_dir, 'dof_id_bytes')
self.checks['petsc_debug'] = getLibMeshConfigOption(self.libmesh_dir, 'petsc_debug')
self.checks['curl'] = getLibMeshConfigOption(self.libmesh_dir, 'curl')
self.checks['tbb'] = getLibMeshConfigOption(self.libmesh_dir, 'tbb')
# Override the MESH_MODE option if using '--parallel-mesh' option
if self.options.parallel_mesh == True or \
(self.options.cli_args != None and \
self.options.cli_args.find('--parallel-mesh') != -1):
option_set = set(['ALL', 'PARALLEL'])
self.checks['mesh_mode'] = option_set
method = set(['ALL', self.options.method.upper()])
self.checks['method'] = method
self.initialize(argv, app_name)
"""
Recursively walks the current tree looking for tests to run
Error codes:
0x0 - Success
0x0* - Parser error
0x1* - TestHarness error
"""
def findAndRunTests(self):
self.error_code = 0x0
self.preRun()
self.start_time = clock()
try:
# PBS STUFF
if self.options.pbs and os.path.exists(self.options.pbs):
self.options.processingPBS = True
self.processPBSResults()
else:
self.options.processingPBS = False
self.base_dir = os.getcwd()
for dirpath, dirnames, filenames in os.walk(self.base_dir, followlinks=True):
                # Prune submodule paths when searching for tests
if self.base_dir != dirpath and os.path.exists(os.path.join(dirpath, '.git')):
dirnames[:] = []
# walk into directories that aren't contrib directories
if "contrib" not in os.path.relpath(dirpath, os.getcwd()):
for file in filenames:
# set cluster_handle to be None initially (happens for each test)
self.options.cluster_handle = None
# See if there were other arguments (test names) passed on the command line
if file == self.options.input_file_name: #and self.test_match.search(file):
saved_cwd = os.getcwd()
sys.path.append(os.path.abspath(dirpath))
os.chdir(dirpath)
if self.prunePath(file):
continue
# Build a Warehouse to hold the MooseObjects
warehouse = Warehouse()
# Build a Parser to parse the objects
parser = Parser(self.factory, warehouse)
# Parse it
self.error_code = self.error_code | parser.parse(file)
# Retrieve the tests from the warehouse
testers = warehouse.getAllObjects()
# Augment the Testers with additional information directly from the TestHarness
for tester in testers:
self.augmentParameters(file, tester)
if self.options.enable_recover:
testers = self.appendRecoverableTests(testers)
# Handle PBS tests.cluster file
if self.options.pbs:
(tester, command) = self.createClusterLauncher(dirpath, testers)
if command is not None:
self.runner.run(tester, command)
else:
# Go through the Testers and run them
for tester in testers:
                            # Double the allotted time for tests when running with the valgrind option
tester.setValgrindMode(self.options.valgrind_mode)
# When running in valgrind mode, we end up with a ton of output for each failed
# test. Therefore, we limit the number of fails...
if self.options.valgrind_mode and self.num_failed > self.options.valgrind_max_fails:
(should_run, reason) = (False, 'Max Fails Exceeded')
elif self.num_failed > self.options.max_fails:
(should_run, reason) = (False, 'Max Fails Exceeded')
else:
(should_run, reason) = tester.checkRunnableBase(self.options, self.checks)
if should_run:
command = tester.getCommand(self.options)
# This method spawns another process and allows this loop to continue looking for tests
# RunParallel will call self.testOutputAndFinish when the test has completed running
# This method will block when the maximum allowed parallel processes are running
self.runner.run(tester, command)
else: # This job is skipped - notify the runner
if reason != '':
if (self.options.report_skipped and reason.find('skipped') != -1) or reason.find('skipped') == -1:
self.handleTestResult(tester.parameters(), '', reason)
self.runner.jobSkipped(tester.parameters()['test_name'])
os.chdir(saved_cwd)
sys.path.pop()
except KeyboardInterrupt:
print '\nExiting due to keyboard interrupt...'
sys.exit(0)
self.runner.join()
# Wait for all tests to finish
if self.options.pbs and self.options.processingPBS == False:
print '\n< checking batch status >\n'
self.options.processingPBS = True
self.processPBSResults()
self.cleanup()
if self.num_failed:
self.error_code = self.error_code | 0x10
sys.exit(self.error_code)
def createClusterLauncher(self, dirpath, testers):
self.options.test_serial_number = 0
command = None
tester = None
# Create the tests.cluster input file
# Loop through each tester and create a job
for tester in testers:
(should_run, reason) = tester.checkRunnableBase(self.options, self.checks)
if should_run:
if self.options.cluster_handle == None:
self.options.cluster_handle = open(dirpath + '/' + self.options.pbs + '.cluster', 'w')
self.options.cluster_handle.write('[Jobs]\n')
# This returns the command to run as well as builds the parameters of the test
# The resulting command once this loop has completed is sufficient to launch
# all previous jobs
command = tester.getCommand(self.options)
self.options.cluster_handle.write('[]\n')
self.options.test_serial_number += 1
else: # This job is skipped - notify the runner
if (reason != ''):
self.handleTestResult(tester.parameters(), '', reason)
self.runner.jobSkipped(tester.parameters()['test_name'])
# Close the tests.cluster file
if self.options.cluster_handle is not None:
self.options.cluster_handle.close()
self.options.cluster_handle = None
# Return the final tester/command (sufficient to run all tests)
return (tester, command)
def prunePath(self, filename):
test_dir = os.path.abspath(os.path.dirname(filename))
# Filter tests that we want to run
# Under the new format, we will filter based on directory not filename since it is fixed
prune = True
if len(self.tests) == 0:
prune = False # No filter
else:
for item in self.tests:
if test_dir.find(item) > -1:
prune = False
# Return the inverse of will_run to indicate that this path should be pruned
return prune
def augmentParameters(self, filename, tester):
params = tester.parameters()
# We are going to do some formatting of the path that is printed
# Case 1. If the test directory (normally matches the input_file_name) comes first,
# we will simply remove it from the path
# Case 2. If the test directory is somewhere in the middle then we should preserve
# the leading part of the path
test_dir = os.path.abspath(os.path.dirname(filename))
relative_path = test_dir.replace(self.run_tests_dir, '')
relative_path = relative_path.replace('/' + self.options.input_file_name + '/', ':')
relative_path = re.sub('^[/:]*', '', relative_path) # Trim slashes and colons
formatted_name = relative_path + '.' + tester.name()
params['test_name'] = formatted_name
params['test_dir'] = test_dir
params['relative_path'] = relative_path
params['executable'] = self.executable
params['hostname'] = self.host_name
params['moose_dir'] = self.moose_dir
params['base_dir'] = self.base_dir
if params.isValid('prereq'):
if type(params['prereq']) != list:
print "Option 'prereq' needs to be of type list in " + params['test_name']
sys.exit(1)
params['prereq'] = [relative_path.replace('/tests/', '') + '.' + item for item in params['prereq']]
    # This method splits a list of tests into two pieces each; the first piece will run the test for
# approx. half the number of timesteps and will write out a restart file. The second test will
# then complete the run using the MOOSE recover option.
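    # For example, a recoverable test "foo" is expanded into "foo_part1" (run with
    # --half-transient and checkpoint output enabled) plus the original "foo" re-run
    # with --recover and a prereq on "foo_part1".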
def appendRecoverableTests(self, testers):
new_tests = []
for part1 in testers:
if part1.parameters()['recover'] == True:
# Clone the test specs
part2 = copy.deepcopy(part1)
# Part 1:
part1_params = part1.parameters()
part1_params['test_name'] += '_part1'
part1_params['cli_args'].append('--half-transient :Outputs/checkpoint=true')
part1_params['skip_checks'] = True
# Part 2:
part2_params = part2.parameters()
part2_params['prereq'].append(part1.parameters()['test_name'])
part2_params['delete_output_before_running'] = False
part2_params['cli_args'].append('--recover')
part2_params.addParam('caveats', ['recover'], "")
new_tests.append(part2)
testers.extend(new_tests)
return testers
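# Illustrative note (test name hypothetical): a recoverable spec named
# 'kernels/simple_diffusion' becomes 'kernels/simple_diffusion_part1', which runs
# with '--half-transient :Outputs/checkpoint=true', plus a second test that keeps
# the original name, lists the part1 test as a prereq, and runs with '--recover'.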
## Finish the test by inspecting the raw output
def testOutputAndFinish(self, tester, retcode, output, start=0, end=0):
caveats = []
test = tester.specs # Need to refactor
if test.isValid('caveats'):
caveats = test['caveats']
if self.options.pbs and self.options.processingPBS == False:
(reason, output) = self.buildPBSBatch(output, tester)
elif self.options.dry_run:
reason = 'DRY_RUN'
output += '\n'.join(tester.processResultsCommand(self.moose_dir, self.options))
else:
(reason, output) = tester.processResults(self.moose_dir, retcode, self.options, output)
if self.options.scaling and test['scale_refine']:
caveats.append('scaled')
did_pass = True
if reason == '':
# It ran OK but is this test set to be skipped on any platform, compiler, or other reason?
if self.options.extra_info:
checks = ['platform', 'compiler', 'petsc_version', 'mesh_mode', 'method', 'library_mode', 'dtk', 'unique_ids']
for check in checks:
if not 'ALL' in test[check]:
caveats.append(', '.join(test[check]))
if len(caveats):
result = '[' + ', '.join(caveats).upper() + '] OK'
elif self.options.pbs and self.options.processingPBS == False:
result = 'LAUNCHED'
else:
result = 'OK'
elif reason == 'DRY_RUN':
result = 'DRY_RUN'
else:
result = 'FAILED (%s)' % reason
did_pass = False
self.handleTestResult(tester.specs, output, result, start, end)
return did_pass
def getTiming(self, output):
time = ''
m = re.search(r"Active time=(\S+)", output)
if m != None:
return m.group(1)
def getSolveTime(self, output):
time = ''
m = re.search(r"solve().*", output)
if m != None:
return m.group().split()[5]
def checkExpectError(self, output, expect_error):
if re.search(expect_error, output, re.MULTILINE | re.DOTALL) == None:
#print "%" * 100, "\nExpect Error Pattern not found:\n", expect_error, "\n", "%" * 100, "\n"
return False
else:
return True
# PBS Defs
def processPBSResults(self):
# If batch file exists, check the contents for pending tests.
if os.path.exists(self.options.pbs):
# Build a list of launched jobs
batch_file = open(self.options.pbs)
batch_list = [line.split(':') for line in batch_file.read().split('\n')]
batch_file.close()
del batch_list[-1:]
# Loop through launched jobs and match the TEST_NAME to determine correct stdout (Output_Path)
for job in batch_list:
file = '/'.join(job[2].split('/')[:-2]) + '/' + job[3]
# Build a Warehouse to hold the MooseObjects
warehouse = Warehouse()
# Build a Parser to parse the objects
parser = Parser(self.factory, warehouse)
# Parse it
parser.parse(file)
# Retrieve the tests from the warehouse
testers = warehouse.getAllObjects()
for tester in testers:
self.augmentParameters(file, tester)
for tester in testers:
# Build the requested Tester object
if job[1] == tester.parameters()['test_name']:
# Create Test Type
# test = self.factory.create(tester.parameters()['type'], tester)
# Get job status via qstat
qstat = ['qstat', '-f', '-x', str(job[0])]
qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_stdout = qstat_command.communicate()[0]
if qstat_stdout != None:
output_value = re.search(r'job_state = (\w+)', qstat_stdout).group(1)
else:
return ('QSTAT NOT FOUND', '')
# Report the current status of JOB_ID
if output_value == 'F':
# F = Finished. Get the exit code reported by qstat
exit_code = int(re.search(r'Exit_status = (-?\d+)', qstat_stdout).group(1))
# Read the stdout file
if os.path.exists(job[2]):
output_file = open(job[2], 'r')
# Not sure I am doing this right: I have to change the TEST_DIR to match the temporary cluster_launcher TEST_DIR location, thus violating the tester.specs...
tester.parameters()['test_dir'] = '/'.join(job[2].split('/')[:-1])
outfile = output_file.read()
output_file.close()
else:
# I ran into this scenario when the cluster went down, but launched/completed my job :)
self.handleTestResult(tester.specs, '', 'FAILED (NO STDOUT FILE)', 0, 0, True)
continue # 'outfile' was never read, so skip testOutputAndFinish for this job
self.testOutputAndFinish(tester, exit_code, outfile)
elif output_value == 'R':
# Job is currently running
self.handleTestResult(tester.specs, '', 'RUNNING', 0, 0, True)
elif output_value == 'E':
# Job is exiting
self.handleTestResult(tester.specs, '', 'EXITING', 0, 0, True)
elif output_value == 'Q':
# Job is currently queued
self.handleTestResult(tester.specs, '', 'QUEUED', 0, 0, True)
else:
return ('BATCH FILE NOT FOUND', '')
def buildPBSBatch(self, output, tester):
# Create/Update the batch file
if 'command not found' in output:
return ('QSUB NOT FOUND', '')
else:
# Get the PBS Job ID using qstat
results = re.findall(r'JOB_NAME: (\w+\d+) JOB_ID: (\d+) TEST_NAME: (\S+)', output, re.DOTALL)
if len(results) != 0:
file_name = self.options.pbs
job_list = open(os.path.abspath(os.path.join(tester.specs['executable'], os.pardir)) + '/' + file_name, 'a')
for result in results:
(test_dir, job_id, test_name) = result
qstat_command = subprocess.Popen(['qstat', '-f', '-x', str(job_id)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_stdout = qstat_command.communicate()[0]
# Get the Output_Path from qstat stdout
if qstat_stdout != None:
output_value = re.search(r'Output_Path(.*?)(^ +)', qstat_stdout, re.S | re.M).group(1)
output_value = output_value.split(':')[1].replace('\n', '').replace('\t', '')
else:
job_list.close()
return ('QSTAT NOT FOUND', '')
# Write job_id, test['test_name'], and Output_Path to the batch file
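# e.g. a written line looks like (values hypothetical):
# 12345:kernels/simple_diffusion:/home/user/tests/job_output.o12345:tests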
job_list.write(str(job_id) + ':' + test_name + ':' + output_value + ':' + self.options.input_file_name + '\n')
# Return to TestHarness and inform we have launched the job
job_list.close()
return ('', 'LAUNCHED')
else:
return ('QSTAT INVALID RESULTS', '')
def cleanPBSBatch(self):
# Open the PBS batch file and assign it to a list
if os.path.exists(self.options.pbs_cleanup):
batch_file = open(self.options.pbs_cleanup, 'r')
batch_list = [line.split(':') for line in batch_file.read().split('\n')]
batch_file.close()
del batch_list[-1:]
else:
print 'PBS batch file not found:', self.options.pbs_cleanup
sys.exit(1)
# Loop through launched jobs and delete what's found.
for job in batch_list:
if os.path.exists(job[2]):
batch_dir = os.path.abspath(os.path.join(job[2], os.pardir)).split('/')
if os.path.exists('/'.join(batch_dir)):
shutil.rmtree('/'.join(batch_dir))
if os.path.exists('/'.join(batch_dir[:-1]) + '/' + self.options.pbs_cleanup + '.cluster'):
os.remove('/'.join(batch_dir[:-1]) + '/' + self.options.pbs_cleanup + '.cluster')
os.remove(self.options.pbs_cleanup)
# END PBS Defs
## Update global variables and print output based on the test result
# Containing OK means it passed, skipped means skipped, anything else means it failed
def handleTestResult(self, specs, output, result, start=0, end=0, add_to_table=True):
timing = ''
if self.options.timing:
timing = self.getTiming(output)
elif self.options.store_time:
timing = self.getSolveTime(output)
# Only add to the test_table if told to. We now have enough cases where we wish to print to the screen, but not
# in the 'Final Test Results' area.
if add_to_table:
self.test_table.append( (specs, output, result, timing, start, end) )
if result.find('OK') != -1 or result.find('DRY_RUN') != -1:
self.num_passed += 1
elif result.find('skipped') != -1:
self.num_skipped += 1
elif result.find('deleted') != -1:
self.num_skipped += 1
elif result.find('LAUNCHED') != -1 or result.find('RUNNING') != -1 or result.find('QUEUED') != -1 or result.find('EXITING') != -1:
self.num_pending += 1
else:
self.num_failed += 1
self.postRun(specs, timing)
if self.options.show_directory:
print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options)
else:
print printResult(specs['test_name'], result, timing, start, end, self.options)
if self.options.verbose or ('FAILED' in result and not self.options.quiet):
output = output.replace('\r', '\n') # replace the carriage returns with newlines
lines = output.split('\n');
color = ''
if 'EXODIFF' in result or 'CSVDIFF' in result:
color = 'YELLOW'
elif 'FAILED' in result:
color = 'RED'
else:
color = 'GREEN'
test_name = colorText(specs['test_name'] + ": ", color, colored=self.options.colored, code=self.options.code)
output = ("\n" + test_name).join(lines)
print output
# Print result line again at the bottom of the output for failed tests
if self.options.show_directory:
print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options), "(reprint)"
else:
print printResult(specs['test_name'], result, timing, start, end, self.options), "(reprint)"
if not 'skipped' in result:
if self.options.file:
if self.options.show_directory:
self.file.write(printResult( specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options, color=False) + '\n')
self.file.write(output)
else:
self.file.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
self.file.write(output)
if self.options.sep_files or (self.options.fail_files and 'FAILED' in result) or (self.options.ok_files and result.find('OK') != -1):
fname = os.path.join(specs['test_dir'], specs['test_name'].split('/')[-1] + '.' + result[:6] + '.txt')
f = open(fname, 'w')
f.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
f.write(output)
f.close()
# Write the app_name to a file, if the tests passed
def writeState(self, app_name):
# If we encounter bitten_status_moose environment, build a line itemized list of applications which passed their tests
if os.environ.has_key("BITTEN_STATUS_MOOSE"):
result_file = open(os.path.join(self.moose_dir, 'test_results.log'), 'a')
result_file.write(os.path.split(app_name)[1].split('-')[0] + '\n')
result_file.close()
# Print final results, close open files, and exit with the correct error code
def cleanup(self):
# Print the results table again if a bunch of output was spewed to the screen between
# tests as they were running
if self.options.verbose or (self.num_failed != 0 and not self.options.quiet):
print '\n\nFinal Test Results:\n' + ('-' * (TERM_COLS-1))
for (test, output, result, timing, start, end) in sorted(self.test_table, key=lambda x: x[2], reverse=True):
if self.options.show_directory:
print printResult(test['relative_path'] + '/' + test['test_name'].split('/')[-1], result, timing, start, end, self.options)
else:
print printResult(test['test_name'], result, timing, start, end, self.options)
time = clock() - self.start_time
print '-' * (TERM_COLS-1)
print 'Ran %d tests in %.1f seconds' % (self.num_passed+self.num_failed, time)
if self.num_passed:
summary = '<g>%d passed</g>'
else:
summary = '<b>%d passed</b>'
summary += ', <b>%d skipped</b>'
if self.num_pending:
summary += ', <c>%d pending</c>'
else:
summary += ', <b>%d pending</b>'
if self.num_failed:
summary += ', <r>%d FAILED</r>'
else:
summary += ', <b>%d failed</b>'
# Mask off TestHarness error codes to report parser errors
if self.error_code & 0x0F:
summary += ', <r>FATAL PARSER ERROR</r>'
print colorText( summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed), "", html = True, \
colored=self.options.colored, code=self.options.code )
if self.options.pbs:
print '\nYour PBS batch file:', self.options.pbs
if self.file:
self.file.close()
if self.num_failed == 0:
self.writeState(self.executable)
def initialize(self, argv, app_name):
# Initialize the parallel runner with how many tests to run in parallel
self.runner = RunParallel(self, self.options.jobs, self.options.load)
## Save executable-under-test name to self.executable
self.executable = os.getcwd() + '/' + app_name + '-' + self.options.method
# Save the output dir since the current working directory changes during tests
self.output_dir = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), self.options.output_dir)
# Create the output dir if they ask for it. It is easier to ask for forgiveness than permission
if self.options.output_dir:
try:
os.makedirs(self.output_dir)
except OSError, ex:
if ex.errno == errno.EEXIST: pass
else: raise
# Open the file to redirect output to and set the quiet option for file output
if self.options.file:
self.file = open(os.path.join(self.output_dir, self.options.file), 'w')
if self.options.file or self.options.fail_files or self.options.sep_files:
self.options.quiet = True
## Parse command line options and assign them to self.options
def parseCLArgs(self, argv=sys.argv[1:]):
parser = argparse.ArgumentParser(description='A tool used to test MOOSE based applications')
parser.add_argument('test_name', nargs=argparse.REMAINDER)
parser.add_argument('--opt', action='store_const', dest='method', const='opt', help='test the app_name-opt binary')
parser.add_argument('--dbg', action='store_const', dest='method', const='dbg', help='test the app_name-dbg binary')
parser.add_argument('--devel', action='store_const', dest='method', const='devel', help='test the app_name-devel binary')
parser.add_argument('--oprof', action='store_const', dest='method', const='oprof', help='test the app_name-oprof binary')
parser.add_argument('--pro', action='store_const', dest='method', const='pro', help='test the app_name-pro binary')
parser.add_argument('-j', '--jobs', nargs=1, metavar='int', action='store', type=int, dest='jobs', default=1, help='run test binaries in parallel')
parser.add_argument('-e', action='store_true', dest='extra_info', help='Display "extra" information including all caveats and deleted tests')
parser.add_argument('-c', '--no-color', action='store_false', dest='colored', help='Do not show colored output')
parser.add_argument('--heavy', action='store_true', dest='heavy_tests', help='Run tests marked with HEAVY : True')
parser.add_argument('--all-tests', action='store_true', dest='all_tests', help='Run normal tests and tests marked with HEAVY : True')
parser.add_argument('-g', '--group', action='store', type=str, dest='group', default='ALL', help='Run only tests in the named group')
parser.add_argument('--not_group', action='store', type=str, dest='not_group', help='Run only tests NOT in the named group')
# parser.add_argument('--dofs', action='store', dest='dofs', help='This option is for automatic scaling which is not currently implemented in MOOSE 2.0')
parser.add_argument('--dbfile', nargs='?', action='store', dest='dbFile', help='Location to timings data base file. If not set, assumes $HOME/timingDB/timing.sqlite')
parser.add_argument('-l', '--load-average', action='store', type=float, dest='load', default=64.0, help='Do not run additional tests if the load average is at least LOAD')
parser.add_argument('-t', '--timing', action='store_true', dest='timing', help='Report Timing information for passing tests')
parser.add_argument('-s', '--scale', action='store_true', dest='scaling', help='Scale problems that have SCALE_REFINE set')
parser.add_argument('-i', nargs=1, action='store', type=str, dest='input_file_name', default='tests', help='The default test specification file to look for (default="tests").')
parser.add_argument('--libmesh_dir', nargs=1, action='store', type=str, dest='libmesh_dir', help='Currently only needed for bitten code coverage')
parser.add_argument('--skip-config-checks', action='store_true', dest='skip_config_checks', help='Skip configuration checks (all tests will run regardless of restrictions)')
parser.add_argument('--parallel', '-p', nargs='?', action='store', type=int, dest='parallel', const=1, help='Number of processors to use when running mpiexec')
parser.add_argument('--n-threads', nargs=1, action='store', type=int, dest='nthreads', default=1, help='Number of threads to use when running mpiexec')
parser.add_argument('-d', action='store_true', dest='debug_harness', help='Turn on Test Harness debugging')
parser.add_argument('--recover', action='store_true', dest='enable_recover', help='Run a test in recover mode')
parser.add_argument('--valgrind', action='store_const', dest='valgrind_mode', const='NORMAL', help='Run normal valgrind tests')
parser.add_argument('--valgrind-heavy', action='store_const', dest='valgrind_mode', const='HEAVY', help='Run heavy valgrind tests')
parser.add_argument('--valgrind-max-fails', nargs=1, type=int, dest='valgrind_max_fails', default=5, help='The number of valgrind tests allowed to fail before any additional valgrind tests will run')
parser.add_argument('--max-fails', nargs=1, type=int, dest='max_fails', default=50, help='The number of tests allowed to fail before any additional tests will run')
parser.add_argument('--pbs', nargs='?', metavar='batch_file', dest='pbs', const='generate', help='Enable launching tests via PBS. If no batch file is specified one will be created for you')
parser.add_argument('--pbs-cleanup', nargs=1, metavar='batch_file', help='Clean up the directories/files created by PBS. You must supply the same batch_file used to launch PBS.')
parser.add_argument('--re', action='store', type=str, dest='reg_exp', help='Run tests that match --re=regular_expression')
parser.add_argument('--parallel-mesh', action='store_true', dest='parallel_mesh', help="Pass --parallel-mesh to executable")
parser.add_argument('--error', action='store_true', help='Run the tests with warnings as errors')
parser.add_argument('--cli-args', nargs='?', type=str, dest='cli_args', help='Append the following list of arguments to the command line (Encapsulate the command in quotes)')
parser.add_argument('--dry-run', action='store_true', dest='dry_run', help="Pass --dry-run to print commands to run, but don't actually run them")
outputgroup = parser.add_argument_group('Output Options', 'These options control the output of the test harness. The sep-files options write output to files named test_name.TEST_RESULT.txt. All file output will overwrite old files')
outputgroup.add_argument('-v', '--verbose', action='store_true', dest='verbose', help='show the output of every test')
outputgroup.add_argument('-q', '--quiet', action='store_true', dest='quiet', help='only show the result of every test, don\'t show test output even if it fails')
outputgroup.add_argument('--no-report', action='store_false', dest='report_skipped', help='do not report skipped tests')
outputgroup.add_argument('--show-directory', action='store_true', dest='show_directory', help='Print test directory path in out messages')
outputgroup.add_argument('-o', '--output-dir', nargs=1, metavar='directory', dest='output_dir', default='', help='Save all output files in the directory, and create it if necessary')
outputgroup.add_argument('-f', '--file', nargs=1, action='store', dest='file', help='Write verbose output of each test to FILE and quiet output to terminal')
outputgroup.add_argument('-x', '--sep-files', action='store_true', dest='sep_files', help='Write the output of each test to a separate file. Only quiet output to terminal. This is equivalent to \'--sep-files-fail --sep-files-ok\'')
outputgroup.add_argument('--sep-files-ok', action='store_true', dest='ok_files', help='Write the output of each passed test to a separate file')
outputgroup.add_argument('-a', '--sep-files-fail', action='store_true', dest='fail_files', help='Write the output of each FAILED test to a separate file. Only quiet output to terminal.')
outputgroup.add_argument("--store-timing", action="store_true", dest="store_time", help="Store timing in the SQL database: $HOME/timingDB/timing.sqlite A parent directory (timingDB) must exist.")
outputgroup.add_argument("--revision", nargs=1, action="store", type=str, dest="revision", help="The current revision being tested. Required when using --store-timing.")
outputgroup.add_argument("--yaml", action="store_true", dest="yaml", help="Dump the parameters for the testers in Yaml Format")
outputgroup.add_argument("--dump", action="store_true", dest="dump", help="Dump the parameters for the testers in GetPot Format")
code = True
if self.code.decode('hex') in argv:
del argv[argv.index(self.code.decode('hex'))]
code = False
self.options = parser.parse_args()
self.tests = self.options.test_name
self.options.code = code
# Convert all list based options of length one to scalars
for key, value in vars(self.options).items():
if type(value) == list and len(value) == 1:
tmp_str = getattr(self.options, key)
setattr(self.options, key, value[0])
self.checkAndUpdateCLArgs()
## Called after options are parsed from the command line
# Exit if options don't make any sense, print warnings if they are merely weird
def checkAndUpdateCLArgs(self):
opts = self.options
if opts.output_dir and not (opts.file or opts.sep_files or opts.fail_files or opts.ok_files):
print 'WARNING: --output-dir is specified but no output files will be saved, use -f or a --sep-files option'
if opts.group == opts.not_group:
print 'ERROR: The group and not_group options cannot specify the same group'
sys.exit(1)
if opts.store_time and not (opts.revision):
print 'ERROR: --store-timing is specified but no revision'
sys.exit(1)
if opts.store_time:
# timing returns Active Time, while store_timing returns Solve Time.
# Thus we need to turn off timing.
opts.timing = False
opts.scaling = True
if opts.valgrind_mode and (opts.parallel > 1 or opts.nthreads > 1):
print 'ERROR: --parallel and/or --threads can not be used with --valgrind'
sys.exit(1)
# Update any keys from the environment as necessary
if not self.options.method:
if os.environ.has_key('METHOD'):
self.options.method = os.environ['METHOD']
else:
self.options.method = 'opt'
if not self.options.valgrind_mode:
self.options.valgrind_mode = ''
# Update libmesh_dir to reflect arguments
if opts.libmesh_dir:
self.libmesh_dir = opts.libmesh_dir
# Generate a batch file if PBS argument supplied with out a file
if opts.pbs == 'generate':
largest_serial_num = 0
for name in os.listdir('.'):
m = re.search('pbs_(\d{3})', name)
if m != None and int(m.group(1)) > largest_serial_num:
largest_serial_num = int(m.group(1))
opts.pbs = "pbs_" + str(largest_serial_num+1).zfill(3)
# When running heavy tests, we'll make sure we use --no-report
if opts.heavy_tests:
self.options.report_skipped = False
def postRun(self, specs, timing):
return
def preRun(self):
if self.options.yaml:
self.factory.printYaml("Tests")
sys.exit(0)
elif self.options.dump:
self.factory.printDump("Tests")
sys.exit(0)
if self.options.pbs_cleanup:
self.cleanPBSBatch()
sys.exit(0)
def getOptions(self):
return self.options
#################################################################################################################################
# The TestTimer TestHarness
# This method finds and stores timing for individual tests. It is activated with --store-timing
#################################################################################################################################
CREATE_TABLE = """create table timing
(
app_name text,
test_name text,
revision text,
date int,
seconds real,
scale int,
load real
);"""
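# Illustrative example row (values hypothetical) matching the insert performed
# in TestTimer.postRun below:
# ('moose_test', 'simple_diffusion', 'r12345', 1400000000, 12.3, 2, 0.75)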
class TestTimer(TestHarness):
def __init__(self, argv, app_name, moose_dir):
TestHarness.__init__(self, argv, app_name, moose_dir)
try:
from sqlite3 import dbapi2 as sqlite
except:
print 'Error: --store-timing requires the sqlite3 python module.'
sys.exit(1)
self.app_name = app_name
self.db_file = self.options.dbFile
if not self.db_file:
home = os.environ['HOME']
self.db_file = os.path.join(home, 'timingDB/timing.sqlite')
if not os.path.exists(self.db_file):
print 'Warning: creating new database at default location: ' + str(self.db_file)
self.createDB(self.db_file)
else:
print 'Warning: Assuming database location ' + self.db_file
def createDB(self, fname):
from sqlite3 import dbapi2 as sqlite
print 'Creating empty database at ' + fname
con = sqlite.connect(fname)
cr = con.cursor()
cr.execute(CREATE_TABLE)
con.commit()
def preRun(self):
from sqlite3 import dbapi2 as sqlite
# Delete previous data if app_name and repo revision are found
con = sqlite.connect(self.db_file)
cr = con.cursor()
cr.execute('delete from timing where app_name = ? and revision = ?', (self.app_name, self.options.revision))
con.commit()
# After the run store the results in the database
def postRun(self, test, timing):
from sqlite3 import dbapi2 as sqlite
con = sqlite.connect(self.db_file)
cr = con.cursor()
timestamp = int(time.time())
load = os.getloadavg()[0]
# accumulate the test results
data = []
sum_time = 0
num = 0
parse_failed = False
# We're only interested in storing scaled data
if timing != None and test['scale_refine'] != 0:
sum_time += float(timing)
num += 1
data.append( (self.app_name, test['test_name'].split('/').pop(), self.options.revision, timestamp, timing, test['scale_refine'], load) )
# Insert the data into the database
cr.executemany('insert into timing values (?,?,?,?,?,?,?)', data)
con.commit()
|
wgapl/moose
|
python/TestHarness/TestHarness.py
|
Python
|
lgpl-2.1
| 41,159
|
[
"MOOSE",
"VTK"
] |
9da32ac036503823d8a2790357dad00968361145de930e99e25f2142299b44be
|
""" Analyze libraries in trees
Analyze library dependencies in paths and wheel files
"""
import logging
import os
import sys
import warnings
from os.path import basename, dirname
from os.path import join as pjoin
from os.path import realpath
from typing import (
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Set,
Text,
Tuple,
)
import delocate.delocating
from .tmpdirs import TemporaryDirectory
from .tools import (
get_environment_variable_paths,
get_install_names,
get_rpaths,
zip2dir,
)
logger = logging.getLogger(__name__)
class DependencyNotFound(Exception):
"""
Raised by tree_libs or resolve_rpath if an expected dependency is missing.
"""
def _filter_system_libs(libname):
# type: (Text) -> bool
return not (libname.startswith("/usr/lib") or libname.startswith("/System"))
def get_dependencies(
lib_fname, # type: Text
executable_path=None, # type: Optional[Text]
filt_func=lambda filepath: True, # type: Callable[[str], bool]
):
# type: (...) -> Iterator[Tuple[Optional[Text], Text]]
"""Find and yield the real paths of dependencies of the library `lib_fname`
This function is used to search for the real files that are required by
`lib_fname`.
The caller must check if any `dependency_path` is None and must decide on
how to handle missing dependencies.
Parameters
----------
lib_fname : str
The library to fetch dependencies from. Must be an existing file.
executable_path : str, optional
An alternative path to use for resolving `@executable_path`.
filt_func : callable, optional
A callable which accepts filename as argument and returns True if we
should inspect the file or False otherwise.
Defaults to inspecting all files for library dependencies.
If `filt_func` returns False for `lib_fname` then no values will be
yielded.
If `filt_func` returns False for a dependency's real path then that
dependency will not be yielded.
Yields
------
dependency_path : str or None
The real path of the dependencies of `lib_fname`.
If the library at `install_name` can not be found then this value will
be None.
install_name : str
The install name of `dependency_path` as if :func:`get_install_names`
was called.
Raises
------
DependencyNotFound
When `lib_fname` does not exist.
"""
if not filt_func(lib_fname):
logger.debug("Ignoring dependencies of %s" % lib_fname)
return
if not os.path.isfile(lib_fname):
if not _filter_system_libs(lib_fname):
logger.debug(
"Ignoring missing library %s because it is a system library.",
lib_fname,
)
return
raise DependencyNotFound(lib_fname)
rpaths = get_rpaths(lib_fname) + get_environment_variable_paths()
for install_name in get_install_names(lib_fname):
try:
if install_name.startswith("@"):
dependency_path = resolve_dynamic_paths(
install_name,
rpaths,
loader_path=dirname(lib_fname),
executable_path=executable_path,
)
else:
dependency_path = search_environment_for_lib(install_name)
if not os.path.isfile(dependency_path):
if not _filter_system_libs(dependency_path):
logger.debug(
"Skipped missing dependency %s"
" because it is a system library.",
dependency_path,
)
else:
raise DependencyNotFound(dependency_path)
if dependency_path != install_name:
logger.debug(
"%s resolved to: %s", install_name, dependency_path
)
yield dependency_path, install_name
except DependencyNotFound:
message = "\n%s not found:\n Needed by: %s" % (
install_name,
lib_fname,
)
if install_name.startswith("@rpath"):
message += "\n Search path:\n " + "\n ".join(rpaths)
logger.error(message)
# At this point install_name is known to be a bad path.
yield None, install_name
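# Illustrative usage sketch (the library path below is hypothetical):
# get_dependencies yields (resolved_path, install_name) pairs and yields
# (None, install_name) for dependencies it cannot resolve.
def _sketch_get_dependencies():
    for dependency_path, install_name in get_dependencies(
        "/opt/example/lib/libexample.dylib", filt_func=_filter_system_libs
    ):
        if dependency_path is None:
            print("unresolved:", install_name)
        else:
            print(install_name, "->", dependency_path)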
def walk_library(
lib_fname, # type: Text
filt_func=lambda filepath: True, # type: Callable[[Text], bool]
visited=None, # type: Optional[Set[Text]]
executable_path=None, # type: Optional[Text]
):
# type: (...) -> Iterator[Text]
"""
Yield all libraries on which `lib_fname` depends, directly or indirectly.
First yields `lib_fname` itself, if not already `visited` and then all
dependencies of `lib_fname`, including dependencies of dependencies.
Dependencies which can not be resolved will be logged and ignored.
Parameters
----------
lib_fname : str
The library to start with.
filt_func : callable, optional
A callable which accepts filename as argument and returns True if we
should inspect the file or False otherwise.
Defaults to inspecting all files for library dependencies.
If `filt_func` filters a library, all of that library's dependencies
are excluded as well.
visited : None or set of str, optional
We update `visited` with new library_path's as we visit them, to
prevent infinite recursion and duplicates. Input value of None
corresponds to the set `{lib_path}`. Modified in-place.
executable_path : str, optional
An alternative path to use for resolving `@executable_path`.
Yields
------
library_path : str
The path of each library that `lib_fname` depends on, directly or
indirectly, including `lib_fname` itself, without duplicates.
"""
if visited is None:
visited = {lib_fname}
elif lib_fname in visited:
return
else:
visited.add(lib_fname)
if not filt_func(lib_fname):
logger.debug("Ignoring %s and its dependencies.", lib_fname)
return
yield lib_fname
for dependency_fname, install_name in get_dependencies(
lib_fname, executable_path=executable_path, filt_func=filt_func
):
if dependency_fname is None:
logger.error(
"%s not found, requested by %s",
install_name,
lib_fname,
)
continue
for sub_dependency in walk_library(
dependency_fname,
filt_func=filt_func,
visited=visited,
executable_path=executable_path,
):
yield sub_dependency
def walk_directory(
root_path, # type: Text
filt_func=lambda filepath: True, # type: Callable[[Text], bool]
executable_path=None, # type: Optional[Text]
):
# type: (...) -> Iterator[Text]
"""Walk along dependencies starting with the libraries within `root_path`.
Dependencies which can not be resolved will be logged and ignored.
Parameters
----------
root_path : str
The root directory to search for libraries depending on other libraries.
filt_func : None or callable, optional
A callable which accepts filename as argument and returns True if we
should inspect the file or False otherwise.
Defaults to inspecting all files for library dependencies.
If `filt_func` filters a library, none of that library's dependencies
will be analyzed further.
executable_path : None or str, optional
If not None, an alternative path to use for resolving
`@executable_path`.
Yields
------
library_path : str
Iterates over the libraries in `root_path` and each of their
dependencies without any duplicates.
"""
visited_paths = set() # type: Set[Text]
for dirpath, dirnames, basenames in os.walk(root_path):
for base in basenames:
depending_path = realpath(pjoin(dirpath, base))
if depending_path in visited_paths:
continue # A library in root_path was a dependency of another.
if not filt_func(depending_path):
continue
for library_path in walk_library(
depending_path,
filt_func=filt_func,
visited=visited_paths,
executable_path=executable_path,
):
yield library_path
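# Illustrative usage sketch (the root path below is hypothetical): walk_directory
# yields every non-system library under the root plus each of their transitive
# dependencies, without duplicates.
def _sketch_walk_directory():
    for library_path in walk_directory("/tmp/unpacked_wheel", _filter_system_libs):
        print(library_path)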
def _tree_libs_from_libraries(
libraries: Iterable[str],
*,
lib_filt_func: Callable[[str], bool],
copy_filt_func: Callable[[str], bool],
executable_path: Optional[str] = None,
ignore_missing: bool = False,
) -> Dict[str, Dict[str, str]]:
"""Return an analysis of the dependencies of `libraries`.
Parameters
----------
libraries : iterable of str
The paths to the libraries to find dependencies of.
lib_filt_func : callable, keyword-only
A callable which accepts filename as argument and returns True if we
should inspect the file or False otherwise.
If `lib_filt_func` filters a library, none of that library's
dependencies will be analyzed further.
copy_filt_func : callable, keyword-only
Called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
executable_path : None or str, optional, keyword-only
If not None, an alternative path to use for resolving
`@executable_path`.
ignore_missing : bool, default=False, optional, keyword-only
Continue even if missing dependencies are detected.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``).
``libpath`` is a canonical (``os.path.realpath``) filename of library,
or library name starting with {'@loader_path'}.
``dependings_dict`` is a dict with (key, value) pairs of
(``depending_libpath``, ``install_name``), where ``dependings_libpath``
is the canonical (``os.path.realpath``) filename of the library
depending on ``libpath``, and ``install_name`` is the "install_name" by
which ``depending_libpath`` refers to ``libpath``.
Raises
------
DelocationError
When any dependencies can not be located and ``ignore_missing`` is
False.
"""
lib_dict: Dict[str, Dict[str, str]] = {}
missing_libs = False
for library_path in libraries:
for depending_path, install_name in get_dependencies(
library_path,
executable_path=executable_path,
filt_func=lib_filt_func,
):
if depending_path is None:
missing_libs = True
continue
if copy_filt_func and not copy_filt_func(depending_path):
continue
lib_dict.setdefault(depending_path, {})
lib_dict[depending_path][library_path] = install_name
if missing_libs and not ignore_missing:
# get_dependencies will already have logged details of missing
# libraries.
raise delocate.delocating.DelocationError(
"Could not find all dependencies."
)
return lib_dict
def tree_libs_from_directory(
start_path: str,
*,
lib_filt_func: Callable[[str], bool] = _filter_system_libs,
copy_filt_func: Callable[[str], bool] = lambda path: True,
executable_path: Optional[str] = None,
ignore_missing: bool = False,
) -> Dict[Text, Dict[Text, Text]]:
"""Return an analysis of the libraries in the directory of `start_path`.
Parameters
----------
start_path : iterable of str
Root path of tree to search for libraries depending on other libraries.
lib_filt_func : callable, optional, keyword-only
A callable which accepts filename as argument and returns True if we
should inspect the file or False otherwise.
If `lib_filt_func` filters a library, none of that library's
dependencies will be analyzed further.
Defaults to inspecting all files except for system libraries.
copy_filt_func : callable, optional, keyword-only
Called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Defaults to copying all detected dependencies.
executable_path : None or str, optional, keyword-only
If not None, an alternative path to use for resolving
`@executable_path`.
ignore_missing : bool, default=False, optional, keyword-only
Continue even if missing dependencies are detected.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``).
``libpath`` is a canonical (``os.path.realpath``) filename of library,
or library name starting with {'@loader_path'}.
``dependings_dict`` is a dict with (key, value) pairs of
(``depending_libpath``, ``install_name``), where ``dependings_libpath``
is the canonical (``os.path.realpath``) filename of the library
depending on ``libpath``, and ``install_name`` is the "install_name" by
which ``depending_libpath`` refers to ``libpath``.
Raises
------
DelocationError
When any dependencies can not be located and ``ignore_missing`` is
False.
"""
return _tree_libs_from_libraries(
walk_directory(
start_path, lib_filt_func, executable_path=executable_path
),
lib_filt_func=lib_filt_func,
copy_filt_func=copy_filt_func,
ignore_missing=ignore_missing,
)
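# Illustrative usage sketch (directory and paths hypothetical): the returned
# lib_dict maps each dependency to the libraries that need it, e.g.
# {'/usr/local/lib/libfoo.dylib': {'/tmp/unpacked/pkg/_ext.so': '@rpath/libfoo.dylib'}}
def _sketch_tree_libs_from_directory():
    lib_dict = tree_libs_from_directory("/tmp/unpacked", ignore_missing=True)
    for libpath, dependings in lib_dict.items():
        for depending_libpath, install_name in dependings.items():
            print(depending_libpath, "needs", libpath, "as", install_name)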
def _allow_all(path: str) -> bool:
"""A filter which returns True for all files."""
return True
def tree_libs(
start_path, # type: Text
filt_func=None, # type: Optional[Callable[[Text], bool]]
):
# type: (...) -> Dict[Text, Dict[Text, Text]]
"""Return analysis of library dependencies within `start_path`
Parameters
----------
start_path : str
root path of tree to search for libraries depending on other libraries.
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``).
``libpath`` is a canonical (``os.path.realpath``) filename of library,
or library name starting with {'@loader_path'}.
``dependings_dict`` is a dict with (key, value) pairs of
(``depending_libpath``, ``install_name``), where ``dependings_libpath``
is the canonical (``os.path.realpath``) filename of the library
depending on ``libpath``, and ``install_name`` is the "install_name" by
which ``depending_libpath`` refers to ``libpath``.
Notes
-----
See:
* https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html # noqa: E501
* http://matthew-brett.github.io/pydagogue/mac_runtime_link.html
.. deprecated:: 0.9
This function does not support `@loader_path` and only returns the
direct dependencies of the libraries in `start_path`.
:func:`tree_libs_from_directory` should be used instead.
"""
warnings.warn(
"tree_libs doesn't support @loader_path and has been deprecated.",
DeprecationWarning,
stacklevel=2,
)
if filt_func is None:
filt_func = _allow_all
lib_dict = {} # type: Dict[Text, Dict[Text, Text]]
for dirpath, dirnames, basenames in os.walk(start_path):
for base in basenames:
depending_path = realpath(pjoin(dirpath, base))
for dependency_path, install_name in get_dependencies(
depending_path,
filt_func=filt_func,
):
if dependency_path is None:
# Mimic deprecated behavior.
# A lib_dict with unresolved paths is unsuitable for
# delocating, this is a missing dependency.
dependency_path = realpath(install_name)
if install_name.startswith("@loader_path/"):
# Support for `@loader_path` would break existing callers.
logger.debug(
"Excluding %s because it has '@loader_path'.",
install_name,
)
continue
lib_dict.setdefault(dependency_path, {})
lib_dict[dependency_path][depending_path] = install_name
return lib_dict
def resolve_dynamic_paths(lib_path, rpaths, loader_path, executable_path=None):
# type: (Text, Iterable[Text], Text, Optional[Text]) -> Text
"""Return `lib_path` with any special runtime linking names resolved.
If `lib_path` has `@rpath` then returns the first `rpaths`/`lib_path`
combination found. If the library can't be found in `rpaths` then
DependencyNotFound is raised.
`@loader_path` and `@executable_path` are resolved with their respective
parameters.
Parameters
----------
lib_path : str
The path to a library file, which may or may not be a relative path
starting with `@rpath`, `@loader_path`, or `@executable_path`.
rpaths : sequence of str
A sequence of search paths, usually gotten from a call to `get_rpaths`.
loader_path : str
The path to be used for `@loader_path`.
This should be the directory of the library which is loading `lib_path`.
executable_path : None or str, optional
The path to be used for `@executable_path`.
If None is given then the path of the Python executable will be used.
Returns
-------
lib_path : str
A str with the resolved libraries realpath.
Raises
------
DependencyNotFound
When `lib_path` has `@rpath` in it but no library can be found on any
of the provided `rpaths`.
"""
if executable_path is None:
executable_path = dirname(sys.executable)
if lib_path.startswith("@loader_path/"):
return realpath(pjoin(loader_path, lib_path.split("/", 1)[1]))
if lib_path.startswith("@executable_path/"):
return realpath(pjoin(executable_path, lib_path.split("/", 1)[1]))
if not lib_path.startswith("@rpath/"):
return realpath(lib_path)
lib_rpath = lib_path.split("/", 1)[1]
for rpath in rpaths:
rpath_lib = resolve_dynamic_paths(
pjoin(rpath, lib_rpath), (), loader_path, executable_path
)
if os.path.exists(rpath_lib):
return realpath(rpath_lib)
raise DependencyNotFound(lib_path)
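# Illustrative usage sketch (all paths hypothetical): '@loader_path' resolves
# against the directory of the loading library, so a plugin in /opt/app/plugins
# referring to '@loader_path/../lib/libbar.dylib' resolves to
# realpath('/opt/app/plugins/../lib/libbar.dylib').
def _sketch_resolve_dynamic_paths():
    return resolve_dynamic_paths(
        "@loader_path/../lib/libbar.dylib",
        rpaths=[],
        loader_path="/opt/app/plugins",
    )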
def resolve_rpath(lib_path, rpaths):
# type: (Text, Iterable[Text]) -> Text
"""Return `lib_path` with its `@rpath` resolved
If the `lib_path` doesn't have `@rpath` then it's returned as is.
If `lib_path` has `@rpath` then returns the first `rpaths`/`lib_path`
combination found. If the library can't be found in `rpaths` then a
detailed warning is printed and `lib_path` is returned as is.
Parameters
----------
lib_path : str
The path to a library file, which may or may not start with `@rpath`.
rpaths : sequence of str
A sequence of search paths, usually gotten from a call to `get_rpaths`.
Returns
-------
lib_path : str
A str with the resolved libraries realpath.
.. deprecated:: 0.9
This function does not support `@loader_path`.
Use `resolve_dynamic_paths` instead.
"""
warnings.warn(
"resolve_rpath doesn't support @loader_path and has been deprecated."
" Switch to using `resolve_dynamic_paths` instead.",
DeprecationWarning,
stacklevel=2,
)
if not lib_path.startswith("@rpath/"):
return lib_path
lib_rpath = lib_path.split("/", 1)[1]
for rpath in rpaths:
rpath_lib = realpath(pjoin(rpath, lib_rpath))
if os.path.exists(rpath_lib):
return rpath_lib
warnings.warn(
"Couldn't find {0} on paths:\n\t{1}".format(
lib_path,
"\n\t".join(realpath(path) for path in rpaths),
)
)
return lib_path
def search_environment_for_lib(lib_path):
# type: (Text) -> Text
"""Search common environment variables for `lib_path`
We'll use a single approach here:
1. Search for the basename of the library on DYLD_LIBRARY_PATH
2. Search for ``realpath(lib_path)``
3. Search for the basename of the library on DYLD_FALLBACK_LIBRARY_PATH
This follows the order that Apple defines for "searching for a
library that has a directory name in it" as defined in their
documentation here:
https://developer.apple.com/library/archive/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryUsageGuidelines.html#//apple_ref/doc/uid/TP40001928-SW10
See the script "testing_osx_rpath_env_variables.sh" in tests/data
for a more in-depth explanation. The case where LD_LIBRARY_PATH is
used is a narrow subset of that, so we'll ignore it here to keep
things simple.
Parameters
----------
lib_path : str
Name of the library to search for
Returns
-------
lib_path : str
Real path of the first found location, if it can be found, or
``realpath(lib_path)`` if it cannot.
"""
lib_basename = basename(lib_path)
potential_library_locations = []
# 1. Search on DYLD_LIBRARY_PATH
potential_library_locations += _paths_from_var(
"DYLD_LIBRARY_PATH", lib_basename
)
# 2. Search for realpath(lib_path)
potential_library_locations.append(realpath(lib_path))
# 3. Search on DYLD_FALLBACK_LIBRARY_PATH
potential_library_locations += _paths_from_var(
"DYLD_FALLBACK_LIBRARY_PATH", lib_basename
)
for location in potential_library_locations:
if os.path.exists(location):
# See GH#133 for why we return the realpath here if it can be found
return realpath(location)
return realpath(lib_path)
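# Illustrative note (values hypothetical): with DYLD_LIBRARY_PATH=/opt/lib set,
# search_environment_for_lib('/build/libbaz.dylib') checks /opt/lib/libbaz.dylib
# first, then realpath('/build/libbaz.dylib'), then any
# DYLD_FALLBACK_LIBRARY_PATH entries, returning the first path that exists.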
def get_prefix_stripper(strip_prefix):
# type: (Text) -> Callable[[Text], Text]
"""Return function to strip `strip_prefix` prefix from string if present
Parameters
----------
strip_prefix : str
Prefix to strip from the beginning of string if present
Returns
-------
stripper : func
function such that ``stripper(a_string)`` will strip `prefix` from
``a_string`` if present, otherwise pass ``a_string`` unmodified
"""
n = len(strip_prefix)
def stripper(path):
# type: (Text) -> Text
return path if not path.startswith(strip_prefix) else path[n:]
return stripper
def get_rp_stripper(strip_path):
# type: (Text) -> Callable[[Text], Text]
"""Return function to strip ``realpath`` of `strip_path` from string
Parameters
----------
strip_path : str
path to strip from beginning of strings. Processed to ``strip_prefix``
by ``realpath(strip_path) + os.path.sep``.
Returns
-------
stripper : func
function such that ``stripper(a_string)`` will strip ``strip_prefix``
from ``a_string`` if present, otherwise pass ``a_string`` unmodified
"""
return get_prefix_stripper(realpath(strip_path) + os.path.sep)
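# Illustrative usage sketch (the prefix below is hypothetical): the returned
# callable strips the prefix when present and leaves other paths untouched.
def _sketch_prefix_stripper():
    stripper = get_prefix_stripper("/tmp/wheel_unpack/")
    assert stripper("/tmp/wheel_unpack/pkg/libx.dylib") == "pkg/libx.dylib"
    assert stripper("/usr/lib/libSystem.B.dylib") == "/usr/lib/libSystem.B.dylib"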
def stripped_lib_dict(lib_dict, strip_prefix):
# type: (Dict[Text, Dict[Text, Text]], Text) -> Dict[Text, Dict[Text, Text]]
"""Return `lib_dict` with `strip_prefix` removed from start of paths
Use to give form of `lib_dict` that appears relative to some base path
given by `strip_prefix`. Particularly useful for analyzing wheels where we
unpack to a temporary path before analyzing.
Parameters
----------
lib_dict : dict
See :func:`tree_libs` for definition. All depending and depended paths
are canonical (therefore absolute)
strip_prefix : str
Prefix to remove (if present) from all depended and depending library
paths in `lib_dict`
Returns
-------
relative_dict : dict
`lib_dict` with `strip_prefix` removed from beginning of all depended
and depending library paths.
"""
relative_dict = {}
stripper = get_prefix_stripper(strip_prefix)
for lib_path, dependings_dict in lib_dict.items():
ding_dict = {}
for depending_libpath, install_name in dependings_dict.items():
ding_dict[stripper(depending_libpath)] = install_name
relative_dict[stripper(lib_path)] = ding_dict
return relative_dict
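# Illustrative usage sketch (paths hypothetical): stripping a temporary unpack
# prefix leaves wheel-relative depending paths while external dependencies keep
# their absolute paths.
def _sketch_stripped_lib_dict():
    lib_dict = {
        "/usr/local/lib/libfoo.dylib": {
            "/tmp/unpack/pkg/_ext.so": "@rpath/libfoo.dylib"
        }
    }
    assert stripped_lib_dict(lib_dict, "/tmp/unpack/") == {
        "/usr/local/lib/libfoo.dylib": {"pkg/_ext.so": "@rpath/libfoo.dylib"}
    }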
def wheel_libs(
wheel_fname: str,
filt_func: Optional[Callable[[Text], bool]] = None,
*,
ignore_missing: bool = False,
) -> Dict[Text, Dict[Text, Text]]:
"""Return analysis of library dependencies with a Python wheel
Use this routine for a dump of the dependency tree.
Parameters
----------
wheel_fname : str
Filename of wheel
filt_func : None or callable, optional
If None, inspect all non-system files for library dependencies.
If callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise.
ignore_missing : bool, default=False, optional, keyword-only
Continue even if missing dependencies are detected.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``). ``libpath`` is library being depended on,
relative to wheel root path if within wheel tree. ``dependings_dict``
is (key, value) of (``depending_lib_path``, ``install_name``). Again,
``depending_lib_path`` is library relative to wheel root path, if
within wheel tree.
Raises
------
DelocationError
When dependencies can not be located and `ignore_missing` is False.
"""
if filt_func is None:
filt_func = _filter_system_libs
with TemporaryDirectory() as tmpdir:
zip2dir(wheel_fname, tmpdir)
lib_dict = tree_libs_from_directory(
tmpdir, lib_filt_func=filt_func, ignore_missing=ignore_missing
)
return stripped_lib_dict(lib_dict, realpath(tmpdir) + os.path.sep)
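# Illustrative usage sketch (the wheel filename is hypothetical): wheel_libs
# unpacks the wheel to a temporary directory and reports dependencies using
# wheel-relative paths.
def _sketch_wheel_libs():
    lib_dict = wheel_libs(
        "example_pkg-1.0-cp39-cp39-macosx_10_9_x86_64.whl", ignore_missing=True
    )
    for libpath, dependings in lib_dict.items():
        print(libpath, "<-", sorted(dependings))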
def _paths_from_var(varname: str, lib_basename: str) -> List[str]:
var = os.environ.get(varname)
if var is None:
return []
return [pjoin(path, lib_basename) for path in var.split(":")]
|
matthew-brett/delocate
|
delocate/libsana.py
|
Python
|
bsd-2-clause
| 26,935
|
[
"VisIt"
] |
b5c726d0ea51be93c7d117589a11f39f038a49c08b7eeca18b2eda56a02f1074
|
'''OAuth support functionality
'''
from __future__ import unicode_literals
# Try importing the Python 3 packages first
# falling back to 2.x packages when it fails.
try:
from http import server as http_server
except ImportError:
import BaseHTTPServer as http_server
try:
from urllib import parse as urllib_parse
except ImportError:
import urlparse as urllib_parse
import logging
import random
import os.path
import sys
import webbrowser
import six
from requests_toolbelt import MultipartEncoder
import requests
from requests_oauthlib import OAuth1
from . import sockutil, exceptions, html
from .exceptions import FlickrError
class OAuthTokenHTTPHandler(http_server.BaseHTTPRequestHandler):
def do_GET(self):
# /?oauth_token=72157630789362986-5405f8542b549e95
# &oauth_verifier=fe4eac402339100e
qs = urllib_parse.urlsplit(self.path).query
url_vars = urllib_parse.parse_qs(qs)
oauth_token = url_vars['oauth_token'][0]
oauth_verifier = url_vars['oauth_verifier'][0]
if six.PY2:
self.server.oauth_token = oauth_token.decode('utf-8')
self.server.oauth_verifier = oauth_verifier.decode('utf-8')
else:
self.server.oauth_token = oauth_token
self.server.oauth_verifier = oauth_verifier
assert(isinstance(self.server.oauth_token, six.string_types))
assert(isinstance(self.server.oauth_verifier, six.string_types))
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(html.auth_okay_html)
class OAuthTokenHTTPServer(http_server.HTTPServer):
'''HTTP server on a random port, which will receive the OAuth verifier.'''
def __init__(self):
self.log = logging.getLogger('%s.%s' % (self.__class__.__module__,
self.__class__.__name__))
self.local_addr = self.listen_port()
self.log.info('Creating HTTP server at %s', self.local_addr)
http_server.HTTPServer.__init__(self, self.local_addr,
OAuthTokenHTTPHandler)
self.oauth_verifier = None
def listen_port(self):
'''Returns the hostname and TCP/IP port number to listen on.
By default finds a random free port between 1100 and 20000.
'''
# Find a random free port
local_addr = ('localhost', int(random.uniform(1100, 20000)))
self.log.debug('Finding free port starting at %s', local_addr)
# return local_addr
return sockutil.find_free_port(local_addr)
def wait_for_oauth_verifier(self, timeout=None):
'''Starts the HTTP server, waits for the OAuth verifier.'''
if self.oauth_verifier is None:
self.timeout = timeout
self.handle_request()
if self.oauth_verifier:
self.log.info('OAuth verifier: %s' % self.oauth_verifier)
return self.oauth_verifier
@property
def oauth_callback_url(self):
return 'http://localhost:%i/' % (self.local_addr[1], )
class FlickrAccessToken(object):
'''Flickr access token.
Contains the token, token secret,
and the user's full name, username and NSID.
'''
levels = ('read', 'write', 'delete')
def __init__(self, token, token_secret, access_level,
fullname=u'', username=u'', user_nsid=u''):
lvl = access_level
assert isinstance(token, six.text_type), 'token should be unicode text'
assert isinstance(token_secret, six.text_type), ('token_secret should '
'be unicode text')
assert isinstance(access_level, six.text_type), ('access_level should '
'be unicode text, is '
'%r' % type(lvl))
assert isinstance(fullname, six.text_type), ('fullname should be '
'unicode text')
assert isinstance(username, six.text_type), ('username should be '
'unicode text')
assert isinstance(user_nsid, six.text_type), ('user_nsid should be '
'unicode text')
access_level = access_level.lower()
assert access_level in self.levels, ('access_level should be one of '
'%r' % (self.levels, ))
self.token = token
self.token_secret = token_secret
self.access_level = access_level
self.fullname = fullname
self.username = username
self.user_nsid = user_nsid
def __str__(self):
if six.PY2:
return six.text_type(self).encode('utf-8')
return self.__unicode__()
def __unicode__(self):
return ('FlickrAccessToken(token=%s, fullname=%s, username=%s,'
'user_nsid=%s)' % (self.token, self.fullname, self.username,
self.user_nsid))
def __repr__(self):
return str(self)
def has_level(self, access_level):
'''Returns True iff the token's access level implies the
given access level.'''
my_idx = self.levels.index(self.access_level)
q_idx = self.levels.index(access_level)
return q_idx <= my_idx
class OAuthFlickrInterface(object):
'''Interface object for handling OAuth-authenticated calls to Flickr.'''
REQUEST_TOKEN_URL = "https://www.flickr.com/services/oauth/request_token"
AUTHORIZE_URL = "https://www.flickr.com/services/oauth/authorize"
ACCESS_TOKEN_URL = "https://www.flickr.com/services/oauth/access_token"
def __init__(self, api_key, api_secret, oauth_token=None):
self.log = logging.getLogger('%s.%s' % (self.__class__.__module__,
self.__class__.__name__))
assert isinstance(api_key, six.text_type), ('api_key must be '
'unicode string')
assert isinstance(api_secret, six.text_type), ('api_secret must be '
'unicode string')
token = None
secret = None
if oauth_token is not None and oauth_token.token:
token = oauth_token.token.token
secret = oauth_token.token.token_secret
self.oauth = OAuth1(api_key, api_secret, token, secret,
signature_type='auth_header')
self.oauth_token = oauth_token
self.auth_http_server = None
self.requested_permissions = None
@property
def key(self):
'''Returns the OAuth key'''
return self.oauth.client.client_key
@property
def resource_owner_key(self):
'''Returns the OAuth resource owner key'''
return self.oauth.client.resource_owner_key
@resource_owner_key.setter
def resource_owner_key(self, new_key):
'''Stores the OAuth resource owner key'''
self.oauth.client.resource_owner_key = new_key
@property
def resource_owner_secret(self):
'''Returns the OAuth resource owner secret'''
return self.oauth.client.resource_owner_secret
@resource_owner_secret.setter
def resource_owner_secret(self, new_secret):
'''Stores the OAuth resource owner secret'''
self.oauth.client.resource_owner_secret = new_secret
@property
def verifier(self):
'''Returns the OAuth verifier.'''
return self.oauth.client.verifier
@verifier.setter
def verifier(self, new_verifier):
'''Sets the OAuth verifier'''
assert isinstance(new_verifier, six.text_type), ('verifier must be '
'unicode text type')
self.oauth.client.verifier = new_verifier
@property
def token(self):
return self.oauth_token
@token.setter
def token(self, new_token):
if new_token is None:
self.oauth_token = None
self.oauth.client.resource_owner_key = None
self.oauth.client.resource_owner_secret = None
self.oauth.client.verifier = None
self.requested_permissions = None
return
assert isinstance(new_token, FlickrAccessToken), new_token
self.oauth_token = new_token
self.oauth.client.resource_owner_key = new_token.token
self.oauth.client.resource_owner_secret = new_token.token_secret
self.oauth.client.verifier = None
self.requested_permissions = new_token.access_level
def _find_cache_dir(self):
'''Returns the appropriate directory for the HTTP cache.'''
if sys.platform.startswith('win'):
return os.path.expandvars('%APPDATA%/flickrapi/cache')
return os.path.expanduser('~/.flickrapi/cache')
def do_request(self, url, params=None):
'''Performs the HTTP request, signed with OAuth.
@return: the response content
'''
req = requests.get(url,
params=params,
auth=self.oauth,
headers={'Connection': 'close'})
# check the response headers / status code.
if req.status_code != 200:
self.log.error('do_request: Status code %i received, content:',
req.status_code)
for part in req.content.split('&'):
self.log.error(' %s', urllib_parse.unquote(part))
raise exceptions.FlickrError('do_request: Status code %s received'
% req.status_code)
return req.content
def do_upload(self, filename, url, params=None, fileobj=None):
'''Performs a file upload to the given URL with the given parameters,
signed with OAuth.
@return: the response content
'''
# work-around to allow non-ascii characters in file name
# Flickr doesn't store the name but does use it as a default title
if 'title' not in params:
params['title'] = os.path.basename(filename)
# work-around for Flickr expecting 'photo' to be excluded
# from the oauth signature:
# 1. create a dummy request without 'photo'
# 2. create real request and use auth headers from the dummy one
dummy_req = requests.Request('POST', url, data=params,
auth=self.oauth,
headers={'Connection': 'close'})
prepared = dummy_req.prepare()
headers = prepared.headers
self.log.debug('do_upload: prepared headers = %s', headers)
if not fileobj:
fileobj = open(filename, 'rb')
params['photo'] = ('dummy name', fileobj)
m = MultipartEncoder(fields=params)
auth = {'Authorization': headers.get('Authorization'),
'Content-Type': m.content_type,
'Connection': 'close'}
self.log.debug('POST %s', auth)
req = requests.post(url, data=m, headers=auth)
# check the response headers / status code.
if req.status_code != 200:
self.log.error('do_upload: Status code %i received, content:',
req.status_code)
for part in req.content.split('&'):
self.log.error(' %s', urllib_parse.unquote(part))
raise exceptions.FlickrError('do_upload: Status code %s received'
% req.status_code)
return req.content
@staticmethod
def parse_oauth_response(data):
'''Parses the data string as OAuth response, returning it as a dict.
The keys and values of the dictionary will be text strings
(i.e. not binary strings).
'''
if isinstance(data, six.binary_type):
data = data.decode('utf-8')
qsl = urllib_parse.parse_qsl(data)
resp = {}
for key, value in qsl:
resp[key] = value
return resp
def _start_http_server(self):
'''Starts the HTTP server, if it wasn't started already.'''
if self.auth_http_server is not None:
return
self.auth_http_server = OAuthTokenHTTPServer()
def _stop_http_server(self):
'''Stops the HTTP server, if one was started.'''
if self.auth_http_server is None:
return
self.auth_http_server = None
def get_request_token(self, oauth_callback=None):
'''Requests a new request token.
Updates this OAuthFlickrInterface object to use the request token
on the following authentication calls.
@param oauth_callback: the URL the user is sent to after granting the
token access.
If the callback is None, a local web server is started on a
random port, and the
callback will be http://localhost:randomport/
If you do not have a web-app and you also do not want to start a
local web server, pass oauth_callback='oob' and have your application
accept the verifier from the user instead.
'''
self.log.debug('get_request_token(oauth_callback=%s):', oauth_callback)
if oauth_callback is None:
self._start_http_server()
oauth_callback = self.auth_http_server.oauth_callback_url
params = {
'oauth_callback': oauth_callback,
}
token_data = self.do_request(self.REQUEST_TOKEN_URL, params)
self.log.debug('Token data: %s', token_data)
# Parse the token data
request_token = self.parse_oauth_response(token_data)
self.log.debug('Request token: %s', request_token)
self.oauth.client.resource_owner_key = request_token['oauth_token']
secret = request_token['oauth_token_secret']
self.oauth.client.resource_owner_secret = secret
def auth_url(self, perms='read'):
'''Returns the URL the user should visit to authenticate the given
oauth Token.
Use this method in webapps, where you can redirect the user to the
returned URL.
After authorization by the user, the browser is redirected to the
callback URL,
which will contain the OAuth verifier. Set the 'verifier' property on
this object
in order to use it.
In stand-alone apps, use open_browser_for_authentication instead.
'''
if self.oauth.client.resource_owner_key is None:
raise FlickrError('No resource owner key set, you probably forgot '
'to call get_request_token(...)')
if perms not in ('read', 'write', 'delete'):
raise ValueError('Invalid parameter perms=%r' % perms)
self.requested_permissions = perms
owner_key = self.oauth.client.resource_owner_key
return "%s?oauth_token=%s&perms=%s" % (self.AUTHORIZE_URL,
owner_key, perms)
def auth_via_browser(self, perms='read'):
'''Opens the webbrowser to authenticate the given request
request_token, sets the verifier.
Use this method in stand-alone apps. In webapps, use auth_url(...)
instead, and redirect the user to the returned URL.
Updates the given request_token by setting the OAuth verifier.
'''
# The HTTP server may have been started already, but we're not sure.
# Just start it if it needs to be started.
self._start_http_server()
url = self.auth_url(perms)
if not webbrowser.open_new_tab(url):
raise exceptions.FlickrError('Unable to open a browser to visit %s'
% url)
self.verifier = self.auth_http_server.wait_for_oauth_verifier()
# We're now done with the HTTP server, so close it down again.
self._stop_http_server()
def get_access_token(self):
'''Exchanges the request token for an access token.
Also stores the access token in 'self' for easy authentication
of subsequent calls.
@return: Access token, a FlickrAccessToken object.
'''
if self.oauth.client.resource_owner_key is None:
raise FlickrError('No resource owner key set, you probably forgot '
'to call get_request_token(...)')
if self.oauth.client.verifier is None:
raise FlickrError('No token verifier set, you probably forgot to '
'set %s.verifier' % self)
if self.requested_permissions is None:
raise FlickrError('Requested permissions are unknown.')
content = self.do_request(self.ACCESS_TOKEN_URL)
# parse the response
access_token_resp = self.parse_oauth_response(content)
secret = access_token_resp['oauth_token_secret']
self.oauth_token = FlickrAccessToken(access_token_resp['oauth_token'],
secret,
self.requested_permissions,
access_token_resp.get('fullname',
''),
access_token_resp['username'],
access_token_resp['user_nsid'])
self.oauth.client.resource_owner_key = access_token_resp['oauth_token']
self.oauth.client.resource_owner_secret = secret
self.oauth.client.verifier = None
return self.oauth_token
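# --- Illustrative sketch (added by the editor, not part of flickrapi) ---
# A minimal out-of-band (oauth_callback='oob') authentication flow using the
# interface above. The OAuthFlickrInterface constructor arguments shown here
# (api_key, api_secret) are assumptions; consult the class definition earlier
# in this module for the real signature.
def _example_oob_auth_flow(api_key, api_secret):
    iface = OAuthFlickrInterface(api_key, api_secret)  # assumed signature
    # 1. obtain a request token without starting the local HTTP server
    iface.get_request_token(oauth_callback='oob')
    # 2. send the user to Flickr to authorise the request token
    print('Please visit: %s' % iface.auth_url(perms='read'))
    # 3. the user copies the verifier back into the application
    iface.verifier = six.text_type(six.moves.input('Verifier code: ').strip())
    # 4. exchange the request token for an access token
    return iface.get_access_token()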
|
onitu/onitu
|
drivers/flickr/onitu_flickr/flickrapi/auth.py
|
Python
|
mit
| 17,734
|
[
"VisIt"
] |
f2a1271e63ecae9da6fa6a10a7a01214d430f6f9bb799179b90ec37965aec972
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Utilities for manipulating coordinates or list of coordinates, under periodic
boundary conditions or otherwise. Many of these are heavily vectorized in
numpy for performance.
"""
from six.moves import zip
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Nov 27, 2011"
import numpy as np
import math
#array size threshold for looping instead of broadcasting
LOOP_THRESHOLD = 1e6
def find_in_coord_list(coord_list, coord, atol=1e-8):
"""
Find the indices of matches of a particular coord in a coord_list.
Args:
coord_list: List of coords to test
coord: Specific coordinates
atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and
array.
Returns:
Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
"""
if len(coord_list) == 0:
return []
diff = np.array(coord_list) - np.array(coord)[None, :]
return np.where(np.all(np.abs(diff) < atol, axis=1))[0]
def in_coord_list(coord_list, coord, atol=1e-8):
"""
Tests if a particular coord is within a coord_list.
Args:
coord_list: List of coords to test
coord: Specific coordinates
atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and
array.
Returns:
True if coord is in the coord list.
"""
return len(find_in_coord_list(coord_list, coord, atol=atol)) > 0
def is_coord_subset(subset, superset, atol=1e-8):
"""
Tests if all coords in subset are contained in superset.
Doesn't use periodic boundary conditions
Args:
subset, superset: List of coords
Returns:
True if all of subset is in superset.
"""
c1 = np.array(subset)
c2 = np.array(superset)
is_close = np.all(np.abs(c1[:, None, :] - c2[None, :, :]) < atol, axis=-1)
any_close = np.any(is_close, axis=-1)
return np.all(any_close)
def coord_list_mapping(subset, superset):
"""
Gives the index mapping from a subset to a superset.
Subset and superset cannot contain duplicate rows
Args:
subset, superset: List of coords
Returns:
list of indices such that superset[indices] = subset
"""
c1 = np.array(subset)
c2 = np.array(superset)
inds = np.where(np.all(np.isclose(c1[:, None, :], c2[None, :, :]),
axis=2))[1]
result = c2[inds]
if not np.allclose(c1, result):
if not is_coord_subset(subset, superset):
raise ValueError("subset is not a subset of superset")
if not result.shape == c1.shape:
raise ValueError("Something wrong with the inputs, likely duplicates "
"in superset")
return inds
def coord_list_mapping_pbc(subset, superset, atol=1e-8):
"""
Gives the index mapping from a subset to a superset.
Subset and superset cannot contain duplicate rows
Args:
subset, superset: List of frac_coords
Returns:
list of indices such that superset[indices] = subset
"""
c1 = np.array(subset)
c2 = np.array(superset)
diff = c1[:, None, :] - c2[None, :, :]
diff -= np.round(diff)
inds = np.where(np.all(np.abs(diff) < atol, axis = 2))[1]
#verify result (it's easier to check validity of the result than
#the validity of inputs)
test = c2[inds] - c1
test -= np.round(test)
if not np.allclose(test, 0):
if not is_coord_subset_pbc(subset, superset):
raise ValueError("subset is not a subset of superset")
if not test.shape == c1.shape:
raise ValueError("Something wrong with the inputs, likely duplicates "
"in superset")
return inds
def get_linear_interpolated_value(x_values, y_values, x):
"""
Returns an interpolated value by linear interpolation between two values.
This method is written to avoid dependency on scipy, which causes issues on
threading servers.
Args:
x_values: Sequence of x values.
y_values: Corresponding sequence of y values
x: Get value at particular x
Returns:
Value at x.
"""
a = np.array(sorted(zip(x_values, y_values), key=lambda d: d[0]))
ind = np.where(a[:, 0] >= x)[0]
if len(ind) == 0 or ind[0] == 0:
raise ValueError("x is out of range of provided x_values")
i = ind[0]
x1, x2 = a[i - 1][0], a[i][0]
y1, y2 = a[i - 1][1], a[i][1]
return y1 + (y2 - y1) / (x2 - x1) * (x - x1)
def all_distances(coords1, coords2):
"""
Returns the distances between two lists of coordinates
Args:
coords1: First set of cartesian coordinates.
coords2: Second set of cartesian coordinates.
Returns:
2d array of cartesian distances. E.g the distance between
coords1[i] and coords2[j] is distances[i,j]
"""
c1 = np.array(coords1)
c2 = np.array(coords2)
z = (c1[:, None, :] - c2[None, :, :]) ** 2
return np.sum(z, axis=-1) ** 0.5
def pbc_diff(fcoords1, fcoords2):
"""
Returns the 'fractional distance' between two coordinates taking into
account periodic boundary conditions.
Args:
fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6,
0.7] or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
coord or any array of coords.
fcoords2: Second set of fractional coordinates.
Returns:
Fractional distance. Each coordinate must have the property that
abs(a) <= 0.5. Examples:
pbc_diff([0.1, 0.1, 0.1], [0.3, 0.5, 0.9]) = [-0.2, -0.4, 0.2]
pbc_diff([0.9, 0.1, 1.01], [0.3, 0.5, 0.9]) = [-0.4, -0.4, 0.11]
"""
fdist = np.subtract(fcoords1, fcoords2)
return fdist - np.round(fdist)
#create images, 2d array of all length 3 combinations of [-1,0,1]
r = np.arange(-1, 2)
arange = r[:, None] * np.array([1, 0, 0])[None, :]
brange = r[:, None] * np.array([0, 1, 0])[None, :]
crange = r[:, None] * np.array([0, 0, 1])[None, :]
images = arange[:, None, None] + brange[None, :, None] + \
crange[None, None, :]
images = images.reshape((27, 3))
def pbc_shortest_vectors(lattice, fcoords1, fcoords2):
"""
Returns the shortest vectors between two lists of coordinates taking into
account periodic boundary conditions and the lattice.
Args:
lattice: lattice to use
fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6, 0.7]
or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
coord or any array of coords.
fcoords2: Second set of fractional coordinates.
Returns:
array of displacement vectors from fcoords1 to fcoords2
first index is fcoords1 index, second is fcoords2 index
"""
#ensure correct shape
fcoords1, fcoords2 = np.atleast_2d(fcoords1, fcoords2)
#ensure that all points are in the unit cell
fcoords1 = np.mod(fcoords1, 1)
fcoords2 = np.mod(fcoords2, 1)
#create images of f2
shifted_f2 = fcoords2[:, None, :] + images[None, :, :]
cart_f1 = lattice.get_cartesian_coords(fcoords1)
cart_f2 = lattice.get_cartesian_coords(shifted_f2)
if cart_f1.size * cart_f2.size < LOOP_THRESHOLD:
#all vectors from f1 to f2
vectors = cart_f2[None, :, :, :] - cart_f1[:, None, None, :]
d_2 = np.sum(vectors ** 2, axis=-1)
a, b = np.indices(vectors.shape[:2])
return vectors[a, b, np.argmin(d_2, axis=-1)]
else:
shortest = np.zeros((len(cart_f1), len(cart_f2), 3))
for i, c1 in enumerate(cart_f1):
vectors = cart_f2[:, :, :] - c1[None, None, :]
d_2 = np.sum(vectors ** 2, axis=-1)
shortest[i] = vectors[np.arange(len(vectors)),
np.argmin(d_2, axis=-1)]
return shortest
def find_in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):
"""
Get the indices of all points in a fractional coord list that are
equal to a fractional coord (with a tolerance), taking into account
periodic boundary conditions.
Args:
fcoord_list: List of fractional coords
fcoord: A specific fractional coord to test.
atol: Absolute tolerance. Defaults to 1e-8.
Returns:
Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
"""
if len(fcoord_list) == 0:
return []
fcoords = np.tile(fcoord, (len(fcoord_list), 1))
fdist = fcoord_list - fcoords
fdist -= np.round(fdist)
return np.where(np.all(np.abs(fdist) < atol, axis=1))[0]
def in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):
"""
Tests if a particular fractional coord is within a fractional coord_list.
Args:
fcoord_list: List of fractional coords to test
fcoord: A specific fractional coord to test.
atol: Absolute tolerance. Defaults to 1e-8.
Returns:
True if coord is in the coord list.
"""
return len(find_in_coord_list_pbc(fcoord_list, fcoord, atol=atol)) > 0
def is_coord_subset_pbc(subset, superset, atol=1e-8, mask=None):
"""
Tests if all fractional coords in subset are contained in superset.
Args:
subset, superset: List of fractional coords
atol (float or size 3 array): Tolerance for matching
mask (boolean array): Mask of matches that are not allowed.
i.e. if mask[1,2] == True, then subset[1] cannot be matched
to superset[2]
Returns:
True if all of subset is in superset.
"""
c1 = np.array(subset)
c2 = np.array(superset)
dist = c1[:, None, :] - c2[None, :, :]
dist -= np.round(dist)
if mask is not None:
dist[np.array(mask)] = np.inf
is_close = np.all(np.abs(dist) < atol, axis=-1)
any_close = np.any(is_close, axis=-1)
return np.all(any_close)
def lattice_points_in_supercell(supercell_matrix):
"""
Returns the list of points on the original lattice contained in the
supercell in fractional coordinates (with the supercell basis).
e.g. [[2,0,0],[0,1,0],[0,0,1]] returns [[0,0,0],[0.5,0,0]]
Args:
supercell_matrix: 3x3 matrix describing the supercell
Returns:
numpy array of the fractional coordinates
"""
diagonals = np.array(
[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1],
[1, 1, 0], [1, 1, 1]])
d_points = np.dot(diagonals, supercell_matrix)
mins = np.min(d_points, axis=0)
maxes = np.max(d_points, axis=0) + 1
ar = np.arange(mins[0], maxes[0])[:, None] * \
np.array([1, 0, 0])[None, :]
br = np.arange(mins[1], maxes[1])[:, None] * \
np.array([0, 1, 0])[None, :]
cr = np.arange(mins[2], maxes[2])[:, None] * \
np.array([0, 0, 1])[None, :]
all_points = ar[:, None, None] + br[None, :, None] + cr[None, None, :]
all_points = all_points.reshape((-1, 3))
frac_points = np.dot(all_points, np.linalg.inv(supercell_matrix))
tvects = frac_points[np.all(frac_points < 1 - 1e-10, axis=1)
& np.all(frac_points >= -1e-10, axis=1)]
assert len(tvects) == round(abs(np.linalg.det(supercell_matrix)))
return tvects
def barycentric_coords(coords, simplex):
"""
Converts a list of coordinates to barycentric coordinates, given a
simplex with d+1 points. Only works for d >= 2.
Args:
coords: list of n coords to transform, shape should be (n,d)
simplex: list of coordinates that form the simplex, shape should be
(d+1, d)
Returns:
a LIST of barycentric coordinates (even if the original input was 1d)
"""
coords = np.atleast_2d(coords)
t = np.transpose(simplex[:-1, :]) - np.transpose(simplex[-1, :])[:, None]
all_but_one = np.transpose(
np.linalg.solve(t, np.transpose(coords - simplex[-1])))
last_coord = 1 - np.sum(all_but_one, axis=-1)[:, None]
return np.append(all_but_one, last_coord, axis=-1)
def get_angle(v1, v2, units="degrees"):
"""
Calculates the angle between two vectors.
Args:
v1: Vector 1
v2: Vector 2
units: "degrees" or "radians". Defaults to "degrees".
Returns:
Angle between them in degrees.
"""
d = np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)
d = min(d, 1)
d = max(d, -1)
angle = math.acos(d)
if units == "degrees":
return math.degrees(angle)
elif units == "radians":
return angle
else:
raise ValueError("Invalid units {}".format(units))
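# --- Illustrative sketch (added by the editor, not part of pymatgen) ---
# A short, self-contained demonstration of a few of the helpers above; the
# numbers are arbitrary and only meant to show the expected values.
def _coord_utils_demo():
    # fractional distance respects periodic boundary conditions
    assert np.allclose(pbc_diff([0.9, 0.1, 0.1], [0.1, 0.1, 0.1]),
                       [-0.2, 0.0, 0.0])
    # membership test taking periodic images into account
    assert in_coord_list_pbc([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
                             [1.0, 1.0, 1.0])
    # angle between Cartesian vectors, in degrees by default
    assert abs(get_angle([1, 0, 0], [0, 1, 0]) - 90.0) < 1e-10
    # barycentric coordinates of the centroid of a 2-D triangle
    simplex = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    bary = barycentric_coords([[1.0 / 3, 1.0 / 3]], simplex)
    assert np.allclose(bary, [[1.0 / 3, 1.0 / 3, 1.0 / 3]])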
|
sonium0/pymatgen
|
pymatgen/util/coord_utils.py
|
Python
|
mit
| 12,786
|
[
"pymatgen"
] |
dc153a19497d56f28a920f80724adf64a68b2f5405584bb86312a50701add4c6
|
#
# if inflow == 'log':
# kappa = 0.4 # Kappa: von karman constant
# us = WS * kappa / np.log(WF.WT[0].H / z0) # friction velocity
# #eq inflow ws
# WS_inf = gaussN(WF.WT[0].R, Ua, [WF.WT[0].H,us,z0]).sum()
# elif inflow == 'pow':
# #eq inflow ws
# WS_inf = gaussN(WF.WT[0].R, Ua_shear, [WF.WT[0].H,WS,alpha]).sum()
#
#
# def gauss_rws():
# pass
#
# class WFFM(WindFarm):
# def supperposition(self, ws, **kwargs):
# """
# """
# pass
#
# def single_wake(self, x, r, **kwargs):
# """
# """
# pass
#
# def wake_width(self, x, **kwargs):
# """
# """
# pass
#
# def rotor_wind_speed(self, ws, **kwargs):
# """
# """
# pass
#
# def run(self):
# """
# """
# (distFlowCoord,id0) = WF.turbineDistance(WD)
#
# # TODO: decide how at what height the us should be defined
# if inflow == 'log':
# kappa = 0.4 # Kappa: von karman constant
# us = WS * kappa / np.log(WF.WT[0].H / z0) # friction velocity
# #eq inflow ws
# WS_inf = gaussN(WF.WT[0].R, Ua, [WF.WT[0].H,us,z0]).sum()
# elif inflow == 'pow':
# #eq inflow ws
# WS_inf = gaussN(WF.WT[0].R, Ua_shear, [WF.WT[0].H,WS,alpha]).sum()
#
# # Gauss quadrature points
# r_Gc,w_Gc = np.polynomial.legendre.leggauss(NG)
# wj,wk=np.meshgrid(w_Gc,w_Gc)
# tj,rk=np.meshgrid(r_Gc,r_Gc)
# wj = wj.reshape((NG**2))
# tj = tj.reshape((NG**2))
# wk = wk.reshape((NG**2))
# rk = rk.reshape((NG**2))
#
# # Initialize arrays to NaN
# Ct = np.nan*np.ones([WF.nWT])
# P_WT = np.nan*np.ones([WF.nWT])
#
# # Initialize velocity to undisturbed eq ws
# U_WT = WS_inf*np.ones([WF.nWT])
# U_WT0 = WS_inf*np.ones([WF.nWT])
# DU_sq = 0.*U_WT
#
# allR = np.array([WF.WT[i].R for i in range(WF.nWT)])
#
# # Extreme wake to define WT's in each wake, including partial wakes
# ID_wake = {i:(get_Rw(x=distFlowCoord[0,i,:], # streamwise distance
# R=WF.WT[i].R, # Upstream radius
# TI=TI,
# CT=0.9, #Maximum effect
# pars=pars)
# > np.abs(distFlowCoord[1,i,:]) + allR).nonzero()[0]
# for i in id0}
#
# for i in range(WF.nWT):
# #Current wind turbine starting from the most upstream
# cWT = id0[i]
# # Current radius
# cR = WF.WT[i].R
# # Current hub wind speed
# cU = U_WT[cWT]
# if cU>WF.WT[i].u_cutin:
# Ct[cWT] = WF.WT[i].get_CT(U_WT[cWT])
# P_WT[cWT] = WF.WT[i].get_P(U_WT[cWT])
# else:
# Ct[cWT] = 0.053 # Drag coefficient of the idled turbine
# P_WT[cWT] = 0.0
#
# # Current turbine CT
# cCT=Ct[cWT]
# #ID_wake = {cWT:(get_Rw(x=distFlowCoord[0,cWT,:],\
# # R=cR*1.5,TI=TI,CT=cCT)\
# # >np.abs(distFlowCoord[1,cWT,:])).nonzero()}
#
# #Radial coordinates in cWT for wake affected WT's
# x = distFlowCoord[0, cWT, ID_wake[cWT]]
# r_Ri = np.abs(distFlowCoord[1,cWT, ID_wake[cWT]])
# th_Ri = np.pi*(np.sign(distFlowCoord[1, cWT, ID_wake[cWT]]) + 1.0) # <- what is this? [0|2pi]
#
# # Get all the wake radius at the position of the -in wake- downstream turbines
# RW = get_Rw(x=x, R=cR, TI=TI, CT=cCT, pars=pars)
#
# # Meshgrids (Tensorial) extension of points of evaluation
# # to perform Gaussian quadrature
# r_Ri_m, rk_m = np.meshgrid(r_Ri, rk)
# th_Ri_m, tj_m = np.meshgrid(th_Ri, tj)
# x_m, wj_m = np.meshgrid(x, wj)
# RW_m, wk_m = np.meshgrid(RW, wk)
#
# # downstream Radius
# downR = np.array([WF.WT[j].R for j in ID_wake[cWT]])
# downR_m, dummyvar = np.meshgrid(downR, np.zeros((NG**2)))
# cH = WF.WT[cWT].H
# downH = np.array([WF.WT[j].H for j in ID_wake[cWT]]) - cH
# downH_m, dummyvar = np.meshgrid(downH, np.zeros((NG**2)))
#
# # Radial points of evaluation <- probably need to add the turbine height difference here?
# r_eval = np.sqrt(r_Ri_m**2.0 +
# (downR_m * (rk_m + 1.) / 2.0)**2. +
# r_Ri_m * downR_m * (rk_m + 1.) * np.cos(th_Ri_m - np.pi*(tj_m + 1.)))
#
# # Eval wake velocity deficit
# DU_m = get_dU(x=x_m, r=r_eval, Rw=RW_m,
# U=cU, R=downR_m, TI=TI, CT=cCT, pars=pars)
#
# # Gaussian average
# localDU = np.sum((1./4.) * wj_m * wk_m * DU_m * (rk_m + 1.0), axis=0)
#
# # Wake superposition
# if sup == 'lin':
# U_WT[ID_wake[cWT]] = U_WT[ID_wake[cWT]] + localDU
# U_WT[U_WT<0.]=0.
# elif sup == 'quad':
# DU_sq[ID_wake[cWT]] = DU_sq[ID_wake[cWT]] + localDU**2.
# U_WT = U_WT0 - np.sqrt(DU_sq)
# U_WT[U_WT<0.]=0.
#
# return (P_WT,U_WT,Ct)
#
# def inflow_wind_speed(self):
# pass
#
# def __call__(self, **kwargs):
# self.__dict__.update(kwargs)
# self.run()
#
#
#
# def wffm_vectorized(WF, WS, WD,TI,
# z0=0.0001, alpha=0.101, inflow='log', NG=4, sup='lin'):
# """Computes the WindFarm flow and Power using GCLarsen
# [Larsen, 2009, A simple Stationary...]
#
# Parameters
# ----------
# WF: WindFarm
# Windfarm instance
# WS: float
# Undisturbed wind speed at hub height [m/s]
# WD: float
# Undisturbed wind direction at hub height [deg].
# Meteorological axis. North = 0 [deg], clockwise.
# TI: float
# Ambient turbulence intensity [-]
# z0: float, optional
# Roughness height [m]
# alpha: float, optional
# Shear coefficient [-]
# Only used for power-law undisturbed inflow.
# inflow: Str, optional
# Undisturbed inflow vertical profile:
# 'log': Logarithmic law (neutral case); uses z0
# 'pow': Power law profile; uses alpha
# NG: int, optional
# Number of points in Gaussian Quadrature for equivalent wind
# speed integration over rotor distFlowCoord
# sup: str, optional
# Wake velocity deficit superposition method:
# 'lin': Linear superposition
# 'quad' Quadratic superposition
#
# Returns
# -------
# P_WT: ndarray
# Power production of the wind turbines (nWT,1) [W]
# U_WT: ndarray
# Wind speed at hub height (nWT,1) [m/s]
# Ct: float
# Thrust coefficients for each wind turbine (nWT,1) [-]
# """
# (distFlowCoord,id0) = WF.turbineDistance(WD)
#
# # TODO: decide how at what height the us should be defined
# if inflow == 'log':
# kappa = 0.4 # Kappa: von karman constant
# us = WS * kappa / np.log(WF.WT[0].H / z0) # friction velocity
# #eq inflow ws
# WS_inf = gaussN(WF.WT[0].R, Ua, [WF.WT[0].H,us,z0]).sum()
# elif inflow == 'pow':
# #eq inflow ws
# WS_inf = gaussN(WF.WT[0].R, Ua_shear, [WF.WT[0].H,WS,alpha]).sum()
#
# # Gauss quadrature points
# r_Gc,w_Gc = np.polynomial.legendre.leggauss(NG)
# wj,wk=np.meshgrid(w_Gc,w_Gc)
# tj,rk=np.meshgrid(r_Gc,r_Gc)
# wj = wj.reshape((NG**2))
# tj = tj.reshape((NG**2))
# wk = wk.reshape((NG**2))
# rk = rk.reshape((NG**2))
#
# # Initialize arrays to NaN
# Ct = np.nan*np.ones([WF.nWT])
# P_WT = np.nan*np.ones([WF.nWT])
#
# # Initialize velocity to undisturbed eq ws
# U_WT = WS_inf*np.ones([WF.nWT])
# U_WT0 = WS_inf*np.ones([WF.nWT])
# DU_sq = 0.*U_WT
#
# allR = np.array([WF.WT[i].R for i in range(WF.nWT)])
#
# # Extreme wake to define WT's in each wake, including partial wakes
# ID_wake = {i:(get_Rw(x=distFlowCoord[0,i,:], # streamwise distance
# R=WF.WT[i].R, # Upstream radius
# TI=TI,
# CT=0.9, #Maximum effect
# pars=pars)
# > np.abs(distFlowCoord[1,i,:]) + allR).nonzero()[0]
# for i in id0}
#
# for i in range(WF.nWT):
# #Current wind turbine starting from the most upstream
# cWT = id0[i]
# # Current radius
# cR = WF.WT[i].R
# # Current hub wind speed
# cU = U_WT[cWT]
# if cU>WF.WT[i].u_cutin:
# Ct[cWT] = WF.WT[i].get_CT(U_WT[cWT])
# P_WT[cWT] = WF.WT[i].get_P(U_WT[cWT])
# else:
# Ct[cWT] = 0.053 # Drag coefficient of the idled turbine
# P_WT[cWT] = 0.0
#
# # Current turbine CT
# cCT=Ct[cWT]
# #ID_wake = {cWT:(get_Rw(x=distFlowCoord[0,cWT,:],\
# # R=cR*1.5,TI=TI,CT=cCT)\
# # >np.abs(distFlowCoord[1,cWT,:])).nonzero()}
#
# #Radial coordinates in cWT for wake affected WT's
# x = distFlowCoord[0, cWT, ID_wake[cWT]]
# r_Ri = np.abs(distFlowCoord[1,cWT, ID_wake[cWT]])
# th_Ri = np.pi*(np.sign(distFlowCoord[1, cWT, ID_wake[cWT]]) + 1.0) # <- what is this? [0|2pi]
#
# # Get all the wake radius at the position of the -in wake- downstream turbines
# RW = get_Rw(x=x, R=cR, TI=TI, CT=cCT, pars=pars)
#
# # Meshgrids (Tensorial) extension of points of evaluation
# # to perform Gaussian quadrature
# r_Ri_m, rk_m = np.meshgrid(r_Ri, rk)
# th_Ri_m, tj_m = np.meshgrid(th_Ri, tj)
# x_m, wj_m = np.meshgrid(x, wj)
# RW_m, wk_m = np.meshgrid(RW, wk)
#
# # downstream Radius
# downR = np.array([WF.WT[j].R for j in ID_wake[cWT]])
# downR_m, dummyvar = np.meshgrid(downR, np.zeros((NG**2)))
# cH = WF.WT[cWT].H
# downH = np.array([WF.WT[j].H for j in ID_wake[cWT]]) - cH
# downH_m, dummyvar = np.meshgrid(downH, np.zeros((NG**2)))
#
# # Radial points of evaluation <- probably need to add the turbine height difference here?
# r_eval = np.sqrt(r_Ri_m**2.0 +
# (downR_m * (rk_m + 1.) / 2.0)**2. +
# r_Ri_m * downR_m * (rk_m + 1.) * np.cos(th_Ri_m - np.pi*(tj_m + 1.)))
#
# # Eval wake velocity deficit
# DU_m = get_dU(x=x_m, r=r_eval, Rw=RW_m,
# U=cU, R=downR_m, TI=TI, CT=cCT, pars=pars)
#
# # Gaussian average
# localDU = np.sum((1./4.) * wj_m * wk_m * DU_m * (rk_m + 1.0), axis=0)
#
# # Wake superposition
# if sup == 'lin':
# U_WT[ID_wake[cWT]] = U_WT[ID_wake[cWT]] + localDU
# U_WT[U_WT<0.]=0.
# elif sup == 'quad':
# DU_sq[ID_wake[cWT]] = DU_sq[ID_wake[cWT]] + localDU**2.
# U_WT = U_WT0 - np.sqrt(DU_sq)
# U_WT[U_WT<0.]=0.
#
# return (P_WT,U_WT,Ct)
#
#
# def wffm_classic(WF, WS, WD, TI, z0, NG=4, sup='lin',
# pars=[0.435449861, 0.797853685, -0.124807893, 0.136821858, 15.6298, 1.0]):
# """Computes the WindFarm flow and Power using GCLarsen
# [Larsen, 2009, A simple Stationary...]
#
# Parameters
# ----------
# WF: WindFarm
# Windfarm instance
# WS: float
# Undisturbed wind speed at hub height [m/s]
# WD: float
# Undisturbed wind direction at hub height [deg].
# Meteorological axis. North = 0 [deg], clockwise.
# TI: float
# Ambient turbulence intensity [-]
# z0: float
# Roughness height [m]
#
# NG: int, optional
# Number of points in Gaussian Quadrature for equivalent wind
# speed integration over rotor distFlowCoord
# sup: str, optional
# Wake velocity deficit superposition method:
# 'lin': Linear superposition
# 'quad' Quadratic superposition
#
#
# Returns
# -------
# P_WT: ndarray
# Power production of the wind turbines (nWT,1) [W]
# U_WT: ndarray
# Wind speed at hub height (nWT,1) [m/s]
# Ct: ndarray
# Thrust coefficients for each wind turbine (nWT,1) [-]
# """
# (Dist, id0) = WF.turbineDistance(WD)
#
# kappa = 0.4 # Kappa: von karman constant
# us = WS * kappa/np.log(WF.WT[id0[0]].H/z0) # friction velocity
# WS_inf = gaussN(WF.WT[id0[0]].R, Ua, [WF.WT[id0[0]].H, us, z0]).sum() # eq inflow ws
#
# # Initialize arrays to NaN
# Ct = np.nan * np.ones([WF.nWT])
# U_WT = np.nan * np.ones([WF.nWT])
# P_WT = np.nan * np.ones([WF.nWT])
# MainWake = np.zeros([WF.nWT])
# MaxDU = 0.0
#
# # Initialize first upstream turbine
# Ct[id0[0]] = WF.WT[id0[0]].get_CT(WS_inf)
# U_WT[id0[0]] = WS_inf
# P_WT[id0[0]] = WF.WT[id0[0]].get_P(WS_inf)
#
# for i in range(1, WF.nWT):
# cWT = id0[i] # Current wind turbine
# cR = WF.WT[cWT].R
# LocalDU = np.zeros([WF.nWT, 1])
# for j in range(i-1, -1, -1):
# # Loop on the upstream turbines of iWT
# uWT = id0[j]
# uWS = U_WT[uWT] # Wind speed at wind turbine uWT
# uCT = Ct[uWT]
# uR = WF.WT[uWT].R
# if np.isnan(uCT):
# uCT = WF.WT.get_CT(uWS)
#
# WakeL = Dist[0, uWT, cWT]
# C2C = Dist[1, uWT, cWT]
#
# # Calculate the wake width of jWT at the position of iWT
# Rw = get_Rw(WakeL, uR, TI, uCT, pars)
# if (abs(C2C) <= Rw+cR or uWS == 0):
# LocalDU[uWT] = gaussN(uR, dU4Gauss,
# [C2C, 0.0, WakeL, Rw, uWS, uR, TI, uCT, pars], NG).sum()
# if LocalDU[uWT]<MaxDU:
# MaxDU = LocalDU[uWT]
# MainWake[cWT] = uWT
#
# # Wake superposition
# if sup == 'lin':
# DU = LocalDU.sum()
# elif sup == 'quad':
# DU = -np.sqrt(np.sum(LocalDU**2))
#
# U_WT[cWT] = max(0, WS_inf + DU)
# if U_WT[cWT] > WF.WT[cWT].u_cutin:
# Ct[cWT] = WF.WT[cWT].get_CT(U_WT[cWT])
# P_WT[cWT] = WF.WT[cWT].get_P(U_WT[cWT])
# else:
# Ct[cWT] = 0.053
# P_WT[cWT] = 0.0
#
# return (P_WT,U_WT,Ct)
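# --- Illustrative sketch (added by the editor, not part of FUSED-Wake) ---
# The commented-out routines above average the wake deficit over the rotor
# with a tensor-product Gauss-Legendre rule. This is a minimal, self-contained
# sketch of how those NG**2 nodes and weights are built; NG=4 mirrors the
# default used above.
def _gauss_legendre_rotor_grid(NG=4):
    import numpy as np  # local import so this sketch is self-contained
    r_gc, w_gc = np.polynomial.legendre.leggauss(NG)  # 1-D nodes/weights
    wj, wk = np.meshgrid(w_gc, w_gc)                  # weight products
    tj, rk = np.meshgrid(r_gc, r_gc)                  # angular/radial nodes
    # flatten to NG**2 evaluation points, as in the commented code above
    return (wj.reshape(NG ** 2), tj.reshape(NG ** 2),
            wk.reshape(NG ** 2), rk.reshape(NG ** 2))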
|
DTUWindEnergy/FUSED-Wake
|
fusedwake/wind_farm_flow.py
|
Python
|
mit
| 14,757
|
[
"Gaussian"
] |
f255d2fd83d2e1c47df2843d4f4f8c681316bcc0fbdf6ac3dd2dc27989dac8d1
|
import numpy as np
from cs231n.layers import *
from cs231n.layer_utils import *
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network with ReLU nonlinearity and
softmax loss that uses a modular layer design. We assume an input dimension
of D, a hidden dimension of H, and perform classification over C classes.
The architecure should be affine - relu - affine - softmax.
Note that this class does not implement gradient descent; instead, it
will interact with a separate Solver object that is responsible for running
optimization.
The learnable parameters of the model are stored in the dictionary
self.params that maps parameter names to numpy arrays.
"""
def __init__(self, input_dim=3 * 32 * 32, hidden_dim=100, num_classes=10,
weight_scale=1e-3, reg=0.0):
"""
Initialize a new network.
Inputs:
- input_dim: An integer giving the size of the input
- hidden_dim: An integer giving the size of the hidden layer
- num_classes: An integer giving the number of classes to classify
- dropout: Scalar between 0 and 1 giving dropout strength.
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- reg: Scalar giving L2 regularization strength.
"""
self.params = {}
self.reg = reg
############################################################################
# TODO: Initialize the weights and biases of the two-layer net. Weights #
# should be initialized from a Gaussian with standard deviation equal to #
# weight_scale, and biases should be initialized to zero. All weights and #
# biases should be stored in the dictionary self.params, with first layer #
# weights and biases using the keys 'W1' and 'b1' and second layer weights #
# and biases using the keys 'W2' and 'b2'. #
############################################################################
self.params['W1'] = weight_scale * np.random.randn(input_dim, hidden_dim) # / np.sqrt(input_dim / 2)
self.params['b1'] = np.zeros(hidden_dim)
self.params['W2'] = weight_scale * np.random.randn(hidden_dim, num_classes) # / np.sqrt(hidden_dim / 2)
self.params['b2'] = np.zeros(num_classes)
############################################################################
# END OF YOUR CODE #
############################################################################
def loss(self, X, y=None):
"""
Compute loss and gradient for a minibatch of data.
Inputs:
- X: Array of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,). y[i] gives the label for X[i].
Returns:
If y is None, then run a test-time forward pass of the model and return:
- scores: Array of shape (N, C) giving classification scores, where
scores[i, c] is the classification score for X[i] and class c.
If y is not None, then run a training-time forward and backward pass and
return a tuple of:
- loss: Scalar value giving the loss
- grads: Dictionary with the same keys as self.params, mapping parameter
names to gradients of the loss with respect to those parameters.
"""
scores = None
############################################################################
# TODO: Implement the forward pass for the two-layer net, computing the #
# class scores for X and storing them in the scores variable. #
############################################################################
out1, cache1 = affine_relu_forward(X, self.params['W1'], self.params['b1'])
out2, cache2 = affine_forward(out1, self.params['W2'], self.params['b2'])
scores = out2
############################################################################
# END OF YOUR CODE #
############################################################################
# If y is None then we are in test mode so just return scores
if y is None:
return scores
loss, grads = 0, {}
############################################################################
# TODO: Implement the backward pass for the two-layer net. Store the loss #
# in the loss variable and gradients in the grads dictionary. Compute data #
# loss using softmax, and make sure that grads[k] holds the gradients for #
# self.params[k]. Don't forget to add L2 regularization! #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
loss, dx = softmax_loss(scores, y)
loss += 0.5 * self.reg * (np.sum(self.params['W1'] * self.params['W1']) +
np.sum(self.params['W2'] * self.params['W2']))
dx, grads['W2'], grads['b2'] = affine_backward(dx, cache2)
grads['W2'] += self.reg * self.params['W2']
_, grads['W1'], grads['b1'] = affine_relu_backward(dx, cache1)
grads['W1'] += self.reg * self.params['W1']
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
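# --- Illustrative sketch (added by the editor, not part of the assignment) ---
# A tiny smoke test for TwoLayerNet on random data. The layer helpers it
# relies on (affine_relu_forward, softmax_loss, ...) come from cs231n.layers /
# cs231n.layer_utils imported at the top of this file.
def _two_layer_net_demo():
    np.random.seed(0)
    model = TwoLayerNet(input_dim=20, hidden_dim=10, num_classes=5, reg=0.1)
    X = np.random.randn(4, 20)
    y = np.random.randint(5, size=4)
    scores = model.loss(X)            # test-time forward pass
    assert scores.shape == (4, 5)
    loss, grads = model.loss(X, y)    # training-time forward/backward pass
    assert set(grads.keys()) == {'W1', 'b1', 'W2', 'b2'}
    return loss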
class FullyConnectedNet(object):
"""
A fully-connected neural network with an arbitrary number of hidden layers,
ReLU nonlinearities, and a softmax loss function. This will also implement
dropout and batch normalization as options. For a network with L layers,
the architecture will be
{affine - [batch norm] - relu - [dropout]} x (L - 1) - affine - softmax
where batch normalization and dropout are optional, and the {...} block is
repeated L - 1 times.
Similar to the TwoLayerNet above, learnable parameters are stored in the
self.params dictionary and will be learned using the Solver class.
"""
def __init__(self, hidden_dims, input_dim=3 * 32 * 32, num_classes=10,
dropout=0, use_batchnorm=False, reg=0.0,
weight_scale=1e-2, dtype=np.float32, seed=None):
"""
Initialize a new FullyConnectedNet.
Inputs:
- hidden_dims: A list of integers giving the size of each hidden layer.
- input_dim: An integer giving the size of the input.
- num_classes: An integer giving the number of classes to classify.
- dropout: Scalar between 0 and 1 giving dropout strength. If dropout=0 then
the network should not use dropout at all.
- use_batchnorm: Whether or not the network should use batch normalization.
- reg: Scalar giving L2 regularization strength.
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- dtype: A numpy datatype object; all computations will be performed using
this datatype. float32 is faster but less accurate, so you should use
float64 for numeric gradient checking.
- seed: If not None, then pass this random seed to the dropout layers. This
will make the dropout layers deterministic so we can gradient check the
model.
"""
self.use_batchnorm = use_batchnorm
self.use_dropout = dropout > 0
self.reg = reg
self.num_layers = 1 + len(hidden_dims)
self.dtype = dtype
self.params = {}
############################################################################
# TODO: Initialize the parameters of the network, storing all values in #
# the self.params dictionary. Store weights and biases for the first layer #
# in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #
# initialized from a normal distribution with standard deviation equal to #
# weight_scale and biases should be initialized to zero. #
# #
# When using batch normalization, store scale and shift parameters for the #
# first layer in gamma1 and beta1; for the second layer use gamma2 and #
# beta2, etc. Scale parameters should be initialized to one and shift #
# parameters should be initialized to zero. #
############################################################################
for layer_index in range(self.num_layers):
weight_name = 'W' + str(layer_index + 1)
bias_name = 'b' + str(layer_index + 1)
if layer_index == 0:
self.params[weight_name] = weight_scale * np.random.randn(input_dim, hidden_dims[0])
self.params[bias_name] = np.zeros(hidden_dims[0])
elif layer_index == self.num_layers - 1:
self.params[weight_name] = weight_scale * np.random.randn(hidden_dims[layer_index - 1], num_classes)
self.params[bias_name] = np.zeros(num_classes)
else:
self.params[weight_name] = weight_scale * np.random.randn(hidden_dims[layer_index - 1],
hidden_dims[layer_index])
self.params[bias_name] = np.zeros(hidden_dims[layer_index])
if self.use_batchnorm and layer_index != self.num_layers - 1:
gamma_name = 'gamma' + str(layer_index + 1)
beta_name = 'beta' + str(layer_index + 1)
shape = hidden_dims[layer_index]
self.params[gamma_name] = np.ones(shape)
self.params[beta_name] = np.zeros(shape)
############################################################################
# END OF YOUR CODE #
############################################################################
# When using dropout we need to pass a dropout_param dictionary to each
# dropout layer so that the layer knows the dropout probability and the mode
# (train / test). You can pass the same dropout_param to each dropout layer.
self.dropout_param = {}
if self.use_dropout:
self.dropout_param = {'mode': 'train', 'p': dropout}
if seed is not None:
self.dropout_param['seed'] = seed
# With batch normalization we need to keep track of running means and
# variances, so we need to pass a special bn_param object to each batch
# normalization layer. You should pass self.bn_params[0] to the forward pass
# of the first batch normalization layer, self.bn_params[1] to the forward
# pass of the second batch normalization layer, etc.
self.bn_params = []
if self.use_batchnorm:
self.bn_params = [{'mode': 'train'} for i in xrange(self.num_layers - 1)]
# Cast all parameters to the correct datatype
for k, v in self.params.iteritems():
self.params[k] = v.astype(dtype)
def loss(self, X, y=None):
"""
Compute loss and gradient for the fully-connected net.
Input / output: Same as TwoLayerNet above.
"""
X = X.astype(self.dtype)
mode = 'test' if y is None else 'train'
# Set train/test mode for batchnorm params and dropout param since they
# behave differently during training and testing.
if self.dropout_param is not None:
self.dropout_param['mode'] = mode
if self.use_batchnorm:
for bn_param in self.bn_params:
bn_param['mode'] = mode
scores = None
############################################################################
# TODO: Implement the forward pass for the fully-connected net, computing #
# the class scores for X and storing them in the scores variable. #
# #
# When using dropout, you'll need to pass self.dropout_param to each #
# dropout forward pass. #
# #
# When using batch normalization, you'll need to pass self.bn_params[0] to #
# the forward pass for the first batch normalization layer, pass #
# self.bn_params[1] to the forward pass for the second batch normalization #
# layer, etc. #
############################################################################
out = X
cache_lst = []
for layer_index in range(self.num_layers):
weight_name = 'W' + str(layer_index + 1)
bias_name = 'b' + str(layer_index + 1)
if layer_index == self.num_layers - 1:
out, cache = affine_forward(out, self.params[weight_name], self.params[bias_name])
else:
if not self.use_batchnorm:
if not self.use_dropout:
out, cache = affine_relu_forward(out, self.params[weight_name],
self.params[bias_name])
else:
out, cache = affine_relu_dropout_forward(out, self.params[weight_name],
self.params[bias_name], self.dropout_param)
else:
gamma_name = 'gamma' + str(layer_index + 1)
beta_name = 'beta' + str(layer_index + 1)
if not self.use_dropout:
out, cache = affine_relu_batchnorm_forward(out, self.params[weight_name],
self.params[bias_name],
self.params[gamma_name], self.params[beta_name],
self.bn_params[layer_index])
else:
out, cache = affine_relu_batchnorm_dropout_forward(out, self.params[weight_name],
self.params[bias_name],
self.params[gamma_name],
self.params[beta_name],
self.bn_params[layer_index],
self.dropout_param)
cache_lst.append(cache)
############################################################################
# END OF YOUR CODE #
############################################################################
scores = out
# If test mode return early
if mode == 'test':
return scores
loss, grads = 0.0, {}
############################################################################
# TODO: Implement the backward pass for the fully-connected net. Store the #
# loss in the loss variable and gradients in the grads dictionary. Compute #
# data loss using softmax, and make sure that grads[k] holds the gradients #
# for self.params[k]. Don't forget to add L2 regularization! #
# #
# When using batch normalization, you don't need to regularize the scale #
# and shift parameters. #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
loss, dout = softmax_loss(scores, y)
regularization = 0.0
for i in range(self.num_layers):
regularization += np.sum(self.params['W' + str(i + 1)] * self.params['W' + str(i + 1)])
loss += 0.5 * self.reg * regularization
# last layer
for layer_index in reversed(range(self.num_layers)):
weight_name = 'W' + str(layer_index + 1)
bias_name = 'b' + str(layer_index + 1)
gamma_name = 'gamma' + str(layer_index + 1)
beta_name = 'beta' + str(layer_index + 1)
if layer_index == self.num_layers - 1:
dout, grads[weight_name], grads[bias_name] = affine_backward(dout, cache_lst[layer_index])
else:
if not self.use_batchnorm:
if not self.use_dropout:
dout, grads[weight_name], grads[bias_name] = affine_relu_backward(dout, cache_lst[layer_index])
else:
dout, grads[weight_name], grads[bias_name] = affine_relu_dropout_backward(dout, cache_lst[
layer_index])
else:
if not self.use_dropout:
dout, grads[weight_name], grads[bias_name], grads[gamma_name], grads[beta_name] = \
affine_relu_batchnorm_backward(dout, cache_lst[layer_index])
else:
dout, grads[weight_name], grads[bias_name], grads[gamma_name], grads[beta_name] = \
affine_relu_batchnorm_dropout_backward(dout, cache_lst[layer_index])
grads[weight_name] += self.reg * self.params[weight_name]
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
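# --- Illustrative sketch (added by the editor, not part of the assignment) ---
# A small FullyConnectedNet with batch normalization and dropout, checked with
# a single forward/backward pass on random data. This assumes the composite
# affine/relu/batchnorm/dropout helpers referenced in loss() are provided by
# cs231n.layer_utils.
def _fully_connected_net_demo():
    np.random.seed(0)
    model = FullyConnectedNet([30, 20], input_dim=15, num_classes=5,
                              dropout=0.5, use_batchnorm=True, reg=0.01,
                              weight_scale=5e-2, dtype=np.float64, seed=123)
    X = np.random.randn(8, 15)
    y = np.random.randint(5, size=8)
    loss, grads = model.loss(X, y)
    # every learnable parameter should receive a gradient of matching shape
    for name, param in model.params.items():
        assert grads[name].shape == param.shape
    return loss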
|
vermouth1992/deep-learning-playground
|
analysis/cs231n/classifiers/fc_net.py
|
Python
|
apache-2.0
| 19,060
|
[
"Gaussian"
] |
a2c4f019449667f7e5f3bdd26049fe2cc1881cbd7aee413a0ad87859dd007c75
|
#############################
##
## The Sire python module
##
## This contains the parts of the main Sire program
## that are exposed to Python.
##
## (C) Christopher Woods
##
_module_to_package = {}
def _install_package(name, package_registry):
"""Internal function used to install the module
called 'name', using the passed 'package_registry'
to find the package that contains the package
that contains this module"""
# get the directory containing the python executable,
# we will assume this will also contain 'conda', 'pip'
# or 'easy_install'
from os.path import realpath, dirname
from os import system
from sys import executable
binpath = dirname( realpath(executable) )
# ensure that we have the root package name
try:
package = name.split(".")[0]
except:
package = name
if package in package_registry:
package = package_registry[package]
try:
print("\nTrying to install %s from package %s using %s/conda...\n" \
% (name,package,binpath))
ok = system("%s/conda install %s -y" % (binpath,package))
if ok == 0:
# installed ok
return
except:
pass
try:
print("\nTrying to install %s from package %s using %s/pip...\n" \
% (name,package,binpath))
ok = system("%s/pip install %s" % (binpath,package))
if ok == 0:
# installed ok
return
except:
pass
try:
print("\nTrying to install %s from package %s using %s/easy_install...\n" \
% (name,package,binpath))
ok = system("%s/easy_install %s" % (binpath,package))
if ok == 0:
# installed ok
return
except:
pass
print("\nWARNING: Unable to install '%s' from package '%s'\n" \
% (name,package))
return
def try_import(name, package_registry=_module_to_package):
"""Try to import the module called 'name', returning
the loaded module as an argument. If the module
is not available, then it looks up the name of
the package to install using "package_registry"
(or if this is not available, using just the name
of the module). This will then be installed using
"conda", then "pip" then "easy_install" (first one
that works will return).
For example, use this via
sys = try_import("sys")
mdtraj = try_import("mdtraj")
Note that you can also rename modules, e.g. by using
md = try_import("mdtraj")
Note that you should use try_import_from if you
want only specific symbols, e.g.
(argv, stdout) = try_import_from("sys", ["argv","stdout"])
"""
try:
mod = __import__(name)
return mod
except:
pass
if not (package_registry is None):
_install_package(name, package_registry)
return try_import(name, package_registry=None)
raise ImportError("Failed to install module %s" % name)
def try_import_from(name, fromlist, package_registry=_module_to_package):
"""Try to import from the module called 'name' the passed symbol
(or list of symbols) contained in 'fromlist', returning
the symbol (or list of symbols).
If the module cannot be loaded, then the package containing
the module is looked up in 'module_to_package' (or just guessed
from the name if it does not exist in 'module_to_package'.
An attempt is made to load the package, using first conda,
then pip, then easy_install.
Example usage:
Mol = try_import_from("Sire", "Mol")
(argv, stdout) = try_import_from("sys", ["argv", "stdout"])
mapReduce = try_import_from("scoop.Futures", "mapReduce")
ut = try_import_from("mdtraj", "utils")
"""
if isinstance(fromlist, str):
# we are importing only a single module - put
# this string into a list for the user
fromlist = [fromlist]
try:
nsyms = len(fromlist)
except:
return try_import(name, package_registry)
if nsyms == 0:
# just import the entire module
return try_import(name, package_registry)
is_loaded = False
try:
mod = __import__(name, globals(), locals(), fromlist)
is_loaded = True
except:
is_loaded = False
if not is_loaded:
if not (package_registry is None):
_install_package(name, package_registry)
return try_import_from(name, fromlist, package_registry=None)
else:
raise ImportError("Failed to install module '%s'" % name)
if nsyms == 1:
try:
return getattr(mod, fromlist[0])
except:
raise ImportError("Cannot find the symbol '%s' in module '%s'" \
% (fromlist[0],name) )
else:
ret = []
missing_symbols = []
for sym in fromlist:
try:
ret.append( getattr(mod, sym) )
except:
missing_symbols.append(sym)
if len(missing_symbols) > 0:
raise ImportError("Cannot find the following symbols in module '%s' : [ %s ]" \
% (name, ", ".join(missing_symbols)))
return ret
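# --- Illustrative sketch (added by the editor, not part of Sire) ---
# Modules whose import name differs from their conda/pip package name can be
# registered so that the installers above resolve the right package. The
# entries shown here are hypothetical examples, not shipped defaults.
#
#   _module_to_package["Bio"] = "biopython"
#   _module_to_package["yaml"] = "pyyaml"
#
#   mdtraj = try_import("mdtraj")
#   (argv, stdout) = try_import_from("sys", ["argv", "stdout"])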
#ensure that the SireQt and SireError libraries are loaded as
#these are vital for the rest of the module
import Sire.Qt
import Sire.Error
import Sire.Config
__version__ = Sire.Config.__version__
def _versionString():
"""Return a nicely formatted string that describes the current Sire version"""
import Sire.Base
return """Sire %s [%s|%s, %s]""" % \
(Sire.Base.getReleaseVersion(),
Sire.Base.getRepositoryBranch(),
Sire.Config.sire_repository_version[0:7],
["unclean", "clean"][Sire.Base.getRepositoryVersionIsClean()])
Sire.Config.versionString = _versionString
sent_usage_data = None
def _getOSInfo():
import platform as _pf
data = {}
data["platform"] = _pf.system()
if _pf.system().startswith("Darwin"):
data["OS"] = _pf.mac_ver()[0]
elif _pf.system().startswith("Linux"):
ld = _pf.linux_distribution()
data["OS"] = "%s (%s %s)" % (ld[0],ld[1],ld[2])
else:
data["OS"] = "unknown"
u = _pf.uname()
data["uname"] = "%s | %s | %s | %s" % (u.system,u.release,u.machine,u.processor)
data["OS"] = "%s : %s"
# Now try to upload usage data to siremol.org
def _uploadUsageData():
try:
global sent_usage_data
if not sent_usage_data is None:
# don't send data twice
return
import time as _time
# wait a couple of seconds before uploading. This
# stops annoying uploads when people print help
_time.sleep(2)
import os as _os
if "SIRE_DONT_PHONEHOME" in _os.environ:
# respect user wish to not phone home
if not "SIRE_SILENT_PHONEHOME" in _os.environ:
print("\n=======================================================")
print("Respecting your privacy - not sending usage statistics.")
print("Please see http://siremol.org/analytics for more information.")
print("=======================================================\n")
return
else:
if not "SIRE_SILENT_PHONEHOME" in _os.environ:
print("\n==============================================================")
print("Sending anonymous Sire usage statistics to http://siremol.org.")
print("For more information, see http://siremol.org/analytics")
print("To disable, set the environment variable 'SIRE_DONT_PHONEHOME' to 1")
print("To see the information sent, set the environment variable ")
print("SIRE_VERBOSE_PHONEHOME equal to 1. To silence this message, set")
print("the environment variable SIRE_SILENT_PHONEHOME to 1.")
print("==============================================================\n")
from Sire.Base import CPUID as _CPUID
id = _CPUID()
data = {}
# get information about the processor
data["processor"] = id.brand()
data["vendor"] = id.vendor()
data["clockspeed"] = id.clockSpeed()
data["numcores"] = id.numCores()
# get information about the operating system
import platform as _pf
data["platform"] = _pf.system()
if _pf.system().startswith("Darwin"):
data["OS"] = _pf.mac_ver()[0]
elif _pf.system().startswith("Linux"):
ld = _pf.linux_distribution()
data["OS"] = "%s (%s %s)" % (ld[0],ld[1],ld[2])
elif _pf.system().startswith("Windows"):
ld = _pf.win32_ver()
data["OS"] = "%s (%s %s)" % (ld[0],ld[1],ld[2])
else:
data["OS"] = "unknown"
u = _pf.uname()
data["uname"] = "%s | %s | %s | %s" % (u.system,u.release,u.machine,u.processor)
# get information about the version of Sire
data["version"] = Sire.__version__
data["repository"] = Sire.Config.sire_repository_url
data["repository_version"] = Sire.Config.sire_repository_version
# now get information about which Sire app is running
import sys as _sys
# get the executable name, but make sure we don't get the path
# (as it may contain sensitive user information)
data["executable"] = _os.path.basename( _sys.executable )
import json as _json
import http.client as _htc
import urllib.parse as _parse
params = _parse.urlencode({'data' : _json.dumps(data)})
headers = {"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
if "SIRE_VERBOSE_PHONEHOME" in _os.environ:
print("Information sent to http://siremol.org is...")
keys = list(data.keys())
keys.sort()
for key in keys:
print(" -- %s == %s" % (key,data[key]))
print("\n")
sent_usage_data = data
conn = _htc.HTTPConnection("siremol.org")
conn.request("POST", "/phonehome/postusagestats.php", params, headers)
#r1 = conn.getresponse()
#print(r1.status, r1.reason)
#print(r1.read())
except:
# something went wrong - just ignore the error
# and cancel the phone home
return
sent_usage_data = None
if not sent_usage_data:
import threading as _threading
_thread = _threading.Thread(target=_uploadUsageData)
_thread.daemon = True
_thread.start()
|
chryswoods/Sire
|
wrapper/__init__.py
|
Python
|
gpl-2.0
| 10,800
|
[
"MDTraj"
] |
fc11d49977b841679e04d7f51a032a46e2d95d06b60cfdbe90a8964910919474
|
# Orca
#
# Copyright 2010-2011 The Orca Team
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Implements generic chat support."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010-2011 The Orca Team"
__license__ = "LGPL"
import pyatspi
from . import cmdnames
from . import debug
from . import guilabels
from . import input_event
from . import keybindings
from . import messages
from . import orca_state
from . import settings
from . import settings_manager
_settingsManager = settings_manager.getManager()
#############################################################################
# #
# Ring List. A fixed size circular list by Flavio Catalani #
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/435902 #
# #
# Included here to keep track of conversation histories. #
# #
#############################################################################
class RingList:
def __init__(self, length):
self.__data__ = []
self.__full__ = 0
self.__max__ = length
self.__cur__ = 0
def append(self, x):
if self.__full__ == 1:
for i in range (0, self.__cur__ - 1):
self.__data__[i] = self.__data__[i + 1]
self.__data__[self.__cur__ - 1] = x
else:
self.__data__.append(x)
self.__cur__ += 1
if self.__cur__ == self.__max__:
self.__full__ = 1
def get(self):
return self.__data__
def remove(self):
if (self.__cur__ > 0):
del self.__data__[self.__cur__ - 1]
self.__cur__ -= 1
def size(self):
return self.__cur__
def maxsize(self):
return self.__max__
def __str__(self):
return ''.join(self.__data__)
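# --- Illustrative sketch (added by the editor, not part of Orca) ---
# RingList keeps only the last `length` items, discarding the oldest entry
# once it is full; index 0 is the oldest retained item.
def _ring_list_demo():
    ring = RingList(3)
    for item in ["a", "b", "c", "d"]:
        ring.append(item)
    assert ring.get() == ["b", "c", "d"]
    assert ring.size() == 3 and ring.maxsize() == 3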
#############################################################################
# #
# Conversation #
# #
#############################################################################
class Conversation:
# The number of messages to keep in the history
#
MESSAGE_LIST_LENGTH = 9
def __init__(self, name, accHistory, inputArea=None):
"""Creates a new instance of the Conversation class.
Arguments:
- name: the chatroom/conversation name
- accHistory: the accessible which holds the conversation history
- inputArea: the editable text object for this conversation.
"""
self.name = name
self.accHistory = accHistory
self.inputArea = inputArea
# A cyclic list to hold the chat room history for this conversation
#
self._messageHistory = RingList(Conversation.MESSAGE_LIST_LENGTH)
# Initially populate the cyclic lists with empty strings.
#
i = 0
while i < self._messageHistory.maxsize():
self.addMessage("")
i += 1
# Keep track of the last typing status because some platforms (e.g.
# MSN) seem to issue the status constantly and even though it has
# not changed.
#
self._typingStatus = ""
def addMessage(self, message):
"""Adds the current message to the message history.
Arguments:
- message: A string containing the message to add
"""
self._messageHistory.append(message)
def getNthMessage(self, messageNumber):
"""Returns the specified message from the message history.
Arguments:
- messageNumber: the index of the message to get.
"""
messages = self._messageHistory.get()
return messages[messageNumber]
def getTypingStatus(self):
"""Returns the typing status of the buddy in this conversation."""
return self._typingStatus
def setTypingStatus(self, status):
"""Sets the typing status of the buddy in this conversation.
Arguments:
- status: a string describing the current status.
"""
self._typingStatus = status
#############################################################################
# #
# ConversationList #
# #
#############################################################################
class ConversationList:
def __init__(self, messageListLength):
"""Creates a new instance of the ConversationList class.
Arguments:
- messageListLength: the size of the message history to keep.
"""
self.conversations = []
# A cyclic list to hold the most recent (messageListLength) previous
# messages for all conversations in the ConversationList.
#
self._messageHistory = RingList(messageListLength)
# A corresponding cyclic list to hold the name of the conversation
# associated with each message in the messageHistory.
#
self._roomHistory = RingList(messageListLength)
# Initially populate the cyclic lists with empty strings.
#
i = 0
while i < self._messageHistory.maxsize():
self.addMessage("", None)
i += 1
def addMessage(self, message, conversation):
"""Adds the current message to the message history.
Arguments:
- message: A string containing the message to add
- conversation: The instance of the Conversation class with which
the message is associated
"""
if not conversation:
name = ""
else:
if not self.hasConversation(conversation):
self.addConversation(conversation)
name = conversation.name
self._messageHistory.append(message)
self._roomHistory.append(name)
def getNthMessageAndName(self, messageNumber):
"""Returns a list containing the specified message from the message
history and the name of the chatroom/conversation associated with
that message.
Arguments:
- messageNumber: the index of the message to get.
"""
messages = self._messageHistory.get()
rooms = self._roomHistory.get()
return messages[messageNumber], rooms[messageNumber]
def hasConversation(self, conversation):
"""Returns True if we know about this conversation.
Arguments:
- conversation: the conversation of interest
"""
return conversation in self.conversations
def getNConversations(self):
"""Returns the number of conversations we currently know about."""
return len(self.conversations)
def addConversation(self, conversation):
"""Adds conversation to the list of conversations.
Arguments:
- conversation: the conversation to add
"""
self.conversations.append(conversation)
def removeConversation(self, conversation):
"""Removes conversation from the list of conversations.
Arguments:
- conversation: the conversation to remove
Returns True if conversation was successfully removed.
"""
# TODO - JD: In the Pidgin script, I do not believe we handle the
# case where a conversation window is closed. I *think* it remains
# in the overall chat history. What do we want to do in that case?
# I would assume that we'd want to remove it.... So here's a method
# to do so. Nothing in the Chat class uses it yet.
#
try:
self.conversations.remove(conversation)
except:
return False
else:
return True
#############################################################################
# #
# Chat #
# #
#############################################################################
class Chat:
"""This class implements the chat functionality which is available to
scripts.
"""
def __init__(self, script, buddyListAncestries):
"""Creates an instance of the Chat class.
Arguments:
- script: the script with which this instance is associated.
- buddyListAncestries: a list of lists of pyatspi roles beginning
with the object serving as the actual buddy list (e.g.
ROLE_TREE_TABLE) and ending with the top level object (e.g.
ROLE_FRAME).
"""
self._script = script
self._buddyListAncestries = buddyListAncestries
# Keybindings to provide conversation message history. The message
# review order will be based on the index within the list. Thus F1
# is associated with the most recent message, F2 the message before
# that, and so on. A script could override this. Setting messageKeys
# to ["a", "b", "c" ... ] will cause "a" to be associated with the
# most recent message, "b" to be associated with the message before
# that, etc. Scripts can also override the messageKeyModifier.
#
self.messageKeys = \
["F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9"]
self.messageKeyModifier = keybindings.ORCA_MODIFIER_MASK
self.inputEventHandlers = {}
self.setupInputEventHandlers()
self.keyBindings = self.getKeyBindings()
# The length of the message history will be based on how many keys
# are bound to the task of providing it.
#
self.messageListLength = len(self.messageKeys)
self._conversationList = ConversationList(self.messageListLength)
# To make pylint happy.
#
self.focusedChannelRadioButton = None
self.allChannelsRadioButton = None
self.allMessagesRadioButton = None
self.buddyTypingCheckButton = None
self.chatRoomHistoriesCheckButton = None
self.speakNameCheckButton = None
def setupInputEventHandlers(self):
"""Defines InputEventHandler fields for chat functions which
will be used by the script associated with this chat instance."""
self.inputEventHandlers["togglePrefixHandler"] = \
input_event.InputEventHandler(
self.togglePrefix,
cmdnames.CHAT_TOGGLE_ROOM_NAME_PREFIX)
self.inputEventHandlers["toggleBuddyTypingHandler"] = \
input_event.InputEventHandler(
self.toggleBuddyTyping,
cmdnames.CHAT_TOGGLE_BUDDY_TYPING)
self.inputEventHandlers["toggleMessageHistoriesHandler"] = \
input_event.InputEventHandler(
self.toggleMessageHistories,
cmdnames.CHAT_TOGGLE_MESSAGE_HISTORIES)
self.inputEventHandlers["reviewMessage"] = \
input_event.InputEventHandler(
self.readPreviousMessage,
cmdnames.CHAT_PREVIOUS_MESSAGE)
return
def getKeyBindings(self):
"""Defines the chat-related key bindings which will be used by
the script associated with this chat instance.
Returns: an instance of keybindings.KeyBindings.
"""
keyBindings = keybindings.KeyBindings()
keyBindings.add(
keybindings.KeyBinding(
"",
keybindings.defaultModifierMask,
keybindings.NO_MODIFIER_MASK,
self.inputEventHandlers["togglePrefixHandler"]))
keyBindings.add(
keybindings.KeyBinding(
"",
keybindings.defaultModifierMask,
keybindings.NO_MODIFIER_MASK,
self.inputEventHandlers["toggleBuddyTypingHandler"]))
keyBindings.add(
keybindings.KeyBinding(
"",
keybindings.defaultModifierMask,
keybindings.NO_MODIFIER_MASK,
self.inputEventHandlers["toggleMessageHistoriesHandler"]))
for messageKey in self.messageKeys:
keyBindings.add(
keybindings.KeyBinding(
messageKey,
self.messageKeyModifier,
keybindings.ORCA_MODIFIER_MASK,
self.inputEventHandlers["reviewMessage"]))
return keyBindings
def getAppPreferencesGUI(self):
"""Return a GtkGrid containing the application unique configuration
GUI items for the current application. """
from gi.repository import Gtk
grid = Gtk.Grid()
grid.set_border_width(12)
label = guilabels.CHAT_SPEAK_ROOM_NAME
value = _settingsManager.getSetting('chatSpeakRoomName')
self.speakNameCheckButton = Gtk.CheckButton.new_with_mnemonic(label)
self.speakNameCheckButton.set_active(value)
grid.attach(self.speakNameCheckButton, 0, 0, 1, 1)
label = guilabels.CHAT_ANNOUNCE_BUDDY_TYPING
value = _settingsManager.getSetting('chatAnnounceBuddyTyping')
self.buddyTypingCheckButton = Gtk.CheckButton.new_with_mnemonic(label)
self.buddyTypingCheckButton.set_active(value)
grid.attach(self.buddyTypingCheckButton, 0, 1, 1, 1)
label = guilabels.CHAT_SEPARATE_MESSAGE_HISTORIES
value = _settingsManager.getSetting('chatRoomHistories')
self.chatRoomHistoriesCheckButton = \
Gtk.CheckButton.new_with_mnemonic(label)
self.chatRoomHistoriesCheckButton.set_active(value)
grid.attach(self.chatRoomHistoriesCheckButton, 0, 2, 1, 1)
messagesFrame = Gtk.Frame()
grid.attach(messagesFrame, 0, 3, 1, 1)
label = Gtk.Label("<b>%s</b>" % guilabels.CHAT_SPEAK_MESSAGES_FROM)
label.set_use_markup(True)
messagesFrame.set_label_widget(label)
messagesAlignment = Gtk.Alignment.new(0.5, 0.5, 1, 1)
messagesAlignment.set_padding(0, 0, 12, 0)
messagesFrame.add(messagesAlignment)
messagesGrid = Gtk.Grid()
messagesAlignment.add(messagesGrid)
value = _settingsManager.getSetting('chatMessageVerbosity')
label = guilabels.CHAT_SPEAK_MESSAGES_ALL
rb1 = Gtk.RadioButton.new_with_mnemonic(None, label)
rb1.set_active(value == settings.CHAT_SPEAK_ALL)
self.allMessagesRadioButton = rb1
messagesGrid.attach(self.allMessagesRadioButton, 0, 0, 1, 1)
label = guilabels.CHAT_SPEAK_MESSAGES_ACTIVE
rb2 = Gtk.RadioButton.new_with_mnemonic(None, label)
rb2.join_group(rb1)
rb2.set_active(value == settings.CHAT_SPEAK_FOCUSED_CHANNEL)
self.focusedChannelRadioButton = rb2
messagesGrid.attach(self.focusedChannelRadioButton, 0, 1, 1, 1)
label = guilabels.CHAT_SPEAK_MESSAGES_ALL_IF_FOCUSED % \
self._script.app.name
rb3 = Gtk.RadioButton.new_with_mnemonic(None, label)
rb3.join_group(rb1)
rb3.set_active(value == settings.CHAT_SPEAK_ALL_IF_FOCUSED)
self.allChannelsRadioButton = rb3
messagesGrid.attach(self.allChannelsRadioButton, 0, 2, 1, 1)
grid.show_all()
return grid
def getPreferencesFromGUI(self):
"""Returns a dictionary with the app-specific preferences."""
if self.allChannelsRadioButton.get_active():
verbosity = settings.CHAT_SPEAK_ALL_IF_FOCUSED
elif self.focusedChannelRadioButton.get_active():
verbosity = settings.CHAT_SPEAK_FOCUSED_CHANNEL
else:
verbosity = settings.CHAT_SPEAK_ALL
return {
'chatMessageVerbosity': verbosity,
'chatSpeakRoomName': self.speakNameCheckButton.get_active(),
'chatAnnounceBuddyTyping': self.buddyTypingCheckButton.get_active(),
'chatRoomHistories': self.chatRoomHistoriesCheckButton.get_active(),
}
########################################################################
# #
# InputEvent handlers and supporting utilities #
# #
########################################################################
def togglePrefix(self, script, inputEvent):
""" Toggle whether we prefix chat room messages with the name of
the chat room.
Arguments:
- script: the script associated with this event
- inputEvent: if not None, the input event that caused this action.
"""
line = messages.CHAT_ROOM_NAME_PREFIX_ON
speakRoomName = _settingsManager.getSetting('chatSpeakRoomName')
_settingsManager.setSetting('chatSpeakRoomName', not speakRoomName)
if speakRoomName:
line = messages.CHAT_ROOM_NAME_PREFIX_OFF
self._script.presentMessage(line)
return True
def toggleBuddyTyping(self, script, inputEvent):
""" Toggle whether we announce when our buddies are typing a message.
Arguments:
- script: the script associated with this event
- inputEvent: if not None, the input event that caused this action.
"""
line = messages.CHAT_BUDDY_TYPING_ON
announceTyping = _settingsManager.getSetting('chatAnnounceBuddyTyping')
_settingsManager.setSetting(
'chatAnnounceBuddyTyping', not announceTyping)
if announceTyping:
line = messages.CHAT_BUDDY_TYPING_OFF
self._script.presentMessage(line)
return True
def toggleMessageHistories(self, script, inputEvent):
""" Toggle whether we provide chat room specific message histories.
Arguments:
- script: the script associated with this event
- inputEvent: if not None, the input event that caused this action.
"""
line = messages.CHAT_SEPARATE_HISTORIES_ON
roomHistories = _settingsManager.getSetting('chatRoomHistories')
_settingsManager.setSetting('chatRoomHistories', not roomHistories)
if roomHistories:
line = messages.CHAT_SEPARATE_HISTORIES_OFF
self._script.presentMessage(line)
return True
def readPreviousMessage(self, script, inputEvent=None, index=0):
""" Speak/braille a previous chat room message.
Arguments:
- script: the script associated with this event
- inputEvent: if not None, the input event that caused this action.
- index: The index of the message to read -- by default, the most
recent message. If we get an inputEvent, however, the value of
index is ignored and the index of the event_string with respect
to self.messageKeys is used instead.
"""
try:
index = self.messageKeys.index(inputEvent.event_string)
except:
pass
messageNumber = self.messageListLength - (index + 1)
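        # For example, with the default nine message keys, index 0 (the most
        # recent message) maps to messageNumber 9 - (0 + 1) = 8, while index 8
        # maps to messageNumber 0; assuming RingList.get() returns entries
        # oldest-first, slot 8 holds the newest retained message.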
message, chatRoomName = None, None
if _settingsManager.getSetting('chatRoomHistories'):
conversation = self.getConversation(orca_state.locusOfFocus)
if conversation:
message = conversation.getNthMessage(messageNumber)
chatRoomName = conversation.name
else:
message, chatRoomName = \
self._conversationList.getNthMessageAndName(messageNumber)
if message and chatRoomName:
self.utterMessage(chatRoomName, message, True)
def utterMessage(self, chatRoomName, message, focused=True):
""" Speak/braille a chat room message.
Arguments:
- chatRoomName: name of the chat room this message came from
- message: the chat room message
- focused: whether or not the current chatroom has focus. Defaults
to True so that we can use this method to present chat history
as well as incoming messages.
"""
# Only speak/braille the new message if it matches how the user
# wants chat messages spoken.
#
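        # The checks below mirror the three verbosity settings: CHAT_SPEAK_ALL
        # presents every message, CHAT_SPEAK_ALL_IF_FOCUSED presents them only
        # while this application's script is active, and
        # CHAT_SPEAK_FOCUSED_CHANNEL presents only messages from the
        # conversation that currently has focus.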
verbosity = _settingsManager.getAppSetting(self._script.app, 'chatMessageVerbosity')
if orca_state.activeScript.name != self._script.name \
and verbosity == settings.CHAT_SPEAK_ALL_IF_FOCUSED:
return
elif not focused and verbosity == settings.CHAT_SPEAK_FOCUSED_CHANNEL:
return
text = ""
if chatRoomName and \
_settingsManager.getAppSetting(self._script.app, 'chatSpeakRoomName'):
text = messages.CHAT_MESSAGE_FROM_ROOM % chatRoomName
if not settings.presentChatRoomLast:
text = self._script.utilities.appendString(text, message)
else:
text = self._script.utilities.appendString(message, text)
if len(text.strip()):
voice = self._script.speechGenerator.voice(string=text)
self._script.speakMessage(text, voice=voice)
self._script.displayBrailleMessage(text)
def getMessageFromEvent(self, event):
"""Get the actual displayed message. This will almost always be the
unaltered any_data from an event of type object:text-changed:insert.
Arguments:
- event: the Event from which to take the text.
Returns the string which should be presented as the newly-inserted
text. (Things like chatroom name prefacing get handled elsewhere.)
"""
return event.any_data
def presentInsertedText(self, event):
"""Gives the Chat class an opportunity to present the text from the
text inserted Event.
Arguments:
- event: the text inserted Event
Returns True if we handled this event here; otherwise False, which
        tells the associated script that this is not a chat event requiring
        custom handling.
"""
if not event \
or not event.type.startswith("object:text-changed:insert") \
or not event.any_data:
return False
if self.isGenericTextObject(event.source):
# The script should handle non-chat specific text areas (e.g.,
# adding a new account).
#
return False
elif self.isInBuddyList(event.source):
# These are status changes. What the Pidgin script currently
# does for these is ignore them. It might be nice to add
# some options to allow the user to customize what status
# changes are presented. But for now, we'll ignore them
# across the board.
#
return True
elif self.isTypingStatusChangedEvent(event):
self.presentTypingStatusChange(event, event.any_data)
return True
elif self.isChatRoomMsg(event.source):
# We always automatically go back to focus tracking mode when
# someone sends us a message.
#
if self._script.flatReviewContext:
self._script.toggleFlatReviewMode()
if self.isNewConversation(event.source):
name = self.getChatRoomName(event.source)
conversation = Conversation(name, event.source)
else:
conversation = self.getConversation(event.source)
name = conversation.name
message = self.getMessageFromEvent(event).strip("\n")
if message:
self.addMessageToHistory(message, conversation)
# The user may or may not want us to present this message. Also,
# don't speak the name if it's the focused chat.
#
focused = self.isFocusedChat(event.source)
if focused:
name = ""
if message:
self.utterMessage(name, message, focused)
return True
elif self.isAutoCompletedTextEvent(event):
text = event.any_data
voice = self._script.speechGenerator.voice(string=text)
self._script.speakMessage(text, voice=voice)
return True
return False
def presentTypingStatusChange(self, event, status):
"""Presents a change in typing status for the current conversation
if the status has indeed changed and if the user wants to hear it.
Arguments:
- event: the accessible Event
- status: a string containing the status change
Returns True if we spoke the change; False otherwise
"""
if _settingsManager.getSetting('chatAnnounceBuddyTyping'):
conversation = self.getConversation(event.source)
if conversation and (status != conversation.getTypingStatus()):
voice = self._script.speechGenerator.voice(string=status)
self._script.speakMessage(status, voice=voice)
conversation.setTypingStatus(status)
return True
return False
def addMessageToHistory(self, message, conversation):
"""Adds message to both the individual conversation's history
as well as to the complete history stored in our conversation
list.
Arguments:
- message: a string containing the message to be added
- conversation: the instance of the Conversation class to which
this message belongs
"""
conversation.addMessage(message)
self._conversationList.addMessage(message, conversation)
########################################################################
# #
# Convenience methods for identifying, locating different accessibles #
# #
########################################################################
def isGenericTextObject(self, obj):
"""Returns True if the given accessible seems to be something
unrelated to the custom handling we're attempting to do here.
Arguments:
- obj: the accessible object to examine.
"""
state = obj.getState()
if state.contains(pyatspi.STATE_EDITABLE) \
and state.contains(pyatspi.STATE_SINGLE_LINE):
return True
return False
def isBuddyList(self, obj):
"""Returns True if obj is the list of buddies in the buddy list
window. Note that this method relies upon a hierarchical check,
using a list of hierarchies provided by the script. Scripts
which have more reliable means of identifying the buddy list
can override this method.
Arguments:
- obj: the accessible being examined
"""
if obj:
for roleList in self._buddyListAncestries:
if self._script.utilities.hasMatchingHierarchy(obj, roleList):
return True
return False
def isInBuddyList(self, obj, includeList=True):
"""Returns True if obj is, or is inside of, the buddy list.
Arguments:
- obj: the accessible being examined
- includeList: whether or not the list itself should be
considered "in" the buddy list.
"""
if includeList and self.isBuddyList(obj):
return True
for roleList in self._buddyListAncestries:
buddyListRole = roleList[0]
candidate = self._script.utilities.ancestorWithRole(
obj, [buddyListRole], [pyatspi.ROLE_FRAME])
if self.isBuddyList(candidate):
return True
return False
def isNewConversation(self, obj):
"""Returns True if the given accessible is the chat history
associated with a new conversation.
Arguments:
- obj: the accessible object to examine.
"""
conversation = self.getConversation(obj)
return not self._conversationList.hasConversation(conversation)
def getConversation(self, obj):
"""Attempts to locate the conversation associated with obj.
Arguments:
- obj: the accessible of interest
Returns the conversation if found; None otherwise
"""
if not obj:
return None
name = ""
# TODO - JD: If we have multiple chats going on and those
# chats have the same name, and we're in the input area,
# this approach will fail. What I should probably do instead
# is, upon creation of a new conversation, figure out where
# the input area is and save it. For now, I just want to get
# things working. And people should not be in multiple chat
# rooms with identical names anyway. :-)
#
if obj.getRole() in [pyatspi.ROLE_TEXT, pyatspi.ROLE_ENTRY] \
and obj.getState().contains(pyatspi.STATE_EDITABLE):
name = self.getChatRoomName(obj)
for conversation in self._conversationList.conversations:
if name:
if name == conversation.name:
return conversation
# Doing an equality check seems to be preferable here to
# utilities.isSameObject as a result of false positives.
#
elif obj == conversation.accHistory:
return conversation
return None
def isChatRoomMsg(self, obj):
"""Returns True if the given accessible is the text object for
associated with a chat room conversation.
Arguments:
- obj: the accessible object to examine.
"""
if obj and obj.getRole() == pyatspi.ROLE_TEXT \
and obj.parent.getRole() == pyatspi.ROLE_SCROLL_PANE:
state = obj.getState()
if not state.contains(pyatspi.STATE_EDITABLE) \
and state.contains(pyatspi.STATE_MULTI_LINE):
return True
return False
def isFocusedChat(self, obj):
"""Returns True if we plan to treat this chat as focused for
the purpose of deciding whether or not a message should be
presented to the user.
Arguments:
- obj: the accessible object to examine.
"""
if obj and obj.getState().contains(pyatspi.STATE_SHOWING):
active = self._script.utilities.topLevelObjectIsActiveAndCurrent(obj)
msg = "INFO: %s's window is focused chat: %s" % (obj, active)
debug.println(debug.LEVEL_INFO, msg, True)
return active
msg = "INFO: %s is not focused chat (not showing)" % obj
debug.println(debug.LEVEL_INFO, msg, True)
return False
def getChatRoomName(self, obj):
"""Attempts to find the name of the current chat room.
Arguments:
- obj: The accessible of interest
Returns a string containing what we think is the chat room name.
"""
# Most of the time, it seems that the name can be found in the
# page tab which is the ancestor of the chat history. Failing
# that, we'll look at the frame name. Failing that, scripts
# should override this method. :-)
#
ancestor = self._script.utilities.ancestorWithRole(
obj,
[pyatspi.ROLE_PAGE_TAB, pyatspi.ROLE_FRAME],
[pyatspi.ROLE_APPLICATION])
name = ""
try:
text = self._script.utilities.displayedText(ancestor)
if text.lower().strip() != self._script.name.lower().strip():
name = text
except:
pass
# Some applications don't trash their page tab list when there is
# only one active chat, but instead they remove the text or hide
# the item. Therefore, we'll give it one more shot.
#
if not name:
ancestor = self._script.utilities.ancestorWithRole(
ancestor, [pyatspi.ROLE_FRAME], [pyatspi.ROLE_APPLICATION])
try:
text = self._script.utilities.displayedText(ancestor)
if text.lower().strip() != self._script.name.lower().strip():
name = text
except:
pass
return name
def isAutoCompletedTextEvent(self, event):
"""Returns True if event is associated with text being autocompleted.
Arguments:
- event: the accessible event being examined
"""
if event.source.getRole() != pyatspi.ROLE_TEXT:
return False
lastKey, mods = self._script.utilities.lastKeyAndModifiers()
if lastKey == "Tab" and event.any_data and event.any_data != "\t":
return True
return False
def isTypingStatusChangedEvent(self, event):
"""Returns True if event is associated with a change in typing status.
Arguments:
- event: the accessible event being examined
"""
# TODO - JD: I still need to figure this one out. Pidgin seems to
# no longer be presenting this change in the conversation history
# as it was doing before. And I'm not yet sure what other apps do.
# In the meantime, scripts can override this.
#
return False
| GNOME/orca | src/orca/chat.py | Python | lgpl-2.1 | 34,518 | ["ORCA"] | 6259966fb721b4361281900d66e33954a885459be7eb976462c748039654be7a |
"""
"""
import os
from tempfile import tempdir
from pulsar.manager_factory import build_managers
from pulsar.cache import Cache
from pulsar.tools import ToolBox
from pulsar.tools.authorization import get_authorizer
from pulsar import messaging
from galaxy.objectstore import build_object_store_from_config
from galaxy.tools.deps import DependencyManager
from galaxy.jobs.metrics import JobMetrics
from galaxy.util.bunch import Bunch
from logging import getLogger
log = getLogger(__name__)
DEFAULT_PRIVATE_TOKEN = None
DEFAULT_FILES_DIRECTORY = "files"
DEFAULT_STAGING_DIRECTORY = os.path.join(DEFAULT_FILES_DIRECTORY, "staging")
DEFAULT_PERSISTENCE_DIRECTORY = os.path.join(DEFAULT_FILES_DIRECTORY, "persisted_data")
NOT_WHITELIST_WARNING = "Starting the Pulsar without a toolbox to white-list." + \
"Ensure this application is protected by firewall or a configured private token."
class PulsarApp(object):
def __init__(self, **conf):
if conf is None:
conf = {}
self.__setup_staging_directory(conf.get("staging_directory", DEFAULT_STAGING_DIRECTORY))
self.__setup_private_token(conf.get("private_token", DEFAULT_PRIVATE_TOKEN))
self.__setup_persistence_directory(conf.get("persistence_directory", None))
self.__setup_tool_config(conf)
self.__setup_object_store(conf)
self.__setup_dependency_manager(conf)
self.__setup_job_metrics(conf)
self.__setup_managers(conf)
self.__setup_file_cache(conf)
self.__setup_bind_to_message_queue(conf)
self.__recover_jobs()
self.ensure_cleanup = conf.get("ensure_cleanup", False)
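    # Minimal usage sketch (the keys shown are read by __init__; the file
    # names are illustrative, not a definitive configuration):
    #
    #     app = PulsarApp(
    #         staging_directory="files/staging",
    #         tool_config_files="toolbox/conf.xml",
    #         private_token=None,
    #     )
    #     manager = app.only_manager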
def shutdown(self, timeout=None):
for manager in self.managers.values():
try:
manager.shutdown(timeout)
except Exception:
pass
if self.__queue_state:
self.__queue_state.deactivate()
if self.ensure_cleanup:
self.__queue_state.join(timeout)
def __setup_bind_to_message_queue(self, conf):
message_queue_url = conf.get("message_queue_url", None)
queue_state = None
if message_queue_url:
queue_state = messaging.bind_app(self, message_queue_url, conf)
self.__queue_state = queue_state
def __setup_tool_config(self, conf):
"""
        Sets up the toolbox object and authorization mechanism based
        on the supplied tool configuration file(s).
"""
tool_config_files = conf.get("tool_config_files", None)
if not tool_config_files:
            # For compatibility with Galaxy, allow the tool_config_file
            # option name.
tool_config_files = conf.get("tool_config_file", None)
toolbox = None
if tool_config_files:
toolbox = ToolBox(tool_config_files)
else:
log.info(NOT_WHITELIST_WARNING)
self.toolbox = toolbox
self.authorizer = get_authorizer(toolbox)
def __setup_staging_directory(self, staging_directory):
self.staging_directory = os.path.abspath(staging_directory)
def __setup_managers(self, conf):
self.managers = build_managers(self, conf)
def __recover_jobs(self):
for manager in self.managers.values():
manager.recover_active_jobs()
def __setup_private_token(self, private_token):
self.private_token = private_token
if private_token:
log.info("Securing Pulsar web app with private key, please verify you are using HTTPS so key cannot be obtained by monitoring traffic.")
def __setup_persistence_directory(self, persistence_directory):
persistence_directory = persistence_directory or DEFAULT_PERSISTENCE_DIRECTORY
if persistence_directory == "__none__":
persistence_directory = None
self.persistence_directory = persistence_directory
def __setup_file_cache(self, conf):
file_cache_dir = conf.get('file_cache_dir', None)
self.file_cache = Cache(file_cache_dir) if file_cache_dir else None
def __setup_object_store(self, conf):
if "object_store_config_file" not in conf:
self.object_store = None
return
object_store_config = Bunch(
object_store_config_file=conf['object_store_config_file'],
file_path=conf.get("object_store_file_path", None),
object_store_check_old_style=False,
job_working_directory=conf.get("object_store_job_working_directory", None),
new_file_path=conf.get("object_store_new_file_path", tempdir),
umask=int(conf.get("object_store_umask", "0000")),
)
self.object_store = build_object_store_from_config(object_store_config)
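        # Example conf fragment this method expects (file names are
        # hypothetical):
        #     {"object_store_config_file": "object_store_conf.xml",
        #      "object_store_file_path": "files/objects"}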
def __setup_dependency_manager(self, conf):
dependencies_dir = conf.get("tool_dependency_dir", "dependencies")
resolvers_config_file = conf.get("dependency_resolvers_config_file", "dependency_resolvers_conf.xml")
self.dependency_manager = DependencyManager(dependencies_dir, resolvers_config_file)
def __setup_job_metrics(self, conf):
job_metrics_config_file = conf.get("job_metrics_config_file", "job_metrics_conf.xml")
self.job_metrics = JobMetrics(job_metrics_config_file)
@property
def only_manager(self):
        # Convenience method for tests, etc., where we know there
        # is only one manager.
assert len(self.managers) == 1
return list(self.managers.values())[0]
| ssorgatem/pulsar | pulsar/core.py | Python | apache-2.0 | 5,506 | ["Galaxy"] | 1e4857506e41c8bfcdc20c569890ef9d9a66a4c7a2e2e5e387572532b0d36edd |
# (C) British Crown Copyright 2013 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the :mod:`iris.fileformats.netcdf` module."""
from __future__ import (absolute_import, division, print_function)
| Jozhogg/iris | lib/iris/tests/unit/fileformats/netcdf/__init__.py | Python | lgpl-3.0 | 850 | ["NetCDF"] | 1a8a7828d2ee90251e3f424ec38207f8390434653e3f68a1ad13fcf1184498ef |
#!/usr/bin/env python
"""
PolyGA - version 1.2b - Last modified: 30-DEC-2013
Usage:
python polyga.py -h
python polyga.py -a feature -c <filename> -i <filename> -g <filename> -s <filename> -o <file prefix>
python polyga.py -a feature -c <filename> -d <HDF5 filename> -o <file prefix>
Copyright (C) 2013 Michael Mooney
This file is part of PolyGA.
PolyGA is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PolyGA is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program (see the file COPYING).
If not, see <http://www.gnu.org/licenses/>.
Send bug reports, requests, etc. to mooneymi@ohsu.edu
"""
def polyga_main(cmd, opts):
"""
"""
if cmd == 'ga_main':
params = polyga_utils.process_config(opts['conf'])
polyga_core.ga_main(opts, params)
elif cmd == 'results':
polyga_utils.results_table(opts['results'], opts['analysis'], opts['threshold'])
elif cmd == 'performance_plot':
polyga_utils.results_table(opts['results'], opts['analysis'], opts['threshold'])
polyga_utils.performance_plot(opts['results'], opts['analysis'])
elif cmd == 'network_plot':
polyga_utils.network_plot(opts['data'], opts['results'], opts['analysis'], opts['generation'], opts['group'])
elif cmd == 'cytoscape':
polyga_utils.cytoscape_output(opts['data'], opts['results'], opts['analysis'], opts['generation'], opts['group'], opts['threshold'])
elif cmd == 'kegg':
polyga_utils.kegg_path_plot(opts['results'], opts['analysis'], opts['threshold'], opts['kegg'], opts['out'])
else:
print "There was an error processing the command-line options!\n"
return None
if __name__ == '__main__':
try:
import argparse
import datetime
import polyga_utils
import polyga_core
except ImportError as imperr:
print "Error: one or more required modules failed to load.\n"
raise ImportError(str(imperr))
else:
parser = argparse.ArgumentParser(description="Performs a search to identify polygenic signatures associated with a trait of interest. Candidate feature sets are identified by searching a gene-gene interaction network using a Genetic Algorithm, which is guided by user-supplied expert knowledge.")
parser.add_argument('-a', '--analysis', required=False, default='feature', choices=['feature', 'gene'], help="The level (either 'gene' or 'feature') at which to perform the search.")
parser.add_argument('-c', '--conf', required=False, help="Location of the GA configuration file containing the algorithm parameters.")
parser.add_argument('-d', '--data', required=False, help="Location of data in HDF5 format. This option assumes HDF5 format for output also.")
parser.add_argument('-i', '--interactions', required=False, help="Location of the gene-gene interactions file.")
parser.add_argument('-f', '--features', required=False, help="Location of the gene-feature map file.")
parser.add_argument('-g', '--genes', required=False, help="Location of the gene annotation file.")
parser.add_argument('-o', '--out', required=False, help="Location and prefix to save the data and results.")
parser.add_argument('-r', '--results', required=False, help="Location of the polyGA results file. This option will produce a tab-delimited file containing the top hits identified by the algorithm (p-value less than the threshold specified with the '-t' option).")
parser.add_argument('-t', '--threshold', required=False, type=float, help="The fitness threshold for reported feature groups (default = 5e-5). The results file must be specified with '-r'.")
parser.add_argument('-p', '--plot', required=False, action='store_true', default=False, help="Will produce a plot (as a PDF) of the GA performance (-log10(fitness) vs. generation). The results file must be specified with '-r'.")
parser.add_argument('-n', '--network', required=False, action='store_true', default=False, help="Will produce a network plot (as a PDF) of the specified feature group. Two integers, indicating the generation and group number of the group to be plotted, must be provided. Both a data file and a results file must also be specified with the '-d' and '-r' options.")
parser.add_argument('generation', type=int, help="The generation of the feature group to be plotted. Required only for the network plot '-n' option.")
parser.add_argument('group', type=int, help="The group number of the feature group to be plotted. Required only for the network plot '-n' option.")
parser.add_argument('-cyto', '--cytoscape', required=False, action='store_true', default=False, help="Will produce files that can be imported into Cytoscape. This will allow visualization of significant variant interactions within the context of the gene-gene interaction network. Currently this option can only be used for analyses investigating variant groups of size 2. Results and data files must be specified with the '-r' and '-d' options. Also, an output file prefix and a fitness threshold must be specified with the '-o' and '-t' options.")
parser.add_argument('-k', '--kegg', required=False, help="Location of the KEGG pathway XML file. This option will produce a file that can be used with the 'plotKEGGpathway.r' R script to produce a pathway figure annotated with association results. Currently this option can only be used for analyses investigating variant groups of size 2. A results file, an output file prefix and a fitness threshold must be specified with the '-r', '-o' and '-t' options.")
args_obj = parser.parse_args()
args = vars(args_obj)
print "\npolyGA (version 1.0b)\n"
start_time = datetime.datetime.now()
print "Start date and time: "+start_time.strftime("%Y-%m-%d %H:%M")+"\n"
cmd, opts = polyga_utils.process_options(args)
polyga_main(cmd, opts)
end_time = datetime.datetime.now()
print "End date and time: "+end_time.strftime("%Y-%m-%d %H:%M")+"\n"
elapsed_time = end_time - start_time
print "Elapsed time: "+str(elapsed_time)+"\n"
| mooneymi/polyga | polyga.py | Python | gpl-3.0 | 6,315 | ["Cytoscape"] | 756e016b46744f91fa22cf1ca7905d2322f956b24bd82fa2542d2d0eb98e781e |
#############################################################################
# Code for managing and training a variational Iterative Refinement Model. #
#############################################################################
# basic python
import numpy as np
import numpy.random as npr
from collections import OrderedDict
# theano business
import theano
import theano.tensor as T
#from theano.tensor.shared_randomstreams import RandomStreams as RandStream
from theano.sandbox.cuda.rng_curand import CURAND_RandomStreams as RandStream
# phil's sweetness
from NetLayers import HiddenLayer, DiscLayer, relu_actfun, softplus_actfun, \
apply_mask
from InfNet import InfNet
from DKCode import get_adam_updates, get_adadelta_updates
from LogPDFs import log_prob_bernoulli, log_prob_gaussian2, gaussian_kld
from HelperFuncs import to_fX
#
# Important symbolic variables:
# Xd: Xd represents input at the "data variables" of the inferencer
#
class MultiStageModel(object):
"""
Controller for training a multi-step iterative refinement model.
Parameters:
rng: numpy.random.RandomState (for reproducibility)
x_in: symbolic "data" input to this MultiStageModel
x_out: symbolic "target" output for this MultiStageModel
x_mask: symbolic binary "mask" describing known/missing target values
p_s0_obs_given_z_obs: InfNet for s0 given z_obs
p_hi_given_si: InfNet for hi given si
p_sip1_given_si_hi: InfNet for sip1 given si and hi
p_x_given_si_hi: InfNet for x given si and hi
q_z_given_x: InfNet for z given x
q_hi_given_x_si: InfNet for hi given x and si
model_init_obs: whether to use a model-based initial obs state
obs_dim: dimension of the observations to generate
z_dim: dimension of the "initial" latent space
h_dim: dimension of the "primary" latent space
ir_steps: number of "iterative refinement" steps to perform
params: REQUIRED PARAMS SHOWN BELOW
x_type: can be "bernoulli" or "gaussian"
obs_transform: can be 'none' or 'sigmoid'
"""
def __init__(self, rng=None, x_in=None, \
p_s0_obs_given_z_obs=None, p_hi_given_si=None, p_sip1_given_si_hi=None, \
p_x_given_si_hi=None, q_z_given_x=None, q_hi_given_x_si=None, \
obs_dim=None, z_dim=None, h_dim=None, \
model_init_obs=True, ir_steps=2, \
params=None):
        # setup a rng for this MultiStageModel
self.rng = RandStream(rng.randint(100000))
# TODO: implement functionality for working with "latent" si
assert(p_x_given_si_hi is None)
# decide whether to initialize from a model or from a "constant"
self.model_init_obs = model_init_obs
# grab the user-provided parameters
self.params = params
self.x_type = self.params['x_type']
assert((self.x_type == 'bernoulli') or (self.x_type == 'gaussian'))
if 'obs_transform' in self.params:
assert((self.params['obs_transform'] == 'sigmoid') or \
(self.params['obs_transform'] == 'none'))
if self.params['obs_transform'] == 'sigmoid':
self.obs_transform = lambda x: T.nnet.sigmoid(x)
else:
self.obs_transform = lambda x: x
else:
self.obs_transform = lambda x: T.nnet.sigmoid(x)
if self.x_type == 'bernoulli':
self.obs_transform = lambda x: T.nnet.sigmoid(x)
# record the dimensions of various spaces relevant to this model
self.obs_dim = obs_dim
self.z_dim = z_dim
self.h_dim = h_dim
self.ir_steps = ir_steps
# record the symbolic variables that will provide inputs to the
# computation graph created to describe this MultiStageModel
self.x = x_in
self.batch_reps = T.lscalar()
# setup switching variable for changing between sampling/training
zero_ary = np.zeros((1,)).astype(theano.config.floatX)
self.train_switch = theano.shared(value=zero_ary, name='msm_train_switch')
self.set_train_switch(1.0)
# setup a weight for pulling priors over hi given si towards a
# shared global prior -- e.g. zero mean and unit variance.
self.kzg_weight = theano.shared(value=zero_ary, name='msm_kzg_weight')
self.set_kzg_weight(0.1)
# this weight balances l1 vs. l2 penalty on posterior KLds
self.l1l2_weight = theano.shared(value=zero_ary, name='msm_l1l2_weight')
self.set_l1l2_weight(1.0)
# this parameter controls dropout rate in the generator read function
self.drop_rate = theano.shared(value=zero_ary, name='msm_drop_rate')
self.set_drop_rate(0.0)
#############################
# Setup self.z and self.s0. #
#############################
print("Building MSM step 0...")
obs_scale = 0.0
if self.model_init_obs: # initialize obs state from generative model
obs_scale = 1.0
self.q_z_given_x = q_z_given_x.shared_param_clone(rng=rng, Xd=self.x)
self.z = self.q_z_given_x.output
self.p_s0_obs_given_z_obs = p_s0_obs_given_z_obs.shared_param_clone( \
rng=rng, Xd=self.z)
_s0_obs_model = self.p_s0_obs_given_z_obs.output_mean
_s0_obs_const = self.p_s0_obs_given_z_obs.mu_layers[-1].b
self.s0_obs = (obs_scale * _s0_obs_model) + \
((1.0 - obs_scale) * _s0_obs_const)
self.output_logvar = self.p_s0_obs_given_z_obs.sigma_layers[-1].b
self.bounded_logvar = 8.0 * T.tanh((1.0/8.0) * self.output_logvar)
###############################################################
# Setup the iterative refinement loop, starting from self.s0. #
###############################################################
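        # Each refinement step draws hi from q_hi_given_x_si (training) or
        # from p_hi_given_si (sampling), selected via train_switch, and then
        # updates the state as: si+1 = si + p_sip1_given_si_hi.output_mean.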
self.p_hi_given_si = [] # holds p_hi_given_si for each i
self.p_sip1_given_si_hi = [] # holds p_sip1_given_si_hi for each i
self.q_hi_given_x_si = [] # holds q_hi_given_x_si for each i
self.si = [self.s0_obs] # holds si for each i
self.hi = [] # holds hi for each i
for i in range(self.ir_steps):
print("Building MSM step {0:d}...".format(i+1))
si_obs = self.si[i]
# get samples of next hi, conditioned on current si
self.p_hi_given_si.append( \
p_hi_given_si.shared_param_clone(rng=rng, \
Xd=self.obs_transform(si_obs)))
hi_p = self.p_hi_given_si[i].output
# now we build the model for variational hi given si
grad_ll = self.x - self.obs_transform(si_obs)
self.q_hi_given_x_si.append(\
q_hi_given_x_si.shared_param_clone(rng=rng, \
Xd=T.horizontal_stack( \
grad_ll, self.obs_transform(si_obs))))
hi_q = self.q_hi_given_x_si[i].output
# make hi samples that can be switched between hi_p and hi_q
self.hi.append( ((self.train_switch[0] * hi_q) + \
((1.0 - self.train_switch[0]) * hi_p)) )
# p_sip1_given_si_hi is conditioned on hi.
self.p_sip1_given_si_hi.append( \
p_sip1_given_si_hi.shared_param_clone(rng=rng, \
Xd=self.hi[i]))
# construct the update from si_obs to sip1_obs
sip1_obs = si_obs + self.p_sip1_given_si_hi[i].output_mean
# record the updated state of the generative process
self.si.append(sip1_obs)
######################################################################
# ALL SYMBOLIC VARS NEEDED FOR THE OBJECTIVE SHOULD NOW BE AVAILABLE #
######################################################################
# shared var learning rate for generator and inferencer
zero_ary = np.zeros((1,)).astype(theano.config.floatX)
self.lr_1 = theano.shared(value=zero_ary, name='msm_lr_1')
self.lr_2 = theano.shared(value=zero_ary, name='msm_lr_2')
# shared var momentum parameters for generator and inferencer
self.mom_1 = theano.shared(value=zero_ary, name='msm_mom_1')
self.mom_2 = theano.shared(value=zero_ary, name='msm_mom_2')
# init parameters for controlling learning dynamics
self.set_sgd_params()
# init shared var for weighting nll of data given posterior sample
self.lam_nll = theano.shared(value=zero_ary, name='msm_lam_nll')
self.set_lam_nll(lam_nll=1.0)
# init shared var for weighting prior kld against reconstruction
self.lam_kld_1 = theano.shared(value=zero_ary, name='msm_lam_kld_1')
self.lam_kld_2 = theano.shared(value=zero_ary, name='msm_lam_kld_2')
self.set_lam_kld(lam_kld_1=1.0, lam_kld_2=1.0)
# init shared var for controlling l2 regularization on params
self.lam_l2w = theano.shared(value=zero_ary, name='msm_lam_l2w')
self.set_lam_l2w(1e-5)
# Grab all of the "optimizable" parameters in "group 1"
self.group_1_params = []
self.group_1_params.extend(self.q_z_given_x.mlp_params)
self.group_1_params.extend(self.p_s0_obs_given_z_obs.mlp_params)
# Grab all of the "optimizable" parameters in "group 2"
self.group_2_params = []
for i in range(self.ir_steps):
self.group_2_params.extend(self.q_hi_given_x_si[i].mlp_params)
self.group_2_params.extend(self.p_hi_given_si[i].mlp_params)
self.group_2_params.extend(self.p_sip1_given_si_hi[i].mlp_params)
# Make a joint list of parameters group 1/2
self.joint_params = self.group_1_params + self.group_2_params
#################################
# CONSTRUCT THE KLD-BASED COSTS #
#################################
self.kld_z, self.kld_hi_cond, self.kld_hi_glob = \
self._construct_kld_costs()
self.kld_cost = (self.lam_kld_1[0] * T.mean(self.kld_z)) + \
(self.lam_kld_2[0] * (T.mean(self.kld_hi_cond) + \
(self.kzg_weight[0] * T.mean(self.kld_hi_glob))))
#################################
# CONSTRUCT THE NLL-BASED COSTS #
#################################
self.nll_costs = self._construct_nll_costs()
self.nll_cost = self.lam_nll[0] * T.mean(self.nll_costs)
########################################
# CONSTRUCT THE REST OF THE JOINT COST #
########################################
param_reg_cost = self._construct_reg_costs()
self.reg_cost = self.lam_l2w[0] * param_reg_cost
self.joint_cost = self.nll_cost + self.kld_cost + self.reg_cost
# Get the gradient of the joint cost for all optimizable parameters
print("Computing gradients of self.joint_cost...")
self.joint_grads = OrderedDict()
grad_list = T.grad(self.joint_cost, self.joint_params)
for i, p in enumerate(self.joint_params):
self.joint_grads[p] = grad_list[i]
# Construct the updates for the generator and inferencer networks
self.group_1_updates = get_adam_updates(params=self.group_1_params, \
grads=self.joint_grads, alpha=self.lr_1, \
beta1=self.mom_1, beta2=self.mom_2, \
mom2_init=1e-3, smoothing=1e-5, max_grad_norm=10.0)
self.group_2_updates = get_adam_updates(params=self.group_2_params, \
grads=self.joint_grads, alpha=self.lr_2, \
beta1=self.mom_1, beta2=self.mom_2, \
mom2_init=1e-3, smoothing=1e-5, max_grad_norm=10.0)
self.joint_updates = OrderedDict()
for k in self.group_1_updates:
self.joint_updates[k] = self.group_1_updates[k]
for k in self.group_2_updates:
self.joint_updates[k] = self.group_2_updates[k]
# Construct a function for jointly training the generator/inferencer
print("Compiling training function...")
self.train_joint = self._construct_train_joint()
self.compute_post_klds = self._construct_compute_post_klds()
self.compute_fe_terms = self._construct_compute_fe_terms()
self.sample_from_prior = self._construct_sample_from_prior()
# make easy access points for some interesting parameters
self.inf_1_weights = self.q_z_given_x.shared_layers[0].W
self.gen_1_weights = self.p_s0_obs_given_z_obs.mu_layers[-1].W
self.inf_2_weights = self.q_hi_given_x_si[0].shared_layers[0].W
self.gen_2_weights = self.p_sip1_given_si_hi[0].mu_layers[-1].W
self.gen_inf_weights = self.p_hi_given_si[0].shared_layers[0].W
return
def set_sgd_params(self, lr_1=0.01, lr_2=0.01, \
mom_1=0.9, mom_2=0.999):
"""
Set learning rate and momentum parameter for all updates.
"""
zero_ary = np.zeros((1,))
# set learning rates
new_lr_1 = zero_ary + lr_1
self.lr_1.set_value(new_lr_1.astype(theano.config.floatX))
new_lr_2 = zero_ary + lr_2
self.lr_2.set_value(new_lr_2.astype(theano.config.floatX))
# set momentums
new_mom_1 = zero_ary + mom_1
self.mom_1.set_value(new_mom_1.astype(theano.config.floatX))
new_mom_2 = zero_ary + mom_2
self.mom_2.set_value(new_mom_2.astype(theano.config.floatX))
return
def set_lam_nll(self, lam_nll=1.0):
"""
Set weight for controlling the influence of the data likelihood.
"""
zero_ary = np.zeros((1,))
new_lam = zero_ary + lam_nll
self.lam_nll.set_value(new_lam.astype(theano.config.floatX))
return
def set_lam_kld(self, lam_kld_1=1.0, lam_kld_2=1.0):
"""
Set the relative weight of prior KL-divergence vs. data likelihood.
"""
zero_ary = np.zeros((1,))
new_lam = zero_ary + lam_kld_1
self.lam_kld_1.set_value(new_lam.astype(theano.config.floatX))
new_lam = zero_ary + lam_kld_2
self.lam_kld_2.set_value(new_lam.astype(theano.config.floatX))
return
def set_lam_l2w(self, lam_l2w=1e-3):
"""
Set the relative strength of l2 regularization on network params.
"""
zero_ary = np.zeros((1,))
new_lam = zero_ary + lam_l2w
self.lam_l2w.set_value(new_lam.astype(theano.config.floatX))
return
def set_train_switch(self, switch_val=0.0):
"""
Set the switch for changing between training and sampling behavior.
"""
if (switch_val < 0.5):
switch_val = 0.0
else:
switch_val = 1.0
zero_ary = np.zeros((1,))
new_val = zero_ary + switch_val
new_val = new_val.astype(theano.config.floatX)
self.train_switch.set_value(new_val)
return
def set_kzg_weight(self, kzg_weight=0.2):
"""
        Set the weight for the shaping penalty on conditional priors over hi given si.
"""
assert(kzg_weight >= 0.0)
zero_ary = np.zeros((1,))
new_val = zero_ary + kzg_weight
new_val = new_val.astype(theano.config.floatX)
self.kzg_weight.set_value(new_val)
return
def set_l1l2_weight(self, l1l2_weight=1.0):
"""
Set the weight for shaping penalty on posterior KLds.
"""
assert((l1l2_weight >= 0.0) and (l1l2_weight <= 1.0))
zero_ary = np.zeros((1,))
new_val = zero_ary + l1l2_weight
new_val = new_val.astype(theano.config.floatX)
self.l1l2_weight.set_value(new_val)
return
def set_drop_rate(self, drop_rate=0.0):
"""
Set the dropout rate for generator read function.
"""
assert((drop_rate >= 0.0) and (drop_rate <= 1.0))
zero_ary = np.zeros((1,))
new_val = zero_ary + drop_rate
new_val = new_val.astype(theano.config.floatX)
self.drop_rate.set_value(new_val)
return
def set_input_bias(self, new_bias=None):
"""
        Set the input-layer bias of the q_z_given_x inference network.
"""
new_bias = new_bias.astype(theano.config.floatX)
self.q_z_given_x.shared_layers[0].b_in.set_value(new_bias)
return
def set_obs_bias(self, new_obs_bias=None):
"""
Set initial bias on the obs part of state.
"""
assert(new_obs_bias.shape[0] == self.obs_dim)
new_bias = np.zeros((self.obs_dim,)) + new_obs_bias
new_bias = new_bias.astype(theano.config.floatX)
self.p_s0_obs_given_z_obs.mu_layers[-1].b.set_value(new_bias)
return
def _construct_nll_costs(self):
"""
Construct the negative log-likelihood part of free energy.
"""
        # log-likelihood of the data under the final refined state
xh = self.obs_transform(self.si[-1])
if self.x_type == 'bernoulli':
ll_costs = log_prob_bernoulli(self.x, xh)
else:
ll_costs = log_prob_gaussian2(self.x, xh, \
log_vars=self.bounded_logvar)
nll_costs = -ll_costs
return nll_costs
def _construct_kld_costs(self):
"""
Construct the posterior KL-divergence part of cost to minimize.
"""
# construct KLd cost for the distributions over hi. the prior over
# hi is given by a distribution conditioned on si, which we estimate
# using self.p_hi_given_si[i]. the conditionals produced by each
# self.p_hi_given_si[i] will also be regularized towards a shared
# prior, e.g. a Gaussian with zero mean and unit variance.
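        # For reference, the closed-form KL between diagonal Gaussians
        # q = N(mu_q, var_q) and p = N(mu_p, var_p) -- presumably what
        # gaussian_kld computes elementwise -- is:
        #     0.5 * (log(var_p / var_q) + (var_q + (mu_q - mu_p)**2) / var_p - 1)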
kld_hi_conds = []
kld_hi_globs = []
for i in range(self.ir_steps):
kld_hi_cond = gaussian_kld( \
self.q_hi_given_x_si[i].output_mean, \
self.q_hi_given_x_si[i].output_logvar, \
self.p_hi_given_si[i].output_mean, \
self.p_hi_given_si[i].output_logvar)
kld_hi_glob = gaussian_kld( \
self.p_hi_given_si[i].output_mean, \
self.p_hi_given_si[i].output_logvar, \
0.0, 0.0)
kld_hi_cond_l1l2 = (self.l1l2_weight[0] * kld_hi_cond) + \
((1.0 - self.l1l2_weight[0]) * kld_hi_cond**2.0)
kld_hi_conds.append(T.sum(kld_hi_cond_l1l2, \
axis=1, keepdims=True))
kld_hi_globs.append(T.sum(kld_hi_glob**2.0, \
axis=1, keepdims=True))
# compute the batch-wise costs
kld_hi_cond = sum(kld_hi_conds)
kld_hi_glob = sum(kld_hi_globs)
# construct KLd cost for the distributions over z
kld_z_all = gaussian_kld(self.q_z_given_x.output_mean, \
self.q_z_given_x.output_logvar, \
0.0, 0.0)
kld_z_l1l2 = (self.l1l2_weight[0] * kld_z_all) + \
((1.0 - self.l1l2_weight[0]) * kld_z_all**2.0)
kld_z = T.sum(kld_z_l1l2, \
axis=1, keepdims=True)
return [kld_z, kld_hi_cond, kld_hi_glob]
def _construct_reg_costs(self):
"""
Construct the cost for low-level basic regularization. E.g. for
applying l2 regularization to the network activations and parameters.
"""
param_reg_cost = sum([T.sum(p**2.0) for p in self.joint_params])
return param_reg_cost
def _construct_train_joint(self):
"""
Construct theano function to train all networks jointly.
"""
# setup some symbolic variables for theano to deal with
x = T.matrix()
# collect the outputs to return from this function
outputs = [self.joint_cost, self.nll_cost, self.kld_cost, \
self.reg_cost]
# compile the theano function
func = theano.function(inputs=[ x, self.batch_reps ], \
outputs=outputs, \
givens={ self.x: x.repeat(self.batch_reps, axis=0) }, \
updates=self.joint_updates)
return func
def _construct_compute_fe_terms(self):
"""
Construct a function for computing terms in variational free energy.
"""
# setup some symbolic variables for theano to deal with
x_in = T.matrix()
# construct values to output
nll = self._construct_nll_costs()
kld = self.kld_z + self.kld_hi_cond
# compile theano function for a one-sample free-energy estimate
fe_term_sample = theano.function(inputs=[x_in], \
outputs=[nll, kld], givens={self.x: x_in})
# construct a wrapper function for multi-sample free-energy estimate
def fe_term_estimator(X, sample_count):
nll_sum = np.zeros((X.shape[0],))
kld_sum = np.zeros((X.shape[0],))
for i in range(sample_count):
result = fe_term_sample(X)
nll_sum += result[0].ravel()
kld_sum += result[1].ravel()
mean_nll = nll_sum / float(sample_count)
mean_kld = kld_sum / float(sample_count)
return [mean_nll, mean_kld]
return fe_term_estimator
def _construct_compute_post_klds(self):
"""
Construct theano function to compute the info about the variational
approximate posteriors for some inputs.
"""
# setup some symbolic variables for theano to deal with
x = T.matrix()
# construct symbolic expressions for the desired KLds
cond_klds = []
glob_klds = []
for i in range(self.ir_steps):
kld_hi_cond = gaussian_kld(self.q_hi_given_x_si[i].output_mean, \
self.q_hi_given_x_si[i].output_logvar, \
self.p_hi_given_si[i].output_mean, \
self.p_hi_given_si[i].output_logvar)
kld_hi_glob = gaussian_kld(self.p_hi_given_si[i].output_mean, \
self.p_hi_given_si[i].output_logvar, 0.0, 0.0)
cond_klds.append(kld_hi_cond)
glob_klds.append(kld_hi_glob)
# gather conditional and global klds for all IR steps
all_klds = cond_klds + glob_klds
# gather kld for the initialization step
kld_z_all = gaussian_kld(self.q_z_given_x.output_mean, \
self.q_z_given_x.output_logvar, \
0.0, 0.0)
all_klds.append(kld_z_all)
# compile theano function for a one-sample free-energy estimate
kld_func = theano.function(inputs=[x], outputs=all_klds, \
givens={ self.x: x })
def post_kld_computer(X):
f_all_klds = kld_func(X)
f_kld_z = f_all_klds[-1]
f_kld_hi_cond = np.zeros(f_all_klds[0].shape)
f_kld_hi_glob = np.zeros(f_all_klds[0].shape)
for j in range(self.ir_steps):
f_kld_hi_cond += f_all_klds[j]
f_kld_hi_glob += f_all_klds[j + self.ir_steps]
return [f_kld_z, f_kld_hi_cond, f_kld_hi_glob]
return post_kld_computer
def _construct_sample_from_prior(self):
"""
Construct a function for drawing independent samples from the
distribution generated by this MultiStageModel. This function returns
the full sequence of "partially completed" examples.
"""
z_sym = T.matrix()
x_sym = T.matrix()
oputs = [self.obs_transform(s) for s in self.si]
sample_func = theano.function(inputs=[z_sym, x_sym], outputs=oputs, \
givens={ self.z: z_sym, \
self.x: T.zeros_like(x_sym) })
def prior_sampler(samp_count):
x_samps = np.zeros((samp_count, self.obs_dim))
x_samps = x_samps.astype(theano.config.floatX)
old_switch = self.train_switch.get_value(borrow=False)
# set model to generation mode
self.set_train_switch(switch_val=0.0)
z_samps = npr.randn(samp_count, self.z_dim)
z_samps = z_samps.astype(theano.config.floatX)
model_samps = sample_func(z_samps, x_samps)
# set model back to either training or generation mode
self.set_train_switch(switch_val=old_switch)
return model_samps
return prior_sampler
if __name__=="__main__":
print("Hello world!")
##############
# EYE BUFFER #
##############
| capybaralet/Sequential-Generation | MultiStageModel.py | Python | mit | 24,500 | ["Gaussian"] | ba4997c85036fe4886246d9acd70aeb8686746a176b712e184e0e5f658b931b8 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# jobschedule - Save schedule for a waiting or running job
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
cgitb.enable()
from shared.functionality.jobschedule import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
| heromod/migrid | mig/cgi-bin/jobschedule.py | Python | gpl-2.0 | 1,097 | ["Brian"] | 600e40f10804725c7020324c03304926cf2805632b207ef7f081e74f81ecc47a |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Contribution 2009 by Brad Crittenden <brad [AT] bradcrittenden.net>
# Copyright (C) 2008 Benny Malengier
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by B.Malengier
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import os
import sys
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
log = logging.getLogger(".ExportAssistant")
#-------------------------------------------------------------------------
#
# Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkPixbuf
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import USER_HOME, ICON, SPLASH, GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.constfunc import conv_to_unicode
from gramps.gen.config import config
from ...pluginmanager import GuiPluginManager
from gramps.gen.utils.file import (find_folder, get_new_filename)
from ...managedwindow import ManagedWindow
from ...dialog import ErrorDialog
from ...user import User
#-------------------------------------------------------------------------
#
# ExportAssistant
#
#-------------------------------------------------------------------------
_ExportAssistant_pages = {
'intro' : 0,
'exporttypes' : 1,
'options' : 2,
'fileselect' : 3,
'confirm' : 4,
'summary' : 5,
}
class ExportAssistant(Gtk.Assistant, ManagedWindow) :
"""
This class creates a GTK assistant to guide the user through the various
Save as/Export options.
The overall goal is to keep things simple by presenting few choice options
on each assistant page.
The export formats and options are obtained from the plugins.
"""
#override predefined do_xxx signal handlers
__gsignals__ = {"apply": "override", "cancel": "override",
"close": "override", "prepare": "override"}
def __init__(self,dbstate,uistate):
"""
Set up the assistant, and build all the possible assistant pages.
        Some page elements are left empty, since their contents depend
on the user choices and on the success of the attempted save.
"""
self.dbstate = dbstate
self.uistate = uistate
self.writestarted = False
self.confirm = None
#set up Assistant
Gtk.Assistant.__init__(self)
#set up ManagedWindow
self.top_title = _("Export Assistant")
ManagedWindow.__init__(self, uistate, [], self.__class__)
#set_window is present in both parent classes
ManagedWindow.set_window(self, self, None,
self.top_title, isWindow=True)
#set up callback method for the export plugins
self.callback = self.pulse_progressbar
person_handle = self.uistate.get_active('Person')
self.person = self.dbstate.db.get_person_from_handle(person_handle)
if not self.person:
self.person = self.dbstate.db.find_initial_person()
pmgr = GuiPluginManager.get_instance()
self.__exporters = pmgr.get_export_plugins()
self.map_exporters = {}
self.__previous_page = -1
#create the assistant pages
self.create_page_intro()
self.create_page_exporttypes()
self.create_page_options()
self.create_page_fileselect()
self.create_page_confirm()
#no progress page, looks ugly, and user needs to hit forward at end!
self.create_page_summary()
self.option_box_instance = None
#we need our own forward function as options page must not always be shown
self.set_forward_page_func(self.forward_func, None)
#ManagedWindow show method
ManagedWindow.show(self)
def build_menu_names(self, obj):
"""Override ManagedWindow method."""
return (self.top_title, None)
def create_page_intro(self):
"""Create the introduction page."""
label = Gtk.Label(label=self.get_intro_text())
label.set_line_wrap(True)
label.set_use_markup(True)
if (Gtk.get_major_version(), Gtk.get_minor_version()) >= (3, 10):
label.set_max_width_chars(60)
image = Gtk.Image()
image.set_from_file(SPLASH)
box = Gtk.VBox()
box.set_size_request(600, -1) # wide enough it won't have to expand
box.pack_start(image, False, False, 5)
box.pack_start(label, False, False, 5)
page = box
page.show_all()
self.append_page(page)
self.set_page_title(page, _('Saving your data'))
self.set_page_complete(page, True)
self.set_page_type(page, Gtk.AssistantPageType.INTRO)
def create_page_exporttypes(self):
"""Create the export type page.
A Title label.
A table of format radio buttons and their descriptions.
"""
self.format_buttons = []
box = Gtk.VBox()
box.set_border_width(12)
box.set_spacing(12)
table = Gtk.Table(n_rows=2*len(self.__exporters), n_columns=2)
table.set_row_spacings(6)
table.set_col_spacings(6)
button = None
recent_type = config.get('behavior.recent-export-type')
exporters = [(x.get_name().replace("_", ""), x) for x in self.__exporters]
exporters.sort()
ix = 0
for sort_title, exporter in exporters:
title = exporter.get_name()
description= exporter.get_description()
self.map_exporters[ix] = exporter
button = Gtk.RadioButton.new_with_mnemonic_from_widget(button, title)
button.set_tooltip_text(description)
self.format_buttons.append(button)
table.attach(button, 0, 2, 2*ix, 2*ix+1)
if ix == recent_type:
button.set_active(True)
ix += 1
box.pack_start(table, False, False, 0)
page = box
page.show_all()
self.append_page(page)
self.set_page_title(page, _('Choose the output format'))
self.set_page_type(page, Gtk.AssistantPageType.CONTENT)
def create_page_options(self):
# as we do not know yet what to show, we create an empty page
page = Gtk.VBox()
page.set_border_width(12)
page.set_spacing(12)
page.show_all()
self.append_page(page)
self.set_page_title(page, _('Export options'))
self.set_page_complete(page, False)
self.set_page_type(page, Gtk.AssistantPageType.CONTENT)
def forward_func(self, pagenumber, data):
"""This function is called on forward press.
        Normally we go to the next page; however, before the options page
        we decide whether the options need to be shown at all.
"""
if pagenumber == _ExportAssistant_pages['exporttypes'] :
#decide if options need to be shown:
self.option_box_instance = None
ix = self.get_selected_format_index()
if not self.map_exporters[ix].get_config():
# no options needed
return pagenumber + 2
elif pagenumber == _ExportAssistant_pages['options']:
# need to check to see if we should show file selection
if (self.option_box_instance and
hasattr(self.option_box_instance, "no_fileselect")):
# don't show fileselect, but mark it ok
return pagenumber + 2
return pagenumber + 1
def create_options(self):
"""This method gets the option page, and fills it with the options."""
option = self.get_selected_format_index()
vbox = self.get_nth_page(_ExportAssistant_pages['options'])
(config_title, config_box_class) = self.map_exporters[option].get_config()
#self.set_page_title(vbox, config_title)
# remove present content of the vbox
list(map(vbox.remove, vbox.get_children()))
# add new content
if config_box_class:
self.option_box_instance = config_box_class(self.person, self.dbstate, self.uistate)
box = self.option_box_instance.get_option_box()
vbox.add(box)
else:
self.option_box_instance = None
vbox.show_all()
# We silently assume all options lead to accepted behavior
self.set_page_complete(vbox, True)
def create_page_fileselect(self):
self.chooser = Gtk.FileChooserWidget(Gtk.FileChooserAction.SAVE)
#add border
self.chooser.set_border_width(12)
#global files, ask before overwrite
self.chooser.set_local_only(False)
self.chooser.set_do_overwrite_confirmation(True)
#created, folder and name not set
self.folder_is_set = False
#connect changes in filechooser with check to mark page complete
self.chooser.connect("selection-changed", self.check_fileselect)
self.chooser.connect("key-release-event", self.check_fileselect)
#first selection does not give a selection-changed event, grab the button
self.chooser.connect("button-release-event", self.check_fileselect)
#Note, we can induce an exotic error, delete filename,
# do not release button, click forward. We expect user not to do this
# In case he does, recheck on confirmation page!
self.chooser.show_all()
page = self.chooser
self.append_page(page)
self.set_page_title(page, _('Select save file'))
self.set_page_type(page, Gtk.AssistantPageType.CONTENT)
def check_fileselect(self, filechooser, event=None, show=True):
"""Given a filechooser, determine if it can be marked complete in
the Assistant.
Used as normal callback and event callback. For callback, we will have
show=True
"""
filename = conv_to_unicode(filechooser.get_filename())
if not filename:
self.set_page_complete(filechooser, False)
else:
folder = conv_to_unicode(filechooser.get_current_folder())
if not folder:
folder = find_folder(filename)
else:
folder = find_folder(folder)
#the file must be valid, not a folder, and folder must be valid
if (filename and os.path.basename(filename.strip()) and folder):
#this page of the assistant is complete
self.set_page_complete(filechooser, True)
else :
self.set_page_complete(filechooser, False)
def create_page_confirm(self):
# Construct confirm page
self.confirm = Gtk.Label()
self.confirm.set_line_wrap(True)
self.confirm.set_use_markup(True)
self.confirm.show()
image = Gtk.Image()
image.set_from_file(SPLASH)
box = Gtk.VBox()
box.set_border_width(12)
box.set_spacing(6)
box.pack_start(image, False, False, 5)
box.pack_start(self.confirm, False, False, 5)
page = box
self.append_page(page)
self.set_page_title(page, _('Final confirmation'))
self.set_page_type(page, Gtk.AssistantPageType.CONFIRM)
self.set_page_complete(page, True)
def create_page_summary(self):
# Construct summary page
        # As this is the last page it needs to be of page_type
# Gtk.AssistantPageType.CONFIRM or Gtk.AssistantPageType.SUMMARY
vbox = Gtk.VBox()
vbox.set_border_width(12)
vbox.set_spacing(6)
image = Gtk.Image()
image.set_from_file(SPLASH)
vbox.pack_start(image, False, False, 5)
self.labelsum = Gtk.Label(label=_("Please wait while your data is selected and exported"))
self.labelsum.set_line_wrap(True)
self.labelsum.set_use_markup(True)
vbox.pack_start(self.labelsum, False, False, 0)
self.progressbar = Gtk.ProgressBar()
vbox.pack_start(self.progressbar, True, True, 0)
page = vbox
page.show_all()
self.append_page(page)
self.set_page_title(page, _('Summary'))
self.set_page_complete(page, False)
self.set_page_type(page, Gtk.AssistantPageType.SUMMARY)
def do_apply(self):
pass
def do_close(self):
if self.writestarted :
pass
else :
self.close()
def do_cancel(self):
self.do_close()
def do_prepare(self, page):
"""
The "prepare" signal is emitted when a new page is set as the
assistant's current page, but before making the new page visible.
:param page: the new page to prepare for display.
"""
#determine if we go backward or forward
page_number = self.get_current_page()
assert page == self.get_nth_page(page_number)
if page_number <= self.__previous_page :
back = True
else :
back = False
if back :
#when moving backward, show page as it was,
#page we come from is set incomplete so as to disallow user jumping
# to last page after backward move
self.set_page_complete(self.get_nth_page(self.__previous_page),
False)
elif page_number == _ExportAssistant_pages['options']:
self.create_options()
self.set_page_complete(page, True)
elif page == self.chooser :
# next page is the file chooser, reset filename, keep folder where user was
folder, name = self.suggest_filename()
page.set_action(Gtk.FileChooserAction.SAVE)
if self.folder_is_set:
page.set_current_name(name)
else :
page.set_current_name(name)
page.set_current_folder(folder)
self.folder_is_set = True
# see if page is complete with above
self.check_fileselect(page, show=True)
elif self.get_page_type(page) == Gtk.AssistantPageType.CONFIRM:
# The confirm page with apply button
# Present user with what will happen
ix = self.get_selected_format_index()
format = self.map_exporters[ix].get_name()
page_complete = False
# If no file select:
if (self.option_box_instance and
hasattr(self.option_box_instance, "no_fileselect")):
# No file selection
filename = ''
confirm_text = _(
'The data will be exported as follows:\n\n'
'Format:\t%s\n\n'
'Press Apply to proceed, Back to revisit '
'your options, or Cancel to abort') % (format.replace("_",""), )
page_complete = True
else:
#Allow for exotic error: file is still not correct
self.check_fileselect(self.chooser, show=False)
if self.get_page_complete(self.chooser) :
filename = conv_to_unicode(self.chooser.get_filename())
name = os.path.split(filename)[1]
folder = os.path.split(filename)[0]
confirm_text = _(
'The data will be saved as follows:\n\n'
'Format:\t%(format)s\nName:\t%(name)s\nFolder:\t%(folder)s\n\n'
'Press Apply to proceed, Go Back to revisit '
'your options, or Cancel to abort') % {
'format': format.replace("_",""),
'name': name,
'folder': folder}
page_complete = True
else :
confirm_text = _(
'The selected file and folder to save to '
'cannot be created or found.\n\n'
'Press Back to return and select a valid filename.'
)
page_complete = False
# Set the page_complete status
self.set_page_complete(page, page_complete)
# If it is ok, then look for alternate confirm_text
if (page_complete and
self.option_box_instance and
hasattr(self.option_box_instance, "confirm_text")):
# Override message
confirm_text = self.option_box_instance.confirm_text
self.confirm.set_label(confirm_text)
elif self.get_page_type(page) == Gtk.AssistantPageType.SUMMARY :
# The summary page
# Lock page, show progress bar
self.pre_save(page)
# save
success = self.save()
# Unlock page
self.post_save()
#update the label and title
if success:
conclusion_title = _('Your data has been saved')
conclusion_text = _(
'The copy of your data has been '
'successfully saved. You may press Close button '
'now to continue.\n\n'
'Note: the database currently opened in your Gramps '
'window is NOT the file you have just saved. '
'Future editing of the currently opened database will '
'not alter the copy you have just made. ')
#add test, what is dir
conclusion_text += '\n\n' + _('Filename: %s') %self.chooser.get_filename()
else:
conclusion_title = _('Saving failed')
conclusion_text = _(
'There was an error while saving your data. '
'You may try starting the export again.\n\n'
'Note: your currently opened database is safe. '
'It was only '
'a copy of your data that failed to save.')
self.labelsum.set_label(conclusion_text)
self.set_page_title(page, conclusion_title)
self.set_page_complete(page, True)
else :
            #whatever other page, if we show it, it is complete too
self.set_page_complete(page, True)
#remember previous page for next time
self.__previous_page = page_number
def close(self, *obj) :
#clean up ManagedWindow menu, then destroy window, bring forward parent
Gtk.Assistant.destroy(self)
ManagedWindow.close(self,*obj)
def get_intro_text(self):
return _('Under normal circumstances, Gramps does not require you '
'to directly save your changes. All changes you make are '
'immediately saved to the database.\n\n'
'This process will help you save a copy of your data '
'in any of the several formats supported by Gramps. '
'This can be used to make a copy of your data, backup '
'your data, or convert it to a format that will allow '
'you to transfer it to a different program.\n\n'
'If you change your mind during this process, you '
'can safely press the Cancel button at any time and your '
'present database will still be intact.')
def get_selected_format_index(self):
"""
Query the format radiobuttons and return the index number of the
selected one.
"""
for ix in range(len(self.format_buttons)):
button = self.format_buttons[ix]
if button.get_active():
return ix
else:
return 0
def suggest_filename(self):
"""Prepare suggested filename and set it in the file chooser."""
ix = self.get_selected_format_index()
ext = self.map_exporters[ix].get_extension()
# Suggested folder: try last export, then last import, then home.
default_dir = config.get('paths.recent-export-dir')
if len(default_dir)<=1:
default_dir = config.get('paths.recent-import-dir')
if len(default_dir)<=1:
default_dir = USER_HOME
if ext == 'gramps':
new_filename = os.path.join(default_dir,'data.gramps')
elif ext == 'burn':
new_filename = os.path.basename(self.dbstate.db.get_save_path())
else:
new_filename = get_new_filename(ext,default_dir)
return (default_dir, os.path.split(new_filename)[1])
def save(self):
"""
Perform the actual Save As/Export operation.
Depending on the success status, set the text for the final page.
"""
success = False
try:
if (self.option_box_instance and
hasattr(self.option_box_instance, "no_fileselect")):
filename = ""
else:
filename = conv_to_unicode(self.chooser.get_filename())
config.set('paths.recent-export-dir', os.path.split(filename)[0])
ix = self.get_selected_format_index()
config.set('behavior.recent-export-type', ix)
export_function = self.map_exporters[ix].get_export_function()
success = export_function(self.dbstate.db,
filename,
User(error=ErrorDialog, callback=self.callback),
self.option_box_instance)
except:
            #an error not caught in the export_function itself
success = False
log.error(_("Error exporting your Family Tree"), exc_info=True)
return success
def pre_save(self,page):
#as all is locked, show the page, which assistant normally only does
# after prepare signal!
self.writestarted = True
page.set_child_visible(True)
self.show_all()
self.uistate.set_busy_cursor(True)
self.set_busy_cursor(1)
def post_save(self):
self.uistate.set_busy_cursor(False)
self.set_busy_cursor(0)
self.progressbar.hide()
self.writestarted = False
def set_busy_cursor(self,value):
"""Set or unset the busy cursor while saving data.
Note : self.get_window() is the Gtk.Assistant Gtk.Window, not
a part of ManagedWindow
"""
if value:
self.get_window().set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
#self.set_sensitive(0)
else:
self.get_window().set_cursor(None)
#self.set_sensitive(1)
while Gtk.events_pending():
Gtk.main_iteration()
def pulse_progressbar(self, value, text=None):
self.progressbar.set_fraction(min(value/100.0, 1.0))
if text:
self.progressbar.set_text("%s: %d%%" % (text, value))
else:
self.progressbar.set_text("%d%%" % value)
while Gtk.events_pending():
Gtk.main_iteration()
|
pmghalvorsen/gramps_branch
|
gramps/gui/plug/export/_exportassistant.py
|
Python
|
gpl-2.0
| 24,859
|
[
"Brian"
] |
c76363ab541dff98fe431196804b7c1819b07e062889084ba4d0fe285354da35
|
#!/usr/bin/env python
from numpy.distutils.core import setup
from numpy.distutils.core import Extension
setup(name='py-bgo',
version='0.2',
      description='Bayesian global optimization',
author='Ilias Bilionis',
author_email='ibilion@purdue.edu',
keywords=['Bayesian global optimization', 'Gaussian process regression'],
packages=['pybgo']
)
|
PredictiveScienceLab/py-bgo
|
setup.py
|
Python
|
mit
| 384
|
[
"Gaussian"
] |
17861af70232c6f0833d4f33e4505339d094b804f4169115aa0bc13e7a39670b
|
#
# FRETBursts - A single-molecule FRET burst analysis toolkit.
#
# Copyright (C) 2014-2016 Antonino Ingargiola <tritemio@gmail.com>
#
"""
This module provides functions to fit gaussian distributions and gaussian
distribution mixtures (2 components). These functions can be used directly,
or more often, in a typical FRETBursts workflow they are passed to higher
level methods like :meth:`fretbursts.burstlib.Data.fit_E_generic`.
Single Gaussian distribution fit:
* :func:`gaussian_fit_hist`
* :func:`gaussian_fit_cdf`
* :func:`gaussian_fit_pdf`
For 2-Gaussians fit we have the following models:
* :func:`two_gauss_mix_pdf`: *PDF of 2-components Gaussians mixture*
* :func:`two_gauss_mix_ab`: *linear combination of 2 Gaussians*
Main functions for mixture of 2 Gaussian distribution fit:
* :func:`two_gaussian_fit_hist` *histogram fit using `leastsq`*
* :func:`two_gaussian_fit_hist_min` *histogram fit using `minimize`*
* :func:`two_gaussian_fit_hist_min_ab` *the same but using _ab model*
* :func:`two_gaussian_fit_cdf` *curve fit of the CDF*
* :func:`two_gaussian_fit_EM` *Expectation-Maximization fit*
* :func:`two_gaussian_fit_EM_b` *the same with boundaries*
Also, some functions to fit 2-D gaussian distributions and mixtures are
implemented but not thoroughly tested.
The reference documentation for **all** the functions follows.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import numpy.random as R
import scipy.optimize as O
import scipy.stats as S
from scipy.special import erf
from scipy.optimize import leastsq, minimize
import scipy.ndimage as ndi
#from scipy.stats import gaussian_kde
from .weighted_kde import gaussian_kde_w # this version supports weights
def normpdf(x, mu=0, sigma=1.):
"""Return the normal pdf evaluated at `x`."""
assert sigma > 0
u = (x-mu)/sigma
y = 1/(np.sqrt(2*np.pi)*sigma)*np.exp(-u*u/2)
return y
##
# Single gaussian distribution
#
def gaussian_fit_curve(x, y, mu0=0, sigma0=1, a0=None, return_all=False,
**kwargs):
"""Gaussian fit of curve (x,y).
If a0 is None then only (mu,sigma) are fitted (to a gaussian density).
`kwargs` are passed to the leastsq() function.
If return_all=False then return only the fitted (mu,sigma) values
If return_all=True (or full_output=True is passed to leastsq)
then the full output of leastsq is returned.
"""
if a0 is None:
gauss_pdf = lambda x, m, s: np.exp(-((x-m)**2)/(2*s**2))/\
(np.sqrt(2*np.pi)*s)
err_fun = lambda p, x, y: gauss_pdf(x, *p) - y
res = leastsq(err_fun, x0=[mu0, sigma0], args=(x, y), **kwargs)
else:
gauss_fun = lambda x, m, s, a: a*np.sign(s)*np.exp(-((x-m)**2)/(2*s**2))
err_fun = lambda p, x, y: gauss_fun(x, *p) - y
res = leastsq(err_fun, x0=[mu0, sigma0, a0], args=(x, y), **kwargs)
if 'full_output' in kwargs:
return_all = kwargs['full_output']
mu, sigma = res[0][0], res[0][1]
if return_all: return res
return mu, sigma
def get_epdf(s, smooth=0, N=1000, smooth_pdf=False, smooth_cdf=True):
"""Compute the empirical PDF of the sample `s`.
If smooth > 0 then apply a gaussian filter with sigma=smooth.
N is the number of points for interpolation of the CDF on a uniform range.
"""
ecdf = [np.sort(s), np.arange(0.5, s.size+0.5)*1./s.size]
#ecdf = [np.sort(s), np.arange(s.size)*1./s.size]
_x = np.linspace(s.min(), s.max(), N)
ecdfi = [_x, np.interp(_x, ecdf[0], ecdf[1])]
if smooth_cdf and smooth > 0:
ecdfi[1] = ndi.filters.gaussian_filter1d(ecdfi[1], sigma=smooth)
epdf = [ecdfi[0][:-1], np.diff(ecdfi[1])/np.diff(ecdfi[0])]
if smooth_pdf and smooth > 0:
epdf[1] = ndi.filters.gaussian_filter1d(epdf[1], sigma=smooth)
return epdf
def gaussian_fit_pdf(s, mu0=0, sigma0=1, a0=1, return_all=False,
leastsq_kwargs={}, **kwargs):
"""Gaussian fit of samples s using a fit to the empirical PDF.
If a0 is None then only (mu,sigma) are fitted (to a gaussian density).
`kwargs` are passed to get_epdf().
If return_all=False then return only the fitted (mu,sigma) values
If return_all=True (or full_output=True is passed to leastsq)
then the full output of leastsq and the PDF curve is returned.
"""
## Empirical PDF
epdf = get_epdf(s, **kwargs)
res = gaussian_fit_curve(epdf[0], epdf[1], mu0, sigma0, a0, return_all,
**leastsq_kwargs)
if return_all: return res, epdf
return res
def gaussian_fit_hist(s, mu0=0, sigma0=1, a0=None, bins=np.r_[-0.5:1.5:0.001],
return_all=False, leastsq_kwargs={}, weights=None, **kwargs):
"""Gaussian fit of samples s fitting the hist to a Gaussian function.
If a0 is None then only (mu,sigma) are fitted (to a gaussian density).
kwargs are passed to the histogram function.
If return_all=False then return only the fitted (mu,sigma) values
If return_all=True (or full_output=True is passed to leastsq)
then the full output of leastsq and the histogram is returned.
`weights` optional weights for the histogram.
"""
histogram_kwargs = dict(bins=bins, density=True, weights=weights)
histogram_kwargs.update(**kwargs)
H = np.histogram(s, **histogram_kwargs)
x, y = 0.5*(H[1][:-1] + H[1][1:]), H[0]
#bar(H[1][:-1], H[0], H[1][1]-H[1][0], alpha=0.3)
res = gaussian_fit_curve(x, y, mu0, sigma0, a0, return_all,
**leastsq_kwargs)
if return_all: return res, H, x, y
return res
def gaussian_fit_cdf(s, mu0=0, sigma0=1, return_all=False, **leastsq_kwargs):
"""Gaussian fit of samples s fitting the empirical CDF.
Additional kwargs are passed to the leastsq() function.
If return_all=False then return only the fitted (mu,sigma) values
If return_all=True (or full_output=True is passed to leastsq)
then the full output of leastsq and the histogram is returned.
"""
## Empirical CDF
ecdf = [np.sort(s), np.arange(0.5, s.size+0.5)*1./s.size]
## Analytical Gaussian CDF
gauss_cdf = lambda x, mu, sigma: 0.5*(1+erf((x-mu)/(np.sqrt(2)*sigma)))
## Fitting the empirical CDF
err_func = lambda p, x, y: y - gauss_cdf(x, p[0], p[1])
res = leastsq(err_func, x0=[mu0, sigma0], args=(ecdf[0], ecdf[1]),
**leastsq_kwargs)
if return_all: return res, ecdf
return res[0]
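# --- Editor's note: hedged usage sketch, not part of the original FRETBursts API ---
# The single-Gaussian fitters above only need a 1-D sample array. The helper below
# draws a synthetic sample and compares the histogram- and CDF-based fits; the seed,
# sample size, mean and sigma are arbitrary illustration values.
def _demo_single_gaussian_fit(seed=1):
    """Minimal sketch: fit one Gaussian to a synthetic sample with two methods."""
    rng = np.random.RandomState(seed)
    s = rng.normal(loc=0.3, scale=0.1, size=2000)
    mu_h, sig_h = gaussian_fit_hist(s, mu0=0.2, sigma0=0.2)
    mu_c, sig_c = gaussian_fit_cdf(s, mu0=0.2, sigma0=0.2)
    print("hist fit: mu=%.3f sigma=%.3f" % (mu_h, sig_h))
    print("cdf  fit: mu=%.3f sigma=%.3f" % (mu_c, sig_c))
    return (mu_h, sig_h), (mu_c, sig_c)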
def gaussian_fit_ml(s, mu_sigma_guess=[0.5, 1]):
"""Gaussian fit of samples s using the Maximum Likelihood (ML method).
Didactical, since scipy.stats.norm.fit() implements the same method.
"""
n = s.size
## Log-likelihood (to be maximized)
log_l = lambda mu, sig: -n/2.*np.log(sig**2) - \
1./(2*sig**2)*np.sum((s-mu)**2)
## Function to be minimized
min_fun = lambda p: -log_l(p[0], p[1])
res = O.minimize(min_fun, [0, 0.5], method='powell',
options={'xtol': 1e-6, 'disp': True, 'maxiter': 1e9})
print(res)
mu, sigma = res['x']
return mu, sigma
##
# Two-component gaussian mixtures
#
def two_gauss_mix_pdf(x, p):
"""PDF for the distribution of a mixture of two Gaussians."""
mu1, sig1, mu2, sig2, a = p
return a*normpdf(x, mu1, sig1) + (1-a)*normpdf(x, mu2, sig2)
def two_gauss_mix_ab(x, p):
    """Mixture of two Gaussians with no area constraint."""
mu1, sig1, a1, mu2, sig2, a2 = p
return a1*normpdf(x, mu1, sig1) + a2*normpdf(x, mu2, sig2)
def reorder_parameters(p):
"""Reorder 2-gauss mix params to have the 1st component with smaller mean.
"""
if p[0] > p[2]:
p = p[np.array([2, 3, 0, 1, 4])] # swap (mu1, sig1) with (mu2, sig2)
p[4] = 1 - p[4] # "swap" the alpha of the mixture
return p
def reorder_parameters_ab(p):
"""Reorder 2-gauss mix params to have the 1st component with smaller mean.
"""
if p[0] > p[3]:
p = p[np.array([3, 4, 5, 0, 1, 2])]
return p
def bound_check(val, bounds):
"""Returns `val` clipped inside the interval `bounds`."""
if bounds[0] is not None and val < bounds[0]:
val = bounds[0]
if bounds[1] is not None and val > bounds[1]:
val = bounds[1]
return val
def two_gaussian_fit_curve(x, y, p0, return_all=False, verbose=False, **kwargs):
"""Fit a 2-gaussian mixture to the (x,y) curve.
`kwargs` are passed to the leastsq() function.
    If return_all=False then return only the fitted parameters
If return_all=True then the full output of leastsq is returned.
"""
if kwargs['method'] == 'leastsq':
kwargs.pop('method')
kwargs.pop('bounds')
def err_func(p, x, y):
return (y - two_gauss_mix_pdf(x, p))
res = leastsq(err_func, x0=p0, args=(x, y), **kwargs)
p = res[0]
else:
def err_func(p, x, y):
return ((y - two_gauss_mix_pdf(x, p))**2).sum()
res = minimize(err_func, x0=p0, args=(x, y), **kwargs)
p = res.x
if verbose:
print(res, '\n')
if return_all: return res
return reorder_parameters(p)
def two_gaussian_fit_KDE_curve(s, p0=[0, 0.1, 0.6, 0.1, 0.5], weights=None,
bandwidth=0.05, x_pdf=None, debug=False,
method='SLSQP', bounds=None,
verbose=False, **kde_kwargs):
"""Fit sample `s` with two gaussians using a KDE pdf approximation.
The 2-gaussian pdf is then curve-fitted to the KDE pdf.
Arguments:
s (array): population of samples to be fitted
p0 (sequence-like): initial parameters [mu0, sig0, mu1, sig1, a]
bandwidth (float): bandwidth for the KDE algorithm
method (string): fit method, can be 'leastsq' or one of the methods
accepted by scipy `minimize()`
bounds (None or 5-element list): if not None, each element is a
(min,max) pair of bounds for the corresponding parameter. This
argument can be used only with L-BFGS-B, TNC or SLSQP methods.
If bounds are used, parameters cannot be fixed
x_pdf (array): array on which the KDE PDF is evaluated and curve-fitted
        weights (array): optional weights, same size as `s` (for example
            1/sigma^2 ~ nt).
        debug (bool): if True performs more tests and prints more info.
Additional kwargs are passed to scipy.stats.gaussian_kde().
Returns:
Array of parameters for the 2-gaussians (5 elements)
"""
if x_pdf is None: x_pdf = np.linspace(s.min(), s.max(), 1000)
## Scikit-learn KDE estimation
#kde_skl = KernelDensity(bandwidth=bandwidth, **kde_kwargs)
#kde_skl.fit(x)[:, np.newaxis])
## score_samples() returns the log-likelihood of the samples
#log_pdf = kde_skl.score_samples(x_pdf)[:, np.newaxis])
#kde_pdf = np.exp(log_pdf)
## Weighted KDE estimation
kde = gaussian_kde_w(s, bw_method=bandwidth, weights=weights)
kde_pdf = kde.evaluate(x_pdf)
p = two_gaussian_fit_curve(x_pdf, kde_pdf, p0=p0, method=method,
bounds=bounds, verbose=verbose)
return p
def two_gaussian_fit_EM_b(s, p0=[0, 0.1, 0.6, 0.1, 0.5], weights=None,
bounds=[(None, None,)]*5,
max_iter=300, ptol=1e-4, debug=False):
"""
Fit the sample s with two gaussians using Expectation Maximization.
This version allows setting boundaries for each parameter.
Arguments:
s (array): population of samples to be fitted
p0 (sequence-like): initial parameters [mu0, sig0, mu1, sig1, a]
        bounds (tuple of pairs): sequence of (min, max) values that constrain
            the parameters. If min or max are None, no boundary is set.
ptol (float): convergence condition. Relative max variation of any
parameter.
max_iter (int): max number of iteration in case of non convergence.
        weights (array): optional weights, same size as `s` (for example
            1/sigma^2 ~ nt).
Returns:
Array of parameters for the 2-gaussians (5 elements)
"""
assert np.size(p0) == 5
if weights is None: weights = np.ones(s.size)
assert weights.size == s.size
weights *= (1.*weights.size)/weights.sum() # Normalize to (#samples)
#weights /= weights.sum() # Normalize to 1
if debug: assert np.abs(weights.sum() - s.size) < 1e-6
bounds_mu = [bounds[0], bounds[2]]
bounds_sig = [bounds[1], bounds[3]]
bounds_pi0 = bounds[4]
# Initial guess of parameters and initializations
mu = np.array([p0[0], p0[2]])
sig = np.array([p0[1], p0[3]])
pi_ = np.array([p0[4], 1-p0[4]])
gamma = np.zeros((2, s.size))
N_ = np.zeros(2)
p_new = np.array(p0)
# EM loop
counter = 0
stop_iter, converged = False, False
while not stop_iter:
# Compute the responsibility func. (gamma) and the new parameters
for k in [0, 1]:
gamma[k, :] = weights*pi_[k]*normpdf(s, mu[k], sig[k]) / \
two_gauss_mix_pdf(s, p_new)
N_[k] = gamma[k, :].sum()
mu[k] = np.sum(gamma[k]*s)/N_[k]
mu[k] = bound_check(mu[k], bounds_mu[k])
sig[k] = np.sqrt( np.sum(gamma[k]*(s-mu[k])**2)/N_[k] )
sig[k] = bound_check(sig[k], bounds_sig[k])
if k < 1:
pi_[k] = N_[k]/s.size
pi_[k] = bound_check(pi_[k], bounds_pi0)
else:
pi_[k] = 1 - pi_[0]
p_old = p_new
p_new = np.array([mu[0], sig[0], mu[1], sig[1], pi_[0]])
if debug:
assert np.abs(N_.sum() - s.size)/float(s.size) < 1e-6
assert np.abs(pi_.sum() - 1) < 1e-6
# Convergence check
counter += 1
relative_delta = np.abs(p_new - p_old)/p_new
converged = relative_delta.max() < ptol
stop_iter = converged or (counter >= max_iter)
if debug:
print("Iterations: ", counter)
if not converged:
print("WARNING: Not converged, max iteration (%d) reached." % max_iter)
return reorder_parameters(p_new)
def two_gaussian_fit_EM(s, p0=[0, 0.1, 0.6, 0.1, 0.5], max_iter=300, ptol=1e-4,
fix_mu=[0, 0], fix_sig=[0, 0], debug=False,
weights=None):
"""
Fit the sample s with two gaussians using Expectation Maximization.
    This version allows optionally fixing the mean or std. dev. of each component.
Arguments:
s (array): population of samples to be fitted
p0 (sequence-like): initial parameters [mu0, sig0, mu1, sig1, a]
        fix_mu (tuple of bools): whether to keep the mean of each component
            fixed at its initial value.
        fix_sig (tuple of bools): whether to keep the sigma of each component
            fixed at its initial value.
ptol (float): convergence condition. Relative max variation of any
parameter.
max_iter (int): max number of iteration in case of non convergence.
        weights (array): optional weights, same size as `s` (for example
            1/sigma^2 ~ nt).
Returns:
Array of parameters for the 2-gaussians (5 elements)
"""
assert np.size(p0) == 5
if weights is None: weights = np.ones(s.size)
assert weights.size == s.size
weights *= (1.*weights.size)/weights.sum() # Normalize to (#samples)
#weights /= weights.sum() # Normalize to 1
if debug: assert np.abs(weights.sum() - s.size) < 1e-6
# Initial guess of parameters and initializations
mu = np.array([p0[0], p0[2]])
sig = np.array([p0[1], p0[3]])
pi_ = np.array([p0[4], 1-p0[4]])
gamma = np.zeros((2, s.size))
N_ = np.zeros(2)
p_new = np.array(p0)
# EM loop
counter = 0
stop_iter, converged = False, False
while not stop_iter:
# Compute the responsibility func. (gamma) and the new parameters
for k in [0, 1]:
gamma[k, :] = weights*pi_[k]*normpdf(s, mu[k], sig[k]) / \
two_gauss_mix_pdf(s, p_new)
## Uncomment for SCHEME2
#gamma[k, :] = pi_[k]*normpdf(s, mu[k], sig[k]) / \
# two_gauss_mix_pdf(s, p_new)
N_[k] = gamma[k, :].sum()
if not fix_mu[k]:
mu[k] = np.sum(gamma[k]*s)/N_[k]
## Uncomment for SCHEME2
#mu[k] = np.sum(weights*gamma[k]*s)/N_[k]
if not fix_sig[k]:
sig[k] = np.sqrt( np.sum(gamma[k]*(s-mu[k])**2)/N_[k] )
pi_[k] = N_[k]/s.size
p_old = p_new
p_new = np.array([mu[0], sig[0], mu[1], sig[1], pi_[0]])
if debug:
assert np.abs(N_.sum() - s.size)/float(s.size) < 1e-6
assert np.abs(pi_.sum() - 1) < 1e-6
# Convergence check
counter += 1
fixed = np.concatenate([fix_mu, fix_sig, [0]]).astype(bool)
relative_delta = np.abs(p_new[~fixed] - p_old[~fixed])/p_new[~fixed]
converged = relative_delta.max() < ptol
stop_iter = converged or (counter >= max_iter)
if debug:
print("Iterations: ", counter)
if not converged:
print("WARNING: Not converged, max iteration (%d) reached." % max_iter)
return reorder_parameters(p_new)
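# --- Editor's note: hedged usage sketch, not part of the original FRETBursts API ---
# two_gaussian_fit_EM() expects a flat sample from a 2-component mixture plus an
# initial guess p0 = [mu1, sig1, mu2, sig2, a]; the returned parameters are
# reordered so that mu1 < mu2. All numbers below are arbitrary illustration values.
def _demo_two_gaussian_EM(seed=2):
    """Minimal sketch: generate a 2-Gaussian mixture and recover it with EM."""
    rng = np.random.RandomState(seed)
    s = np.r_[rng.normal(0.1, 0.05, 700), rng.normal(0.6, 0.10, 300)]
    p_fit = two_gaussian_fit_EM(s, p0=[0.0, 0.1, 0.5, 0.1, 0.5])
    print("EM fit [mu1, sig1, mu2, sig2, a]:", p_fit)
    return p_fit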
def two_gaussian_fit_hist(s, bins=np.r_[-0.5:1.5:0.001], weights=None,
p0=[0.2,1,0.8,1,0.3], fix_mu=[0,0], fix_sig=[0,0], fix_a=False):
"""Fit the sample s with 2-gaussian mixture (histogram fit).
Uses scipy.optimize.leastsq function. Parameters can be fixed but
cannot be constrained in an interval.
Arguments:
s (array): population of samples to be fitted
p0 (5-element list or array): initial guess or parameters
bins (int or array): bins passed to `np.histogram()`
weights (array): optional weights passed to `np.histogram()`
fix_a (tuple of bools): Whether to fix the amplitude of the gaussians
fix_mu (tuple of bools): Whether to fix the mean of the gaussians
fix_sig (tuple of bools): Whether to fix the sigma of the gaussians
Returns:
Array of parameters for the 2-gaussians (5 elements)
"""
assert np.size(p0) == 5
fix = np.array([fix_mu[0], fix_sig[0], fix_mu[1], fix_sig[1], fix_a],
dtype=bool)
p0 = np.array(p0)
    p0_free = p0[~fix]
p0_fix = p0[fix]
H = np.histogram(s, bins=bins, weights=weights, density=True)
x, y = 0.5*(H[1][:-1] + H[1][1:]), H[0]
assert x.size == y.size
## Fitting
def err_func(p, x, y, fix, p_fix, p_complete):
        p_complete[~fix] = p
p_complete[fix] = p_fix
return y - two_gauss_mix_pdf(x, p_complete)
p_complete = np.zeros(5)
p, v = leastsq(err_func, x0=p0_free, args=(x, y, fix, p0_fix, p_complete))
p_new = np.zeros(5)
    p_new[~fix] = p
p_new[fix] = p0_fix
return reorder_parameters(p_new)
def two_gaussian_fit_hist_min(s, bounds=None, method='L-BFGS-B',
bins=np.r_[-0.5:1.5:0.001], weights=None, p0=[0.2,1,0.8,1,0.3],
fix_mu=[0,0], fix_sig=[0,0], fix_a=False, verbose=False):
"""Fit the sample `s` with 2-gaussian mixture (histogram fit). [Bounded]
Uses scipy.optimize.minimize allowing constrained minimization.
Arguments:
s (array): population of samples to be fitted
method (string): one of the methods accepted by scipy `minimize()`
bounds (None or 5-element list): if not None, each element is a
(min,max) pair of bounds for the corresponding parameter. This
argument can be used only with L-BFGS-B, TNC or SLSQP methods.
If bounds are used, parameters cannot be fixed
p0 (5-element list or array): initial guess or parameters
bins (int or array): bins passed to `np.histogram()`
weights (array): optional weights passed to `np.histogram()`
fix_a (tuple of bools): Whether to fix the amplitude of the gaussians
fix_mu (tuple of bools): Whether to fix the mean of the gaussians
fix_sig (tuple of bools): Whether to fix the sigma of the gaussians
verbose (boolean): allows printing fit information
Returns:
Array of parameters for the 2-gaussians (5 elements)
"""
assert np.size(p0) == 5
fix = np.array([fix_mu[0], fix_sig[0], fix_mu[1], fix_sig[1], fix_a],
dtype=bool)
p0 = np.array(p0)
    p0_free = p0[~fix]
p0_fix = p0[fix]
H = np.histogram(s, bins=bins, weights=weights, density=True)
x, y = 0.5*(H[1][:-1] + H[1][1:]), H[0]
assert x.size == y.size
## Fitting
def err_func(p, x, y, fix, p_fix, p_complete):
        p_complete[~fix] = p
p_complete[fix] = p_fix
return ((y - two_gauss_mix_pdf(x, p_complete))**2).sum()
p_complete = np.zeros(5)
res = minimize(err_func, x0=p0_free, args=(x, y, fix, p0_fix, p_complete),
method=method, bounds=bounds)
if verbose: print(res)
p_new = np.zeros(5)
    p_new[~fix] = res.x
p_new[fix] = p0_fix
return reorder_parameters(p_new)
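# --- Editor's note: hedged usage sketch, not part of the original FRETBursts API ---
# two_gaussian_fit_hist_min() accepts per-parameter (min, max) bounds when used with
# L-BFGS-B, TNC or SLSQP. The bounds and initial guess below are arbitrary
# illustration values keeping both sigmas positive and the fraction `a` in [0, 1].
def _demo_two_gaussian_hist_min(seed=3):
    """Minimal sketch: bounded histogram fit of a 2-Gaussian mixture."""
    rng = np.random.RandomState(seed)
    s = np.r_[rng.normal(0.1, 0.05, 700), rng.normal(0.6, 0.10, 300)]
    bounds = [(-0.2, 0.4), (0.01, 0.3), (0.3, 0.9), (0.01, 0.3), (0.0, 1.0)]
    p_fit = two_gaussian_fit_hist_min(s, p0=[0.0, 0.1, 0.5, 0.1, 0.5],
                                      bounds=bounds, method='L-BFGS-B')
    print("bounded hist fit [mu1, sig1, mu2, sig2, a]:", p_fit)
    return p_fit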
def two_gaussian_fit_hist_min_ab(s, bounds=None, method='L-BFGS-B',
bins=np.r_[-0.5:1.5:0.001], weights=None, p0=[0.2,1,0.8,1,0.3],
fix_mu=[0,0], fix_sig=[0,0], fix_a=[0,0], verbose=False):
"""Histogram fit of sample `s` with 2-gaussian functions.
Uses scipy.optimize.minimize allowing constrained minimization. Also
each parameter can be fixed.
The order of the parameters is: mu1, sigma1, a1, mu2, sigma2, a2.
Arguments:
s (array): population of samples to be fitted
method (string): one of the methods accepted by scipy `minimize()`
bounds (None or 6-element list): if not None, each element is a
(min,max) pair of bounds for the corresponding parameter. This
argument can be used only with L-BFGS-B, TNC or SLSQP methods.
If bounds are used, parameters cannot be fixed
p0 (6-element list or array): initial guess or parameters
bins (int or array): bins passed to `np.histogram()`
weights (array): optional weights passed to `np.histogram()`
fix_a (tuple of bools): Whether to fix the amplitude of the gaussians
fix_mu (tuple of bools): Whether to fix the mean of the gaussians
fix_sig (tuple of bools): Whether to fix the sigma of the gaussians
verbose (boolean): allows printing fit information
Returns:
Array of parameters for the 2-gaussians (6 elements)
"""
nparams = 6
assert np.size(p0) == nparams
fix = np.array([fix_mu[0], fix_sig[0], fix_a[0],
fix_mu[1], fix_sig[1], fix_a[1]], dtype=bool)
p0 = np.asarray(p0)
    p0_free = p0[~fix]
p0_fix = p0[fix]
counts, bins = np.histogram(s, bins=bins, weights=weights, density=True)
x = bins[:-1] + 0.5*(bins[1] - bins[0])
y = counts
assert x.size == y.size
## Fitting
def err_func(p, x, y, fix, p_fix, p_complete):
        p_complete[~fix] = p
p_complete[fix] = p_fix
return ((y - two_gauss_mix_ab(x, p_complete))**2).sum()
p_complete = np.zeros(nparams)
res = minimize(err_func, x0=p0_free, args=(x, y, fix, p0_fix, p_complete),
method=method, bounds=bounds)
if verbose: print(res)
p_new = np.zeros(nparams)
    p_new[~fix] = res.x
p_new[fix] = p0_fix
return reorder_parameters_ab(p_new)
def two_gaussian_fit_cdf(s, p0=[0., .05, .6, .1, .5],
fix_mu=[0, 0], fix_sig=[0, 0]):
"""Fit the sample s with two gaussians using a CDF fit.
Curve fit 2-gauss mixture Cumulative Distribution Function (CDF) to the
empirical CDF for sample `s`.
Note that with a CDF fit no weighting is possible.
Arguments:
s (array): population of samples to be fitted
p0 (5-element list or array): initial guess or parameters
fix_mu (tuple of bools): Whether to fix the mean of the gaussians
fix_sig (tuple of bools): Whether to fix the sigma of the gaussians
Returns:
Array of parameters for the 2-gaussians (5 elements)
"""
assert np.size(p0) == 5
fix = np.array([fix_mu[0], fix_sig[0], fix_mu[1], fix_sig[1], 0],
dtype=bool)
p0 = np.array(p0)
    p0_free = p0[~fix]
p0_fix = p0[fix]
## Empirical CDF
ecdf = [np.sort(s), np.arange(0.5, s.size+0.5)*1./s.size]
x, y = ecdf
## Analytical gaussian CDF
gauss_cdf = lambda x, mu, sigma: 0.5*(1+erf((x-mu)/(np.sqrt(2)*sigma)))
def two_gauss_mix_cdf(x, p):
return p[4]*gauss_cdf(x, p[0], p[1]) + (1-p[4])*gauss_cdf(x, p[2], p[3])
## Fitting the empirical CDF
def err_func(p, x, y, fix, p_fix, p_complete):
        p_complete[~fix] = p
p_complete[fix] = p_fix
return y - two_gauss_mix_cdf(x, p_complete)
p_complete = np.zeros(5)
p, v = leastsq(err_func, x0=p0_free, args=(x, y, fix, p0_fix, p_complete))
p_new = np.zeros(5)
    p_new[~fix] = p
p_new[fix] = p0_fix
return reorder_parameters(p_new)
def test_two_gauss():
m01 = 0.
m02 = 0.6
s01 = 0.05
s02 = 0.1
alpha = 0.
p_real = [m01, s01, m02, s02, alpha]
N = 500
si1 = round(alpha*N)
si2 = round((1-alpha)*N)
s1 = R.normal(size=si1, loc=m01, scale=s01)
s2 = R.normal(size=si2, loc=m02, scale=s02)
s = np.r_[s1,s2]
pc = two_gaussian_fit_cdf(s, fix_mu=[1,0], p0=[-0.01,0.05,0.5,0.2,0.4])
ph = two_gaussian_fit_hist(s, fix_mu=[1,0], p0=[-0.01,0.05,0.5,0.2,0.4])
pe = two_gaussian_fit_EM(s, fix_mu=[1,0], p0=[-0.01,0.05,0.5,0.2,0.4])
    # Plot the sample histogram and the fitted components (requires matplotlib)
    import matplotlib.pyplot as plt
    mu1, sig1, mu2, sig2, a = pe       # EM fit
    mu1h, sig1h, mu2h, sig2h, ah = ph  # histogram fit
    plt.hist(s, bins=40, density=True)
    x = np.r_[s.min()-1:s.max()+1:200j]
    plt.plot(x, a*normpdf(x, mu1, sig1), lw=2)
    plt.plot(x, (1-a)*normpdf(x, mu2, sig2), lw=2)
    plt.plot(x, two_gauss_mix_pdf(x, pe), lw=2)
    plt.axvline(m01, lw=2, color='k', alpha=0.3)
    plt.axvline(m02, lw=2, color='gray', alpha=0.3)
    plt.axvline(mu1, lw=2, ls='--', color='k', alpha=0.3)
    plt.axvline(mu2, lw=2, ls='--', color='gray', alpha=0.3)
    plt.axvline(mu1h, lw=2, ls='--', color='r', alpha=0.3)
    plt.axvline(mu2h, lw=2, ls='--', color='r', alpha=0.3)
def compare_two_gauss():
m01 = 0.
m02 = 0.5
s01 = 0.08
s02 = 0.15
alpha = 0.7
p_real = [m01, s01, m02, s02, alpha]
N = 1000
si1 = round(N*alpha)
si2 = round((1-alpha)*N)
p0 = [-0.01,0.05,0.6,0.2,0.4]
fix_mu = [0,0]
n = 500
PC, PH, PE = np.zeros((n,5)), np.zeros((n,5)), np.zeros((n,5))
    for i in range(n):
s1 = R.normal(size=si1, loc=m01, scale=s01)
s2 = R.normal(size=si2, loc=m02, scale=s02)
s = np.r_[s1,s2]
pc = two_gaussian_fit_cdf(s, fix_mu=fix_mu, p0=p0)
ph = two_gaussian_fit_hist(s, fix_mu=fix_mu, p0=p0)
pe = two_gaussian_fit_EM(s, fix_mu=fix_mu, p0=p0)
PC[i], PH[i], PE[i] = pc, ph, pe
Label = ['Mu1', 'Sig1', 'Mu2', 'Sig2', 'Alpha']
ftype = 'png'
    import matplotlib.pyplot as plt  # plotting is only needed for this comparison
    for i in range(5):
        plt.figure()
        plt.title(Label[i])
        vmin = min([PC[:,i].min(), PH[:,i].min(), PE[:,i].min()])
        vmax = max([PC[:,i].max(), PH[:,i].max(), PE[:,i].max()])
        b = np.r_[vmin:vmax:80j]
        if vmax == vmin: b = np.r_[vmin-.1:vmax+.1:200j]
        plt.hist(PC[:,i], bins=b, alpha=0.3, label='CDF')
        plt.hist(PH[:,i], bins=b, alpha=0.3, label='Hist')
        plt.hist(PE[:,i], bins=b, alpha=0.3, label='EM')
        plt.legend(loc='best')
        plt.axvline(p_real[i], color='k', lw=2)
        #plt.savefig('Two-gaussian Fit Comp - %s.png' % Label[i])
def gaussian2d_fit(sx, sy, guess=[0.5,1]):
"""2D-Gaussian fit of samples S using a fit to the empirical CDF."""
assert sx.size == sy.size
## Empirical CDF
ecdfx = [np.sort(sx), np.arange(0.5,sx.size+0.5)*1./sx.size]
ecdfy = [np.sort(sy), np.arange(0.5,sy.size+0.5)*1./sy.size]
## Analytical gaussian CDF
gauss_cdf = lambda x, mu, sigma: 0.5*(1+erf((x-mu)/(np.sqrt(2)*sigma)))
## Fitting the empirical CDF
fitfunc = lambda p, x: gauss_cdf(x, p[0], p[1])
errfunc = lambda p, x, y: fitfunc(p, x) - y
px,v = leastsq(errfunc, x0=guess, args=(ecdfx[0],ecdfx[1]))
py,v = leastsq(errfunc, x0=guess, args=(ecdfy[0],ecdfy[1]))
print("2D Gaussian CDF fit", px, py)
mux, sigmax = px[0], px[1]
muy, sigmay = py[0], py[1]
return mux, sigmax, muy, sigmay
def test_gaussian2d_fit():
mx0 = 0.1
my0 = 0.9
sigx0 = 0.4
sigy0 = 0.25
Size = 500
sx = R.normal(size=Size, loc=mx0, scale=sigx0)
sy = R.normal(size=Size, loc=my0, scale=sigy0)
mux, sigmax, muy, sigmay = gaussian2d_fit(sx, sy)
    # Plot the sample and the fitted 2D Gaussian (requires matplotlib)
    import matplotlib.pyplot as plt
    plt.plot(sx, sy, 'o', alpha=0.2, mew=0)
    X, Y = np.mgrid[sx.min()-1:sx.max()+1:200j, sy.min()-1:sy.max()+1:200j]
    def gauss2d(X, Y, mx, my, sigx, sigy):
        return np.exp(-((X-mx)**2)/(2*sigx**2))*np.exp(-((Y-my)**2)/(2*sigy**2))
    plt.contour(X, Y, gauss2d(X, Y, mux, muy, sigmax, sigmay))
    plt.plot(mx0, my0, 'ok', mew=0, ms=10)
    plt.plot(mux, muy, 'x', mew=2, ms=10, color='green')
def two_gaussian2d_fit(sx, sy, guess=[0.5,1]):
"""2D-Gaussian fit of samples S using a fit to the empirical CDF."""
    ## UNFINISHED (I have 2 alphas using the xy projections)
assert sx.size == sy.size
## Empirical CDF
ecdfx = [np.sort(sx), np.arange(0.5,sx.size+0.5)*1./sx.size]
ecdfy = [np.sort(sy), np.arange(0.5,sy.size+0.5)*1./sy.size]
## Analytical gaussian CDF
gauss_cdf = lambda x, mu, sigma: 0.5*(1+erf((x-mu)/(np.sqrt(2)*sigma)))
gauss2d_cdf = lambda X,Y,mx,sx,my,sy: gauss_cdf(X,mx,sx)*gauss_cdf(Y,my,sy)
two_cdf = lambda x, m1, s1, m2, s2, a:\
a*gauss_cdf(x,m1,s1)+(1-a)*gauss_cdf(x,m2,s2)
two2d_cdf = lambda X,Y, mx1, sx1, mx2, sx2, my1, sy1, my2, sy2, a:\
        a*gauss2d_cdf(X,Y,mx1,sx1,my1,sy1)+(1-a)*gauss2d_cdf(X,Y,mx2,sx2,my2,sy2)
## Fitting the empirical CDF
fitfunc = lambda p, x: two_cdf(x, *p)
errfunc = lambda p, x, y: fitfunc(p, x) - y
fitfunc2d = lambda p, X,Y: two2d_cdf(X,Y, *p)
errfunc2d = lambda p, X,Y,Z: fitfunc2d(p, X,Y) - Z
px,v = leastsq(errfunc, x0=guess, args=(ecdfx[0],ecdfx[1]))
py,v = leastsq(errfunc, x0=guess, args=(ecdfy[0],ecdfy[1]))
print("2D Two-Gaussians CDF fit", px, py)
mux1, sigmax1, mux2, sigmax2, alphax = px
muy1, sigmay1, muy2, sigmay2, alphay = py
    return mux1, sigmax1, mux2, sigmax2, alphax, muy1, sigmay1, muy2, sigmay2, alphay
def test_gaussian_fit():
m0 = 0.1
s0 = 0.4
size = 500
s = R.normal(size=size, loc=m0, scale=s0)
#s = s[s<0.4]
    mu, sig = gaussian_fit_cdf(s)  # empirical-CDF fit (labelled "ECDF" below)
mu1, sig1 = S.norm.fit(s)
mu2, sig2 = gaussian_fit_ml(s)
print("ECDF ", mu, sig)
print("ML ", mu1, sig1)
print("ML (manual)", mu2, sig2)
H = np.histogram(s, bins=20, density=True)
h = H[0]
bw = H[1][1] - H[1][0]
#bins_c = H[1][:-1]+0.5*bw
    # Plot the histogram and the fitted densities (requires matplotlib)
    import matplotlib.pyplot as plt
    plt.bar(H[1][:-1], H[0], bw, alpha=0.3)
    x = np.r_[s.min()-1:s.max()+1:200j]
    plt.plot(x, normpdf(x, m0, s0), lw=2, color='grey')
    plt.plot(x, normpdf(x, mu, sig), lw=2, color='r', alpha=0.5)
    plt.plot(x, normpdf(x, mu1, sig1), lw=2, color='b', alpha=0.5)
if __name__ == '__main__':
#compare_two_gauss()
#test_gaussian2d_fit()
#test_gaussian_fit()
#show()
pass
|
chungjjang80/FRETBursts
|
fretbursts/fit/gaussian_fitting.py
|
Python
|
gpl-2.0
| 31,065
|
[
"Gaussian"
] |
3ce80be751d23ce2e8defa1055824ffaf5607fddda49bf82558d6c6fc6f095c9
|
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Extensions to the scipy.linalg module
'''
import numpy
# Numpy/scipy does not seem to have a convenient interface for
# pivoted Cholesky factorization. Newer versions of scipy (>=1.4) provide
# access to the raw lapack function, which is wrapped around here.
# With older versions of scipy, we use our own implementation instead.
try:
from scipy.linalg.lapack import dpstrf as _dpstrf
except ImportError:
def _pivoted_cholesky_wrapper(A, tol, lower):
return pivoted_cholesky_python(A, tol=tol, lower=lower)
else:
def _pivoted_cholesky_wrapper(A, tol, lower):
N = A.shape[0]
assert(A.shape == (N, N))
L, piv, rank, info = _dpstrf(A, tol=tol, lower=lower)
if info < 0:
raise RuntimeError('Pivoted Cholesky factorization failed.')
if lower:
L[numpy.triu_indices(N, k=1)] = 0
L[:, rank:] = 0
else:
L[numpy.tril_indices(N, k=-1)] = 0
L[rank:, :] = 0
return L, piv-1, rank
def pivoted_cholesky(A, tol=-1.0, lower=False):
'''
Performs a Cholesky factorization of A with full pivoting.
A can be a (singular) positive semidefinite matrix.
P.T * A * P = L * L.T if lower is True
    P.T * A * P = U.T * U  if lower is False
Use regular Cholesky factorization for positive definite matrices instead.
Args:
A : the matrix to be factorized
tol : the stopping tolerance (see LAPACK documentation for dpstrf)
lower : return lower triangular matrix L if true
return upper triangular matrix U if false
Returns:
the factor L or U, the pivot vector (starting with 0), the rank
'''
return _pivoted_cholesky_wrapper(A, tol=tol, lower=lower)
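# --- Editor's note: hedged usage sketch, not part of the original PySCF module ---
# pivoted_cholesky() also handles singular positive semidefinite matrices; the demo
# below builds a rank-2 PSD matrix and checks that the permuted matrix is reproduced
# by the returned factor. The matrix entries are arbitrary illustration values.
def _demo_pivoted_cholesky():
    '''Minimal sketch: factorize a rank-deficient PSD matrix and verify the result.'''
    B = numpy.array([[1.0, 0.0],
                     [2.0, 1.0],
                     [3.0, 1.0]])
    A = B.dot(B.T)                  # 3x3 positive semidefinite matrix of rank 2
    L, piv, rank = pivoted_cholesky(A, lower=True)
    A_perm = A[piv][:, piv]         # P.T * A * P, with piv as a permutation vector
    assert numpy.allclose(A_perm, L.dot(L.T))
    print('detected rank:', rank)   # expected: 2
    return L, piv, rank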
def pivoted_cholesky_python(A, tol=-1.0, lower=False):
'''
Pedestrian implementation of Cholesky factorization with full column pivoting.
The LAPACK version should be used instead whenever possible!
Args:
A : the positive semidefinite matrix to be factorized
tol : stopping tolerance
lower : return the lower or upper diagonal factorization
Returns:
the factor, the permutation vector, the rank
'''
N = A.shape[0]
assert(A.shape == (N, N))
D = numpy.diag(A).copy()
if tol < 0:
        machine_epsilon = numpy.finfo(numpy.float64).eps
tol = N * machine_epsilon * numpy.amax(numpy.diag(A))
L = numpy.zeros((N, N))
piv = numpy.arange(N)
rank = 0
for k in range(N):
s = k + numpy.argmax(D[k:])
piv[k], piv[s] = piv[s], piv[k]
D[k], D[s] = D[s], D[k]
L[[k, s], :] = L[[s, k], :]
if D[k] <= tol:
break
rank += 1
L[k, k] = numpy.sqrt(D[k])
L[k+1:, k] = (A[piv[k+1:], piv[k]] - numpy.dot(L[k+1:, :k], L[k, :k])) / L[k, k]
D[k+1:] -= L[k+1:, k] ** 2
if lower:
return L, piv, rank
else:
return L.T, piv, rank
|
sunqm/pyscf
|
pyscf/lib/scipy_helper.py
|
Python
|
apache-2.0
| 3,604
|
[
"PySCF"
] |
bd3f73ad672a1f780dfdf4d445d9a309b6c2d2b5922329f943e9c84adbadb99a
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import vtk
import chigger
EDGE_COLOR = [0.5]*3
def frames():
camera = vtk.vtkCamera()
camera.SetViewUp(0.0000000000, 1.0000000000, 0.0000000000)
camera.SetPosition(0.1520000000, 0.0128500000, 0.3276535154)
camera.SetFocalPoint(0.1520000000, 0.0128500000, 0.0000000000)
reader = chigger.exodus.ExodusReader('step7d_adapt_blocks_out.e')
temp = chigger.exodus.ExodusResult(reader,
camera=camera,
variable='temperature',
range=[300, 350],
edges=True, edge_color=EDGE_COLOR,
cmap='plasma')
cbar = chigger.exodus.ExodusColorBar(temp,
length=0.6,
viewport=[0,0,1,1],
colorbar_origin=[0.2, 0.7],
location='top')
cbar.setOptions('primary', title='Temperature (K)', font_size=48, font_color=[0,0,0])
time = chigger.annotations.TextAnnotation(position=[0.5,0.2], font_size=48, text_color=[0,0,0],
justification='center')
window = chigger.RenderWindow(temp, cbar, time, size=[1920, 1080], motion_factor=0.1, background=[1,1,1])
for i, t in enumerate(reader.getTimes()):
reader.setOptions(timestep=i)
reader.setOptions(timestep=i)
reader.setOptions(timestep=i)
time.setOptions(text='Time = {:.1f} sec.'.format(t))
filename = 'output/step07d_{:05d}.png'.format(i)
window.write(filename)
window.start()
def movie():
chigger.utils.img2mov('output/step07d_*.png', 'step07d_result.mp4',
duration=20, num_threads=6)
if __name__ == '__main__':
if not os.path.isdir('output'):
os.mkdir('output')
#frames()
movie()
|
nuclear-wizard/moose
|
tutorials/darcy_thermo_mech/step07_adaptivity/problems/step7d.py
|
Python
|
lgpl-2.1
| 2,298
|
[
"MOOSE",
"VTK"
] |
e487fcd737e7ea714bf7e2a936e722e1ca0427f8431da474a1e5eb1cc7dfcc99
|
import os
import re
import logging
log = logging.getLogger(__name__)
import elements
def get_language(tag):
"""
Helper function for extracting code highlight language from tag attributes.
"""
# Map from lower case hljs name to latex listings name
languages = {'c++':'C++', 'python':'Python'}
lang = ''
if 'class' in tag.attrs and 'hljs' in tag.attrs['class']:
idx = tag.attrs['class'].index('hljs') + 1
lang = tag.attrs['class'][idx]
if lang.lower() in languages:
lang = languages[lang.lower()]
else:
lang = ''
return lang
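# --- Editor's note: hedged usage sketch, not part of the original MooseDocs module ---
# get_language() only inspects `tag.attrs`, so a tiny stand-in object is enough to
# show the mapping from highlight.js class names to latex listings names. The
# _FakeTag class below is hypothetical and exists only for this illustration.
def _demo_get_language():
    """Minimal sketch of how get_language() reads the hljs class attribute."""
    class _FakeTag(object):
        def __init__(self, classes):
            self.attrs = {'class': classes}
    assert get_language(_FakeTag(['hljs', 'c++'])) == 'C++'
    assert get_language(_FakeTag(['hljs', 'python'])) == 'Python'
    assert get_language(_FakeTag(['hljs', 'fortran'])) == ''  # not in the map
    assert get_language(_FakeTag(['plain'])) == ''            # no hljs class at all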
def escape(content):
"""
Escape special latex characters.
"""
map = dict()
map['_'] = '\\_'
map['{'] = '\\{'
map['}'] = '\\}'
map['$'] = '\\$'
map['&'] = '\\&'
map['%'] = '\\%'
map['\\'] = '\\textbackslash '
map['~'] = '\\textasciitilde '
map['^'] = '\\textasciicircum '
def sub(match):
return map[match.group(1)]
return re.sub(r'([_{}$\\%&~^])', sub, content)
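# --- Editor's note: hedged usage sketch, not part of the original MooseDocs module ---
# escape() protects exactly the latex special characters listed in the map above;
# the sample strings below are arbitrary illustration values.
def _demo_escape():
    """Minimal sketch: escape latex special characters in plain strings."""
    assert escape('50% of a_b {c}') == '50\\% of a\\_b \\{c\\}'
    assert escape('x^2~y') == 'x\\textasciicircum 2\\textasciitilde y'
    assert escape('a\\b') == 'a\\textbackslash b'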
def admonition_preamble():
"""
Returns commands to create admonition in latex.
"""
out = ['\\usepackage{xparse}']
out += ['\\usepackage{tabularx}']
out += ['\\usepackage[table]{xcolor}']
out += ['\\definecolor{code-background}{HTML}{ECF0F1}']
out += ['\\definecolor{info-title}{HTML}{528452} \\definecolor{info}{HTML}{82E0AA}']
out += ['\\definecolor{note-title}{HTML}{3A7296} \\definecolor{note}{HTML}{85C1E9}']
out += ['\\definecolor{important-title}{HTML}{B100B0} \\definecolor{important}{HTML}{FF00FF}']
out += ['\\definecolor{warning-title}{HTML}{968B2B} \\definecolor{warning}{HTML}{FFEC46}']
out += ['\\definecolor{danger-title}{HTML}{B14D00} \\definecolor{danger}{HTML}{F75E1D}']
out += ['\\definecolor{error-title}{HTML}{940000} \\definecolor{error}{HTML}{FFB4B4}']
cmd = '\\DeclareDocumentCommand{\\admonition}{O{warning-title}O{warning}mm}\n'
cmd += '{\n'
cmd += ' \\rowcolors{1}{#1}{#2}\n'
cmd += ' \\renewcommand{\\arraystretch}{1.5}\n'
cmd += ' \\begin{tabularx}{\\textwidth}{X}\n'
cmd += ' \\textcolor[rgb]{1,1,1}{\\textbf{#3}} \\\\ #4\n'
cmd += ' \\end{tabularx}\n'
cmd += ' \\rowcolors{1}{white}{white}\n'
cmd += '}\n'
return out + [cmd]
def listings_settings():
out = ['\\usepackage[table]{xcolor}']
out += ['\\definecolor{code-background}{HTML}{ECF0F1}']
out += ['\\lstset{basicstyle=\\footnotesize\\rmfamily, breaklines=true, backgroundcolor=\\color{code-background}}']
return out
class moose_table(elements.BlockElement):
"""
Builds table fitted to the width of the document.
"""
name = 'table'
def convert(self, tag, content):
tr = tag.tr
td = tr.find_all('th')
frmt = ['l']*len(td)
frmt[-1] = 'X'
        return '\\begin{tabularx}{\\linewidth}{%s}%s\\end{tabularx}' % (''.join(frmt), content)
def preamble(self):
return ['\\usepackage{tabularx}']
class admonition_div(elements.BlockElement):
"""
Create an admonition in latex, assumes that the \admonition command is defined
in the latex preamble.
"""
name = 'div'
attrs = ['class']
def test(self, tag):
return super(admonition_div, self).test(tag) and 'admonition' in tag.attrs['class']
def convert(self, tag, content):
atype = tag.attrs['class'][tag.attrs['class'].index('admonition')+1]
title = self.content(tag.contents[1])
#Message is optional
if len(tag.contents) < 4:
msg = ''
else:
msg = self.content(tag.contents[3])
return '\\admonition[%s-title][%s]{%s}{%s}' % (atype, atype, title, msg)
def preamble(self):
return admonition_preamble()
class moose_hide_hr(elements.hr):
"""
Hides horizontal hr tags in latex.
"""
def convert(self, tag, content):
return ''
class moose_inline_code(elements.InlineElement):
"""
Improved inline code that wraps lines and escapes latex special commands.
"""
name = 'code'
def convert(self, tag, content):
return '\\textrm{%s}' % escape(content)
class moose_pre(elements.pre):
"""
Uses listing package rather than verbatim for code.
"""
def convert(self, tag, content):
lang = get_language(tag.contents[0])
return '\\begin{lstlisting}[language=%s]\n%s\\end{lstlisting}' % (lang, content)
def preamble(self):
return ['\\usepackage{listings}'] + listings_settings()
class moose_pre_code(elements.pre_code):
"""
Handles the code environments that have the filename as a heading.
"""
def convert(self, tag, content):
lang = get_language(tag.contents[0])
return '\\begin{lstlisting}[language=%s]\n%s\\end{lstlisting}' % (lang, self.content(tag.contents[0]))
def preamble(self):
return ['\\usepackage{listings}'] + listings_settings()
class moose_code_div(elements.BlockElement):
"""
Create a listing block for code blocks generated by MooseMarkdown.
"""
name = 'div'
attrs = ['class']
def __init__(self):
super(moose_code_div, self).__init__()
def test(self, tag):
return super(moose_code_div, self).test(tag) and 'moosedocs-code-div' in tag.attrs['class']
def convert(self, tag, content):
# Locate the code
for code in tag.descendants:
if code.name == 'code':
break
# Determine the language and include the listings conversion from html to listing names
lang = get_language(code)
return '\\begin{lstlisting}[caption=%s, language=%s]\n%s\\end{lstlisting}' % (tag.contents[0], lang, self.content(code))
def preamble(self):
out = '\\usepackage{caption}\n'
out += '\\DeclareCaptionFormat{listing}{#1#2#3}\n'
out += '\\captionsetup[lstlisting]{format=listing, singlelinecheck=false, margin=0pt, font={sf}}'
return ['\\usepackage{listings}', out]
class moose_internal_links(elements.a):
"""
Create section-based hyper links
"""
def test(self, tag):
return super(moose_internal_links, self).test(tag) and tag['href'].startswith('#')
def convert(self, tag, content):
return '\\hyperref[sec:%s]{%s}' % (tag['href'][1:], content)
def preamble(self):
return ['\\usepackage{hyperref}']
class moose_markdown_links(elements.a):
"""
<a> tag that links to website if markdown file is provided.
"""
def __init__(self, site=None):
self._site = site
        self._path = None  # set by test() and reused by convert()
def test(self, tag):
"""
Test if the <a> tag contains a .md file include handling # for page section links.
"""
if super(moose_markdown_links, self).test(tag):
if tag['href'].endswith('.md'):
self._path = tag['href'][:-3].strip('/')
return True
elif '.md#' in tag['href']:
self._path = tag['href'].replace('.md', '/')
return True
self._path = None
return False
def convert(self, tag, content):
url = '{}/{}'.format(self._site, self._path)
return '\\href{%s}{%s}' % (url, content)
def preamble(self):
return ['\\usepackage{hyperref}']
class moose_img(elements.img):
"""
Handles images with MOOSE markdown by doing some extra work to make sure path is correct.
"""
def convert(self, tag, content):
path = tag.attrs['src']
if not os.path.exists(path):
lpath = path.strip('/')
if os.path.exists(lpath):
path = lpath
if not os.path.exists(path):
log.error('Image file does not exist: {}'.format(path))
path = os.path.abspath(path)
        width = tag.attrs.get('width', '\\linewidth')
return "\\begin{center}\n\\includegraphics[width=%s]{%s}\n\\end{center}" % (width, path)
def preamble(self):
return ['\\usepackage{graphicx}']
class moose_bib(elements.ol):
"""
Convert html bibliography (from MooseBibtex) to the correct latex entry.
"""
attrs = ['data-moose-bibfiles']
def convert(self, tag, content):
bibfiles = eval(tag.attrs['data-moose-bibfiles'])
return '\\bibliographystyle{unsrtnat}\n\\bibliography{%s}' % ','.join(bibfiles)
def preamble(self):
return ['\\usepackage{natbib}']
class moose_bib_span(elements.span):
"""
Convert the cite command from MooseBibtex to the proper latex cite command.
"""
attrs = ['data-moose-cite']
def convert(self, tag, content):
return tag.attrs['data-moose-cite']
class moose_slider(elements.BlockElement):
"""
    Produces an error for unsupported syntax for PDF creation.
"""
name = 'div'
attrs = ['class']
def test(self, tag):
return super(moose_slider, self).test(tag) and 'slider' in tag.attrs['class']
def convert(self, tag, content):
log.warning("!slideshow markdown is not currently supported for latex/pdf output.")
return '\\admonition[error-title][error]{ERROR: Un-supported Markdown!}{MOOSE Slider is not currently supported for pdf output.}'
def preamble(self):
return admonition_preamble()
class moose_buildstatus(elements.BlockElement):
"""
    Produces an error for unsupported syntax for PDF creation.
"""
name = 'div'
attrs = ['class']
def test(self, tag):
return super(moose_buildstatus, self).test(tag) and 'moose-buildstatus' in tag.attrs['class']
def convert(self, tag, content):
log.warning("!buildstatus markdown is not currently supported for latex/pdf output.")
return '\\admonition[error-title][error]{ERROR: Un-supported Markdown!}{MOOSE build status (!buildstatus) is not currently supported for pdf output.}'
def preamble(self):
return admonition_preamble()
class moose_diagram(elements.BlockElement):
"""
    Produces an error for unsupported diagram syntax.
@TODO: graphviz needs to support png output, the one in the MOOSE package does not.
"""
name = 'img'
attrs = ['class']
def test(self, tag):
return super(moose_diagram, self).test(tag) and 'moose-diagram' in tag.attrs['class']
def convert(self, tag, content):
log.warning("Dot diagram markdown syntax is not currently supported for latex/pdf output.")
return '\\admonition[error-title][error]{ERROR: Un-supported Markdown!}{Dot diagram markdown syntax is not currently supported for latex/pdf output.}'
def preamble(self):
return admonition_preamble()
|
paulthulstrup/moose
|
python/MooseDocs/html2latex/moose_elements.py
|
Python
|
lgpl-2.1
| 10,005
|
[
"MOOSE"
] |
7739e8c0386490b587b88cad3e4ec2c794f3b25795c03bed1a0079ece7285fbf
|
#!/usr/bin/env python
#
#@BEGIN LICENSE
#
# PSI4: an ab initio quantum chemistry software package
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#@END LICENSE
#
from __future__ import absolute_import
from __future__ import print_function
import sys
import math
import os
import re
import importlib
import collections
qcdbpkg_path = os.path.dirname(__file__)
sys.path.append(qcdbpkg_path + '/../')
import qcdb
import qcdb.basislist
from qcdb.exceptions import *
sys.path.append(qcdbpkg_path + '/../databases')
# load docstring info from database files (doesn't actually import database modules)
DBdocstrings = qcdb.dictify_database_docstrings()
# instructions
print("""
Welcome to imake-db.
Just fill in the variables when prompted.
Hit ENTER to accept default.
Strings should not be in quotes.
Elements in arrays should be space-delimited.
Nothing is case sensitive.
""")
# query database name
module_choices = dict(zip([x.upper() for x in DBdocstrings.keys()], DBdocstrings.keys()))
print('\n Choose your database.')
for item in module_choices.keys():
print(""" %-12s %s""" % ('[' + module_choices[item] + ']', DBdocstrings[module_choices[item]]['general'][0].lstrip(' |')))
print('\n')
user_obedient = False
while not user_obedient:
temp = raw_input(' dbse = ').strip()
if temp.upper() in module_choices.keys():
db_name = module_choices[temp.upper()]
user_obedient = True
# query database subset
subset_choices = dict(zip([x.upper() for x in DBdocstrings[db_name]['subset'].keys()], DBdocstrings[db_name]['subset'].keys()))
print('\n Choose your subset (multiple allowed).')
for key, val in DBdocstrings[db_name]['subset'].items():
print(""" %-12s %s""" % ('[' + key + ']', val))
print('\n')
subset = []
user_obedient = False
while not user_obedient:
temp = raw_input(' subset [all] = ').strip()
ltemp = temp.split()
if temp == "":
user_obedient = True
for item in ltemp:
if item.upper() in subset_choices.keys():
subset.append(subset_choices[item.upper()])
user_obedient = True
else:
user_obedient = False
subset = []
break
# query qc program
print("""
Choose your quantum chemistry program.
[qchem]
[molpro] writes Molpro input files
[molpro2] writes Molpro input files
[psi4] writes Psi4 input files
#[nwchem]
#[xyz] writes basic xyz files only
""")
user_obedient = False
while not user_obedient:
temp = raw_input(' qcprog = ').strip()
if temp.lower() in ['molpro', 'psi4', 'molpro2', 'qchem']:
qcprog = temp.lower()
user_obedient = True
# Load module for QC program
try:
qcmod = importlib.import_module('qcdb.' + qcprog)
except ImportError:
print('\nPython module for QC program %s failed to load\n\n' % (qcprog))
print('\nSearch path that was tried:\n')
print(", ".join(map(str, sys.path)))
raise ValidationError("Python module loading problem for QC program " + str(qcprog))
else:
print(qcmod)
qcmtdIN = qcmod.qcmtdIN
# query quantum chemical method(s)
method_choices = dict(zip([x.upper() for x in qcmtdIN.keys()], qcmtdIN.keys()))
print('\n Choose your quantum chemical methods (multiple allowed).')
for key, val in qcmtdIN.items():
print(""" %-12s""" % ('[' + key + ']'))
print('\n')
methods = []
user_obedient = False
while not user_obedient:
temp = raw_input(' methods = ').strip()
ltemp = temp.split()
for item in ltemp:
if item.upper() in method_choices:
methods.append(method_choices[item.upper()])
user_obedient = True
else:
user_obedient = False
methods = []
break
# query basis set(s)
print("""
Choose your basis set (multiple allowed).
e.g., aug-cc-pvdz or 6-31+G* or cc-pvtz may-cc-pvtz aug-cc-pvtz
""")
bases = []
user_obedient = False
while not user_obedient:
temp = raw_input(' bases = ').strip()
ltemp = temp.split()
for item in ltemp:
btemp = qcdb.basislist.corresponding_basis(item, role='BASIS')
if btemp:
bases.append(btemp)
user_obedient = True
else:
print(' Basis set %s not recognized.' % (item))
proceed = qcdb.query_yes_no(' Proceed anyway? =', False)
if proceed:
bases.append(item)
user_obedient = True
else:
bases = []
user_obedient = False
break
# query castup preference
print("""
Do cast up from smaller basis set?
""")
user_obedient = False
while not user_obedient:
castup = qcdb.query_yes_no(' castup [F] = ', False)
user_obedient = True
# below, options['SCF']['BASIS_GUESS']['value'] = castup
# query directory prefix
print("""
State your destination directory prefix.
""")
user_obedient = False
while not user_obedient:
temp = raw_input(' dirprefix [try] = ').strip()
if temp == "":
dirprefix = 'try'
user_obedient = True
if temp.isalnum():
dirprefix = temp
user_obedient = True
# query memory
print("""
Choose your memory usage in MB.
""")
user_obedient = False
while not user_obedient:
temp = raw_input(' memory [1600] = ').strip()
if temp == "":
memory = 1600
user_obedient = True
if temp.isdigit():
memory = int(temp)
user_obedient = True
# Load module for requested database
try:
database = __import__(db_name)
except ImportError:
print('\nPython module for database %s failed to load\n\n' % (db_name))
print('\nSearch path that was tried:\n')
print(", ".join(map(str, sys.path)))
raise ValidationError("Python module loading problem for database " + str(db_name))
else:
dbse = database.dbse
HRXN = database.HRXN
ACTV = database.ACTV
RXNM = database.RXNM
BIND = database.BIND
TAGL = database.TAGL
GEOS = database.GEOS
try:
DATA = database.DATA
except AttributeError:
DATA = {}
print("""
<<< SCANNED SETTINGS SCANNED SETTINGS SCANNED SETTINGS SCANNED SETTINGS >>>
dbse = %s
subset = %s
qcprog = %s
methods = %s
bases = %s
dirprefix = %s
memory [MB] = %d
usesymm =
cast up = %s
<<< SCANNED SETTINGS DISREGARD RESULTS IF INAPPROPRIATE SCANNED SETTINGS >>>
""" % (dbse, subset, qcprog, methods, bases, dirprefix, memory, castup))
# establish multiplicity hash table
mult = {
1: "singlet",
2: "doublet",
3: "triplet",
4: "quartet",
5: "quintet",
6: "sextet",
7: "septet",
8: "octet"}
# file extension
fext = 'xyz' if qcprog == 'xyz' else 'in'
# merge and condense HRXN from subset
if len(subset) == 0:
pass
else:
temp = []
for item in subset:
if item == 'small':
temp.append(database.HRXN_SM)
elif item == 'large':
temp.append(database.HRXN_LG)
elif item == 'equilibrium':
temp.append(database.HRXN_EQ)
else:
try:
temp.append(getattr(database, item))
except AttributeError:
try:
temp.append(getattr(database, 'HRXN_' + item))
except AttributeError:
raise ValidationError('Special subset \'%s\' not available for database %s.' % (item, db_name))
HRXN = qcdb.drop_duplicates(temp)
# assemble reagent list from reaction list
temp = []
for rxn in HRXN:
temp.append(database.ACTV['%s-%s' % (dbse, rxn)])
try:
temp.append(database.ACTV_CP['%s-%s' % (dbse, rxn)])
except AttributeError:
pass
HSYS = qcdb.drop_duplicates(temp)
# commence the file-writing loop
tdir = '-'.join([dirprefix, dbse, qcprog])
try:
os.mkdir(tdir)
except OSError:
print('Warning: directory %s already present.' % (tdir))
for basis in bases:
# below, options['GLOBALS']['BASIS']['value'] = basis
basdir = qcdb.basislist.sanitize_basisname(basis)
basdir = re.sub('-', '', basdir)
for method in methods:
mtddir = qcdb.basislist.sanitize_basisname(method).upper()
mtddir = re.sub('-', '', mtddir)
subdir = '-'.join([basdir, mtddir])
try:
os.mkdir(tdir + '/' + subdir)
except OSError:
print('Warning: directory %s/%s already present.' % (tdir, subdir))
# TODO: forcing c1 symm skipped - still needed for xdm and molpro
for system in HSYS:
# set up options dict
options = collections.defaultdict(lambda: collections.defaultdict(dict))
options['GLOBALS']['BASIS']['value'] = basis
options['SCF']['BASIS_GUESS']['value'] = castup
# QC program may reorient but at least input file geometry will match database
GEOS[system].fix_orientation(True)
GEOS[system].PYmove_to_com = False
GEOS[system].tagline = 'index %s label %s' % (system, TAGL[system])
GEOS[system].update_geometry()
dertype = 0
try:
if qcprog == 'molpro':
infile = qcmod.MolproIn(memory, method, basis, GEOS[system], system, castup).format_infile_string()
elif qcprog in ['psi4', 'molpro2', 'qchem']:
infile = qcmod.Infile(memory, GEOS[system], method, dertype, options).format_infile_string()
except FragmentCountError:
pass
# We're passing ACTV rgt list for SAPT methods so this error is to be expected
else:
sfile = tdir + '/' + subdir + '/' + system + '.' + fext
with open(sfile, 'w') as handle:
handle.write(infile)
|
loriab/qcdb
|
bin/imake-DB.py
|
Python
|
lgpl-3.0
| 10,650
|
[
"Molpro",
"NWChem",
"Psi4"
] |
f1dc975e5c53ec3a34b7bdb0b3872af84f29585d00491ddba43475ce55fe3004
|
#!/usr/bin/python
import pysam
import string
import argparse
# The MIT License (MIT)
# Copyright (c) [2014] [Peter Hickey (peter.hickey@gmail.com)]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
### Program description ###
############################################################################################################################################################################################
# Convert the ADS, ADS-adipose or ADS-iPSC MethylC-Seq mapped reads files from the Lister et al. 2011 (Nature) paper (downloaded from http://neomorph.salk.edu/ips_methylomes/data.html on 08/05/2012) to BAM format.
############################################################################################################################################################################################
### TODOs ###
############################################################################################################################################################################################
# TODO: Might add MD and NM tags with samtools calmd
############################################################################################################################################################################################
### INPUT FILE FORMAT ###
############################################################################################################################################################################################
## assembly = chromosome name (numeric, hg18)
## strand = strand for which the read is informative ("+" = OT, "-" = OB)
## start = start of read1 (0-based position)
## end = end of read2 (1-based), i.e. intervals are of the form (start, stop] = {start + 1, start + 2, ..., stop}
## sequenceA = sequence of read1 in left-to-right-Watson-strand orientation. Sequence complemented if strand = "-"
## sequenceB = sequence of read2 in left-to-right-Watson-strand orientation. Sequence complemented if strand = "-"
## id = read-ID
# NB: start < end by definition
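# A hypothetical example line (tab-delimited, columns as above, sequences truncated):
# 1	+	3000026	3000176	TAATTTATTAGG...	...TTGGCGTGGA	read_000001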
############################################################################################################################################################################################
### Command line passer ###
############################################################################################################################################################################################
parser = argparse.ArgumentParser(description='Convert Lister-style alignment files of MethylC-Seq data to BAM format.')
parser.add_argument('infile', metavar = 'infile',
help='The filename of the Lister-style file that is to be converted to BAM format')
parser.add_argument('outfile', metavar = 'out.bam',
help='The path to the new SAM/BAM file.')
parser.add_argument('ref_index', metavar = 'reference.fa.fai',
help='The path to the index (.fai file) of reference genome FASTA file.')
args = parser.parse_args()
############################################################################################################################################################################################
### Function definitions ###
# All functions return a 2-tuple - the first element for readL (the leftmost read, regardless of strand) and the second element for readR (the rightmost read, regardless of strand)
# If single-end data then only the first element should be used and the second element is set to None
#############################################################################################################################################################################################
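# For example (values hypothetical): makeRNAME('1', 'ACGT') returns ('chr1', 'chr1')
# for a paired-end record, while makeRNAME('1', '') returns ('chr1', None) for a
# single-end record.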
def makeRNAME(assembly, sequenceB):
rname = ''.join(['chr', assembly])
if sequenceB != '':
rnameL = rname
rnameR = rname
return rnameL, rnameR
else:
return rname, None
def makeQNAME(RNAMEL, ID, sequenceB):
qname = '_'.join([RNAMEL, ID])
if sequenceB != '':
qnameL = qname
qnameR = qname
return qnameL, qnameR
else:
return qname, None
def makePOS(start, end, sequenceA, sequenceB):
if sequenceB != '': # Read is paired-end
startL = int(start) # 0-based leftmost mapping position
startR = int(end) - len(sequenceB) # 0-based leftmost mapping position
return startL, startR
else:
start = int(start) # 0-based leftmost mapping position
return start, None
def makeFLAG(sequenceA, sequenceB, strand):
if sequenceB != '': # Read is paired-end in-sequencing
flagL = 0x0 # Flag value for read1 in a paired-end read
flagR = 0x0 # Flag value for read2 in a paired-end read
flagL += 0x01 # Paired-end read
flagR += 0x01 # Paired-end read
flagL += 0x02 # Flag is properly-paired according to the aligner (forcing to be true)
flagR += 0x02 # Flag is properly-paired according to the aligner (forcing to be true)
if strand == '+':
flagL += 0x20 # Seq of readR is reverse-complemented
flagR += 0x10 # Seq of readR is reverse-complemented
flagL += 0x40 # Leftmost read is read1
flagR += 0x80 # Rightmost read is read2
elif strand == '-':
flagL += 0x20 # Seq of read1 is reverse-complemented
flagR += 0x10 # Seq of read1 is reverse-complemented
flagR += 0x40 # Rightmost read is read1
flagL += 0x80 # Leftmost read is read2
return flagL, flagR
else: # Read is single-end
flag = 0x0
if strand == '-':
flag += 0x10
return flag, None
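# Worked example: for a paired-end read on the '+' strand the flags come out as
# flagL = 0x01 + 0x02 + 0x20 + 0x40 = 99 and flagR = 0x01 + 0x02 + 0x10 + 0x80 = 147;
# on the '-' strand they are flagL = 0x01 + 0x02 + 0x20 + 0x80 = 163 and
# flagR = 0x01 + 0x02 + 0x10 + 0x40 = 83, i.e. the standard SAM flags for
# properly-paired reads.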
def makeMAPQ(sequenceB):
if sequenceB != '':
return 255, 255 # No mapping information available
else:
return 255, None # No mapping information available
def makeCIGAR(sequenceA, sequenceB):
if sequenceB != '':
cigarL = [(0, len(sequenceA))]
cigarR = [(0, len(sequenceB))]
return cigarL, cigarR
else:
cigar = [(0, len(sequenceA))]
return cigar, None
def makeRNEXT(RNAMEL, RNAMER):
if RNAMER is not None:
return RNAMER, RNAMEL
else:
return '*', None
def makePNEXT(startL, startR):
if startR is not None:
return startR, startL
else:
return 0, None
def makeTLEN(start, end, sequenceB):
if sequenceB != '': # Paired-end read
abs_tlen = int(end) - int(start) # absolute value of TLEN
return abs_tlen, -abs_tlen
else:
return 0, None
def makeSEQ(sequenceA, sequenceB, strand):
if strand == '+':
seqL = sequenceA
seqR = sequenceB
elif strand == '-':
seqL = DNAComplement(sequenceA)
seqR = DNAComplement(sequenceB)
if sequenceB != '':
return seqL, seqR
else:
return seqL, None
def DNAComplement(strand):
return strand.translate(string.maketrans('TAGCNtagcn', 'ATCGNATCGN'))
def makeQUAL(sequenceA, sequenceB):
qualL = 'E' * len(sequenceA)
qualR = 'E' * len(sequenceB)
if sequenceB != '':
return qualL, qualR
else:
return qualL, None
def makeXG(sequenceB, strand):
if strand == '+':
XG = 'CT'
elif strand == '-':
XG = 'GA'
if sequenceB != '':
XGL = ('XG', XG)
XGR = ('XG', XG)
return XGL, XGR
else:
return ('XG', XG), None
def createHeader():
FAIDX = open(args.ref_index, 'r')
faidx = FAIDX.read().rstrip().rsplit('\n')
hd = {'VN': '1.0', 'SO': 'unsorted'}
sq = []
for i in range(0, len(faidx)):
line = faidx[i].rsplit('\t')
sq.append({'LN': int(line[1]), 'SN': line[0], 'AS': 'hg18+lambda_phage'})
pgid = 'Lister_style_6_to_bam.py'
vn = '1.0'
cl = ' '.join([pgid, args.infile, args.outfile, args.ref_index])
pg = [{'ID': pgid, 'VN': vn, 'CL': cl}]
header = {'HD': hd, 'SQ': sq, 'PG': pg}
FAIDX.close()
return header
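# Hypothetical example: a .fai line "chr1\t247249719\t6\t50\t51" contributes the
# header entry {'LN': 247249719, 'SN': 'chr1', 'AS': 'hg18+lambda_phage'}.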
#############################################################################################################################################################################################
### Open files ###
############################################################################################################################################################################################
INFILE = open(args.infile, 'r')
header = createHeader()
BAM = pysam.Samfile(args.outfile, 'wb', header = header)
############################################################################################################################################################################################
### The main loop ###
############################################################################################################################################################################################
# Loop over methylC_seq_reads files file-by-file (i.e. chromosome-by-chromosome)
print 'Input file is', args.infile
linecounter = 1
for line in INFILE: # Loop over the file line-by-line and convert to an AlignedRead instance
if linecounter == 1: # Skip the header line
linecounter +=1
continue
line = line.rstrip('\n').rsplit('\t')
# Fields of the Lister-style file
assembly = line[0]
strand = line[1]
start = line[2]
end = line[3]
sequenceA = line[4]
sequenceB = line[5]
ID = line[6]
# Make the SAM/BAM fields
RNAMEL, RNAMER = makeRNAME(assembly, sequenceB)
QNAMEL, QNAMER = makeQNAME(RNAMEL, ID, sequenceB)
FLAGL, FLAGR = makeFLAG(sequenceA, sequenceB, strand)
POSL, POSR = makePOS(start, end, sequenceA, sequenceB)
MAPQL, MAPQR = makeMAPQ(sequenceB)
CIGARL, CIGARR = makeCIGAR(sequenceA, sequenceB)
RNEXTL, RNEXTR = makeRNEXT(RNAMEL, RNAMER)
PNEXTL, PNEXTR = makePNEXT(POSL, POSR)
TLENL, TLENR = makeTLEN(start, end, sequenceB)
SEQL, SEQR = makeSEQ(sequenceA, sequenceB, strand)
QUALL, QUALR = makeQUAL(sequenceA, sequenceB)
XGL, XGR = makeXG(sequenceB, strand)
if sequenceA == '':
print 'WARNING: Empty sequenceA at line', linecounter, 'in file', args.infile
print line
if sequenceB == '':
print 'WARNING: Empty sequenceB at line', linecounter, 'in file', args.infile
print line
# Paired-end: using readL/readR notation, thus for the Lister protocol a OT-strand readL=read1 and readR=read2 whereas for OB-strand readL=read2 and readR=read1
readL = pysam.AlignedRead()
readR = pysam.AlignedRead()
readL.rname = BAM.gettid(RNAMEL)
readR.rname = BAM.gettid(RNAMER)
readL.qname = QNAMEL
readR.qname = QNAMER
readL.flag = FLAGL
readR.flag = FLAGR
readL.pos = POSL
readR.pos = POSR
readL.mapq = MAPQL
readR.mapq = MAPQR
readL.cigar = CIGARL
readR.cigar = CIGARR
readL.rnext = BAM.gettid(RNEXTL)
readR.rnext = BAM.gettid(RNEXTR)
readL.pnext = PNEXTL
readR.pnext = PNEXTR
readL.tlen = TLENL
readR.tlen = TLENR
readL.seq = SEQL
readR.seq = SEQR
readL.qual = QUALL
readR.qual = QUALR
readL.tags = readL.tags + [XGL]
    readR.tags = readR.tags + [XGR]
if not readL.is_paired:
if readL.opt('XG') == 'CT':
readL.tags = readL.tags + [('XR', 'CT')]
elif readL.opt('XG') == 'GA':
readL.tags = readL.tags + [('XR', 'CT')]
elif readL.is_paired:
        if readL.opt('XG') == 'CT' and readL.is_read1:
readL.tags = readL.tags + [('XR', 'CT')]
        elif readL.opt('XG') == 'CT' and readL.is_read2:
readL.tags = readL.tags + [('XR', 'GA')]
        elif readL.opt('XG') == 'GA' and readL.is_read1:
readL.tags = readL.tags + [('XR', 'CT')]
        elif readL.opt('XG') == 'GA' and readL.is_read2:
readL.tags = readL.tags + [('XR', 'GA')]
if not readR.is_paired:
if readR.opt('XG') == 'CT':
readR.tags = readR.tags + [('XR', 'CT')]
elif readR.opt('XG') == 'GA':
readR.tags = readR.tags + [('XR', 'CT')]
elif readR.is_paired:
        if readR.opt('XG') == 'CT' and readR.is_read1:
readR.tags = readR.tags + [('XR', 'CT')]
        elif readR.opt('XG') == 'CT' and readR.is_read2:
readR.tags = readR.tags + [('XR', 'GA')]
        elif readR.opt('XG') == 'GA' and readR.is_read1:
readR.tags = readR.tags + [('XR', 'CT')]
        elif readR.opt('XG') == 'GA' and readR.is_read2:
readR.tags = readR.tags + [('XR', 'GA')]
BAM.write(readL)
BAM.write(readR)
linecounter += 1
############################################################################################################################################################################################
### Close files
############################################################################################################################################################################################
INFILE.close()
BAM.close()
############################################################################################################################################################################################
|
PeteHaitch/Lister2BAM
|
Python/Lister_style_6_to_bam.py
|
Python
|
mit
| 14,247
|
[
"pysam"
] |
2a7eea413006638e028a988fe06898ca316a446a848c33abe2bc25d7db44bb61
|
from os.path import basename
from os.path import join
from os.path import dirname
from os import sep
from ..util import PathHelper
COMMAND_VERSION_FILENAME = "COMMAND_VERSION"
class ClientJobDescription(object):
""" A description of how client views job - command_line, inputs, etc..
**Parameters**
command_line : str
The local command line to execute, this will be rewritten for
the remote server.
config_files : list
List of Galaxy 'configfile's produced for this job. These will
be rewritten and sent to remote server.
input_files : list
List of input files used by job. These will be transferred and
references rewritten.
client_outputs : ClientOutputs
Description of outputs produced by job (at least output files along
with optional version string and working directory outputs.
tool_dir : str
Directory containing tool to execute (if a wrapper is used, it will
be transferred to remote server).
working_directory : str
Local path created by Galaxy for running this job.
dependencies_description : list
galaxy.tools.deps.dependencies.DependencyDescription object describing
        tool dependency context for remote dependency resolution.
env: list
List of dict object describing environment variables to populate.
version_file : str
Path to version file expected on the client server
arbitrary_files : dict()
Additional non-input, non-tool, non-config, non-working directory files
to transfer before staging job. This is most likely data indices but
can be anything. For now these are copied into staging working
directory but this will be reworked to find a better, more robust
location.
rewrite_paths : boolean
Indicates whether paths should be rewritten in job inputs (command_line
        and config files) while staging files.
"""
def __init__(
self,
tool,
command_line,
config_files,
input_files,
client_outputs,
working_directory,
dependencies_description=None,
env=[],
arbitrary_files=None,
rewrite_paths=True,
):
self.tool = tool
self.command_line = command_line
self.config_files = config_files
self.input_files = input_files
self.client_outputs = client_outputs
self.working_directory = working_directory
self.dependencies_description = dependencies_description
self.env = env
self.rewrite_paths = rewrite_paths
self.arbitrary_files = arbitrary_files or {}
@property
def output_files(self):
return self.client_outputs.output_files
@property
def version_file(self):
return self.client_outputs.version_file
@property
def tool_dependencies(self):
if not self.remote_dependency_resolution:
return None
return dict(
requirements=(self.tool.requirements or []),
installed_tool_dependencies=(self.tool.installed_tool_dependencies or [])
)
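# A minimal construction sketch (hypothetical values; ClientOutputs is defined
# below) showing how a Galaxy runner might describe a job for the LWR client:
#
#     client_outputs = ClientOutputs(
#         working_directory="/galaxy/jobs/1/working",
#         output_files=["/galaxy/files/dataset_2.dat"],
#     )
#     job_description = ClientJobDescription(
#         tool=tool,
#         command_line="tool_wrapper.py --input input.dat --output output.dat",
#         config_files=[],
#         input_files=["/galaxy/files/dataset_1.dat"],
#         client_outputs=client_outputs,
#         working_directory="/galaxy/jobs/1/working",
#     )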
class ClientOutputs(object):
""" Abstraction describing the output datasets EXPECTED by the Galaxy job
runner client.
"""
def __init__(self, working_directory, output_files, work_dir_outputs=None, version_file=None):
self.working_directory = working_directory
self.work_dir_outputs = work_dir_outputs
self.output_files = output_files
self.version_file = version_file
def to_dict(self):
return dict(
working_directory=self.working_directory,
work_dir_outputs=self.work_dir_outputs,
output_files=self.output_files,
version_file=self.version_file
)
@staticmethod
def from_dict(config_dict):
return ClientOutputs(
working_directory=config_dict.get('working_directory'),
work_dir_outputs=config_dict.get('work_dir_outputs'),
output_files=config_dict.get('output_files'),
version_file=config_dict.get('version_file'),
)
class LwrOutputs(object):
""" Abstraction describing the output files PRODUCED by the remote LWR
server. """
def __init__(self, working_directory_contents, output_directory_contents, remote_separator=sep):
self.working_directory_contents = working_directory_contents
self.output_directory_contents = output_directory_contents
self.path_helper = PathHelper(remote_separator)
@staticmethod
def from_status_response(complete_response):
# Default to None instead of [] to distinguish between empty contents and it not set
# by the LWR - older LWR instances will not set these in complete response.
working_directory_contents = complete_response.get("working_directory_contents", None)
output_directory_contents = complete_response.get("outputs_directory_contents", None)
# Older (pre-2014) LWR servers will not include separator in response,
# so this should only be used when reasoning about outputs in
# subdirectories (which was not previously supported prior to that).
remote_separator = complete_response.get("system_properties", {}).get("separator", sep)
return LwrOutputs(
working_directory_contents,
output_directory_contents,
remote_separator
)
def has_output_file(self, output_file):
if self.output_directory_contents is None:
# Legacy LWR doesn't report this, return None indicating unsure if
# output was generated.
return None
else:
return basename(output_file) in self.output_directory_contents
def has_output_directory_listing(self):
return self.output_directory_contents is not None
def output_extras(self, output_file):
"""
Returns dict mapping local path to remote name.
"""
if not self.has_output_directory_listing():
# Fetching $output.extra_files_path is not supported with legacy
            # LWR (pre-2014) servers.
return {}
output_directory = dirname(output_file)
def local_path(name):
return join(output_directory, self.path_helper.local_name(name))
files_directory = "%s_files%s" % (basename(output_file)[0:-len(".dat")], self.path_helper.separator)
names = filter(lambda o: o.startswith(files_directory), self.output_directory_contents)
return dict(map(lambda name: (local_path(name), name), names))
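# Example (hypothetical paths): for output_file "/data/dataset_42.dat" and a remote
# listing containing "dataset_42_files/plot.png", output_extras() would return
# {"/data/dataset_42_files/plot.png": "dataset_42_files/plot.png"}.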
|
jmchilton/lwr
|
lwr/lwr_client/staging/__init__.py
|
Python
|
apache-2.0
| 6,647
|
[
"Galaxy"
] |
83ecfa38c71a55b7ab27924186e71abf9e3c9f65b51933088c2ee8af3fe1c482
|
# This test calculates the spherical harmonic expansion of the all-electron Kohn-Sham potential of a helium atom
# using the nucleus.calculate_all_electron_potential method
from gpaw import *
from ase import *
from gpaw.atom.all_electron import AllElectron
from math import pi, sqrt
# Calculate Helium atom using 3D-code
he = Atoms(positions=[(0,0,0)], symbols='He')
he.center(vacuum=3.0)
calc = GPAW(h=0.17)
he.set_calculator(calc)
he.get_potential_energy()
# Get the all-electron potential around the nucleus
vKS_sLg = calc.nuclei[0].calculate_all_electron_potential(calc.hamiltonian.vHt_g)
# Calculate Helium atom using 1D-code
he_atom =AllElectron('He')
he_atom.run()
# Get the KS-potential
vKS_atom = he_atom.vr / he_atom.r
vKS_atom[0] = vKS_atom[1]
# Get the spherical symmetric part and multiply with Y_00
vKS = vKS_sLg[0][0] / sqrt(4*pi)
# Compare
avg_diff = 0.0
for i, v in enumerate(vKS):
avg_diff += abs(vKS_atom[i]-v)
avg_diff /= len(vKS)
print "Potential expansion is correct to", avg_diff * calc.Ha, " eV"
assert abs(avg_diff * calc.Ha) < 0.02
|
qsnake/gpaw
|
oldtest/he_ae.py
|
Python
|
gpl-3.0
| 1,030
|
[
"ASE",
"GPAW"
] |
8cf735432ebfb67b1e53b0aabfd46a27f0f434b76f48598e628f21ef293ff215
|
#!/usr/bin/env python
###########################################################################
## ##
## Language Technologies Institute ##
## Carnegie Mellon University ##
## Copyright (c) 2012 ##
## All Rights Reserved. ##
## ##
## Permission is hereby granted, free of charge, to use and distribute ##
## this software and its documentation without restriction, including ##
## without limitation the rights to use, copy, modify, merge, publish, ##
## distribute, sublicense, and/or sell copies of this work, and to ##
## permit persons to whom this work is furnished to do so, subject to ##
## the following conditions: ##
## 1. The code must retain the above copyright notice, this list of ##
## conditions and the following disclaimer. ##
## 2. Any modifications must be clearly marked as such. ##
## 3. Original authors' names are not deleted. ##
## 4. The authors' names are not used to endorse or promote products ##
## derived from this software without specific prior written ##
## permission. ##
## ##
## CARNEGIE MELLON UNIVERSITY AND THE CONTRIBUTORS TO THIS WORK ##
## DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ##
## ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT ##
## SHALL CARNEGIE MELLON UNIVERSITY NOR THE CONTRIBUTORS BE LIABLE ##
## FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ##
## WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN ##
## AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ##
## ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF ##
## THIS SOFTWARE. ##
## ##
###########################################################################
## Author: Aasish Pappu (aasish@cs.cmu.edu) ##
## Date : November 2012 ##
###########################################################################
## Description: Example python backend module for olympus applications ##
## ##
## ##
###########################################################################
import os, sys, string, math, random
import exceptions
from copy import copy, deepcopy
import re
from time import sleep
from random import randint
from threading import Thread, Timer
import logging
import os.path as path
import Control #@yipeiw
import Loader
import NLG
os.environ['GC_HOME'] = os.path.join(os.environ['OLYMPUS_ROOT'], 'Libraries', 'Galaxy')
sys.path.append(os.path.join(os.environ['GC_HOME'], 'contrib', 'MITRE', 'templates'))
sys.path.append(os.path.join(os.environ['OLYMPUS_ROOT'], 'bin', 'x86-nt'))
import GC_py_init
import Galaxy, GalaxyIO
import time
galaxyServer = None
current_dialog_state = None
home_dialog_state = None
current_dialog_state_counter = 0
current_dialog_state_begin = None
global_dialog_state_counter = 0
from random import randrange
logger = None
def InitLogging():
global logger
logger = logging.getLogger('BE')
hdlr = logging.FileHandler('BE.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.WARNING)
def Log(input):
global logger
print input
logger.error(input)
sys.stdout.flush()
#@yipeiw
database = {}
resource = {}
listfile = 'conversation.list'
rescource_root = 'resource'
template_list=['template/template_new.txt', 'template/template_end.txt', 'template/template_open.txt', 'template/template_expand.txt']
template_list = [path.join(rescource_root, name) for name in template_list]
topicfile = path.join(rescource_root, 'topic.txt')
#currentime = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
#fileout = open(currentime, 'w')
def InitResource():
global database, resource
datalist=[line.strip() for line in open(listfile)]
database = Loader.LoadDataPair(datalist)
resource = Loader.LoadLanguageResource()
global TemplateLib, TopicLib, TreeState, Template
TemplateLib = Loader.LoadTemplate(template_list)
TopicLib = Loader.LoadTopic(topicfile)
TreeState, Template = Control.Init()
def Welcome(env, dict):
Log(dict)
user_id = dict[":user_id"]
# Log(user_id)
# if env and user_id not in provider_env:
# provider_env[user_id] = env
# Log('Stored the env for user_id %s' %(user_id))
Log("Welcome to the new Backend Server")
prog_name = "reinitialize"
#print Galaxy.Frame(prog_name, Galaxy.GAL_CLAUSE,dict)
print dict
return Galaxy.Frame(prog_name, Galaxy.GAL_CLAUSE,dict)
def SetDialogState(env, dict):
global current_dialog_state
global home_dialog_state
global current_dialog_state_counter
global current_dialog_state_begin
global global_dialog_state_counter
inframe = dict[":dialog_state"]
# extracting the dialog state and turn number
# main logic of updating the dialog state, such as sleeping, awake, etc
lines = inframe.split('\n')
new_dialog_state = None
turn_counter = 0
for l in lines:
components = l.split(' = ')
if (len(components)!=2):
continue
prefix = components[0]
suffix = components[1]
if (prefix == "dialog_state"):
new_dialog_state = suffix
if (global_dialog_state_counter == 0):
home_dialog_state = new_dialog_state
print "current_dialog_state", current_dialog_state
print "new_dialog_state", new_dialog_state
if (current_dialog_state == new_dialog_state):
current_dialog_state_counter = turn_counter - current_dialog_state_begin
current_dialog_state = new_dialog_state
print "cur == new, cur_counter =", current_dialog_state_counter
else:
current_dialog_state = new_dialog_state
current_dialog_state_counter = 0
current_dialog_state_begin = turn_counter
print "cur != new, cur_begin =", current_dialog_state_begin
print "cur_counter =", current_dialog_state_counter
elif (prefix == "turn_number"):
turn_counter = int(suffix)
print "get turn counter", turn_counter
if (global_dialog_state_counter == -1 or turn_counter == 0):
global_dialog_state_counter = 0
#print "set g_d_s_c to 0"
else:
global_dialog_state_counter = turn_counter
#print "g_d_s_c =", turn_counter
#print "end of turn counter"
print "==============================="
print "DIALOG STATE is", current_dialog_state
print "CURRENT TURN NUMBER is", current_dialog_state_counter
state_out = -1
if (current_dialog_state.endswith(aware_state)):
print "system is aware of the person but can't see"
state_out = 4
elif (current_dialog_state == home_dialog_state):
print "system is sleeping now ... zzz"
state_out = 1
elif (current_dialog_state_counter >= 1):
print "system is puzzled ... "
state_out = 2
else:
print "system can understand you."
state_out = 3
count = 1
onDialogState(state_out)
print "==============================="
# end of the main logic
prog_name = "main"
outframe = "got dialog state"
f = Galaxy.Frame(prog_name, Galaxy.GAL_CLAUSE, {":outframe": outframe})
return f
def ReadRawInFrame(inframe_str):
Log("In Read Raw InFrame")
inframe_str = inframe_str.strip('\n').strip('}').strip('{')
inframe_dict = {}
inframe_lines = inframe_str.split('\n')
list_holder = None
current_list_key = None
in_array = False
Log(inframe_lines)
Log("######")
for line in inframe_lines:
line = line.strip('\n').strip(' ').lower()
if in_array is False:
# very likely key value pairs
if ' ' in line:
if ':' in line:
#beginning of array?
Log(line)
key, value = line.split(' ')
if re.match('^:\d+$', value) is not None:
in_array = True
list_holder = []
current_list_key = key
else:
Log(line)
key, value = line.split(' ')
inframe_dict[key] = value
else:
if line != '{' and line !='}':
if ' ' in line:
line = line.replace(' ', '_')
list_holder.append(line)
elif line == '}':
new_list = list_holder
inframe_dict[current_list_key] = new_list
list_holder = None
in_array = False
return inframe_dict
def SayThanks():
msg = {'[schedule_final]':'Your activity has been scheduled'}
SendMessageToDM('A', '[schedule]', msg)
SendMessageToDM('B', '[schedule]', msg)
#@yipeiw
def get_response(user_input):
global database, resource
global TemplateLib, TopicLib, TreeState, Template
relavance, answer = Control.FindCandidate(database, resource, user_input)
state = Control.SelectState(relavance, TreeState)
Log('DM STATE is [ %s ]' %(state))
print 'state:', state['name']
print "candidate answer ", relavance, answer
output = NLG.FillTemplate(TemplateLib, TopicLib, Template[state['name']], answer)
Log('OUTPUT is [ %s ]' %(output))
fileout = open('input_response_history.txt', 'a')
fileout.write(str(user_input) + '\n')
fileout.write(str(output) + '\n')
fileout.close()
return output
def LaunchQuery(env, dict):
global requestCounter
Log("Launching a query")
Log(dict.keys())
propertiesframe = env.GetSessionProperties(dict.keys())
hub_opaque_data = propertiesframe[':hub_opaque_data']
provider_id = hub_opaque_data[':provider_id'].strip('[').strip(']')
try: prog_name = dict[":program"]
except: prog_name = "main"
inframe = dict[":inframe"]
inframe = inframe.replace("\n{c inframe \n}", "")
Log("Converting inframe to galaxy frame")
#Log(inframe)
raw_inframe_str = dict[":inframe"]
inframe_raw_dict = ReadRawInFrame(raw_inframe_str)
Log('RAW INFRAME is \n%s' %(str(inframe_raw_dict)))
user_input = ''
system_response = 'pardon me'
try:
user_input = inframe_raw_dict['user_input'].strip('"')
user_input = user_input.replace('_', ' ')
except KeyError:
system_response = 'I am Tick Tock, how are you doing'
pass
if user_input:
#system_response = user_input
#system_response = get_response(user_input)
filehistory = open('input_response_history.txt', 'r')
system_tail = tail(filehistory, 4)
filehistory.close()
Log('USER INPUT is [ %s ]' %(user_input))
if user_input == '':
system_response = 'pardon me'
elif (user_input == 'repeat') or (user_input == 'say that again') or (user_input == 'what did you say'):
filein = open('history.txt','r')
system_response = filein.readline()
filein.close()
elif (system_tail[0] == system_tail[2]) and (system_tail[0] == user_input):
system_response = 'I am having a good time talking to you.{ {BREAK TIME="2s"/}} Do you want to keep going,' \
' if not, you can say goodbye'
else:
system_response = get_response(user_input)
fileout = open('history.txt', 'w')
fileout.write(str(system_response) + '\n')
fileout.close()
prefix = ['', 'well ... ', 'uh ... ', '', 'let me see ... ', 'oh ... ']
cur_index = -1
while True:
random_index = randrange(0, len(prefix))
if random_index != cur_index:
break
cur_index = random_index
system_response = prefix[cur_index] + system_response
resultsFrame = '{\n res %s \n}\n}' %(system_response)
#Log("outframe")
f = Galaxy.Frame(prog_name, Galaxy.GAL_CLAUSE, {":outframe": resultsFrame})
#Log(f)
return f
def tail(f, n, offset=0):
"""Reads a n lines from f with an offset of offset lines."""
avg_line_length = 74
to_read = n + offset
while 1:
try:
f.seek(-(avg_line_length * to_read), 2)
except IOError:
# woops. apparently file is smaller than what we want
# to step back, go to the beginning instead
f.seek(0)
pos = f.tell()
lines = f.read().splitlines()
if len(lines) >= to_read or pos == 0:
return lines[-to_read:offset and -offset or None]
avg_line_length *= 1.3
# oas in C is -increment i.
OAS = [("-increment i", "initial increment")]
# Write a wrapper for the usage check.
class BackEnd(GalaxyIO.Server):
def CheckUsage(self, oas_list, args):
global InitialIncrement
data, out_args = GalaxyIO.Server.CheckUsage(self, OAS + oas_list, args)
if data.has_key("-increment"):
InitialIncrement = data["-increment"][0]
del data["-increment"]
return data, out_args
def SendToHub(provider, frame):
prog_name = "main"
global provider_env
env = provider_env[provider]
if env:
f = Galaxy.Frame(prog_name, Galaxy.GAL_CLAUSE, frame)
try:
env.WriteFrame(f)
except GalaxyIO.DispatchError:
Log('ERROR: cannot send frame')
def SendMessageToDM(provider, msgtype, msg):
prog_name = "main"
print 'lets say hello to DM async way'
nets = []
parse_str = []
hyp_str = []
for k, v in msg.iteritems():
net = Galaxy.Frame("slot", Galaxy.GAL_CLAUSE, {':name':k, ':contents':v})
nets.append(net)
parse_str.append('( %s ( %s ) )' %(k, v))
hyp_str.append(v)
#Log('Test Printing the nets\n %s' %(Galaxy.OPr(nets)))
#Log('----------THEEND OF NETS -------')
gfSlot = {}
gfParse = {}
gfSlot[":nets"] = nets
gfSlot[":numnets"] = len(nets)
gfSlot[":name"] = msgtype
gfSlot[":contents"] = ' '.join(hyp_str)
gfSlot[":frame"] = "Fake Frame"
gfSlotFrame = Galaxy.Frame("slot", Galaxy.GAL_CLAUSE, gfSlot)
slots = [gfSlotFrame]
#Log('Test Printing the slots\n %s' %(Galaxy.OPr(slots)))
#Log('----------THEEND OF SLOTS-------')
gfParse[":gal_slotsstring"] = Galaxy.OPr(slots)
gfParse[":slots"] = slots
gfParse[":numslots"] = 1
gfParse[":uttid"] = "-1"
gfParse[":hyp"] = ' '.join(hyp_str)
gfParse[":hyp_index"] = 0
gfParse[":hyp_num_parses"] = 1
gfParse[":decoder_score"] = 0.0
gfParse[":am_score"] = 0.0
gfParse[":lm_score"] = 0.0
gfParse[":frame_num"] = 0
gfParse[":acoustic_gap_norm"] = 0.0
gfParse[":avg_wordconf"] = 0.0
gfParse[":min_wordconf"] = 0.0
gfParse[":max_wordconf"] = 0.0
gfParse[":avg_validwordconf"] = 0.0
gfParse[":min_validwordconf"] = 0.0
gfParse[":max_validwordconf"] = 0.0
gfParse[":parsestring"] = ' '.join(parse_str)
Log('Test printing the parse frame')
gfParseFrame = Galaxy.Frame("utterance", Galaxy.GAL_CLAUSE, gfParse)
#gfParseFrame.Print()
parses = [gfParseFrame]
confhyps = [gfParseFrame]
f = Galaxy.Frame(prog_name, Galaxy.GAL_CLAUSE, {":confhyps": confhyps,
":parses": parses,
':total_numparses': 1,
':input_source': 'gal_be',
':gated_input': 'gated_input'})
Log("Sending the message to DM")
#Log(f)
SendToHub(provider, f)
Log("Sent to DM")
def GalInterface():
InitLogging()
Log("Starting Galaxy Server")
global galaxyServer
#load database and other resources @yipeiw
InitResource()
galaxyServer = BackEnd(sys.argv, "gal_be",
default_port = 2900)
galaxyServer.AddDispatchFunction("set_dialog_state", SetDialogState,
[[], Galaxy.GAL_OTHER_KEYS_NEVER,
Galaxy.GAL_REPLY_NONE, [],
Galaxy.GAL_OTHER_KEYS_NEVER])
galaxyServer.AddDispatchFunction("launch_query", LaunchQuery,
[[], Galaxy.GAL_OTHER_KEYS_NEVER,
Galaxy.GAL_REPLY_NONE, [],
Galaxy.GAL_OTHER_KEYS_NEVER])
galaxyServer.AddDispatchFunction("reinitialize", Welcome,
[[], Galaxy.GAL_OTHER_KEYS_NEVER,
Galaxy.GAL_REPLY_NONE, [],
Galaxy.GAL_OTHER_KEYS_NEVER])
galaxyServer.AddDispatchFunction("welcome", Welcome,
[[], Galaxy.GAL_OTHER_KEYS_NEVER,
Galaxy.GAL_REPLY_NONE, [],
Galaxy.GAL_OTHER_KEYS_NEVER])
galaxyServer.RunServer()
def MonitorThread():
current_focus = {}
def LaunchQueryDebug(user_input):
# this guy is only used in debugging
#system_response = user_input
#system_response = get_response(user_input)
filehistory = open('input_response_history.txt', 'r')
system_tail = tail(filehistory, 4)
filehistory.close()
Log('USER INPUT is [ %s ]' %(user_input))
if user_input == '':
system_response = 'pardon me?'
elif (user_input == 'repeat') or (user_input == 'say that again') or (user_input == 'what did you say'):
filein = open('history.txt','r')
system_response = filein.readline()
filein.close()
elif (system_tail[0] == system_tail[2]) and (system_tail[0] == user_input):
system_response = 'I am having a good time, do you want to keep going,...' \
' if not, you can say goodbye'
else:
system_response = get_response(user_input)
fileout = open('history.txt', 'w')
fileout.write(str(system_response) + '\n')
fileout.close()
prefix = ['', 'well ... ', 'uh ... ', '', 'let me see ... ', 'oh ... ']
cur_index = -1
while True:
random_index = randrange(0, len(prefix))
if random_index != cur_index:
break
cur_index = random_index
system_response = prefix[cur_index] + system_response
print(system_response)
if __name__ == "__main__":
gt = Thread(target=GalInterface)
gt.start()
gt.join()
|
leahrnh/ticktock_text_api
|
galbackend_conversation.py
|
Python
|
gpl-2.0
| 20,084
|
[
"Galaxy"
] |
01c599d66ee9343ae03423c3223e58413294a39abb45f72c6b3dac429835dad4
|
from functools import partial
import numpy as np
from numpy.testing import assert_allclose
import scipy.sparse as sps
from scipy.optimize import approx_fprime
from sklearn.datasets import make_regression
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import KFold
from sklearn.model_selection import GridSearchCV, cross_val_score
from nose.tools import assert_true, assert_equal, assert_raises
from pyglmnet import (GLM, GLMCV, _grad_L2loss, _L2loss, simulate_glm,
_gradhess_logloss_1d)
def test_gradients():
"""Test gradient accuracy."""
# data
scaler = StandardScaler()
n_samples, n_features = 1000, 100
X = np.random.normal(0.0, 1.0, [n_samples, n_features])
X = scaler.fit_transform(X)
density = 0.1
beta_ = np.zeros(n_features + 1)
beta_[0] = np.random.rand()
beta_[1:] = sps.rand(n_features, 1, density=density).toarray()[:, 0]
reg_lambda = 0.1
distrs = ['gaussian', 'binomial', 'softplus', 'poisson', 'probit', 'gamma']
for distr in distrs:
glm = GLM(distr=distr, reg_lambda=reg_lambda)
y = simulate_glm(glm.distr, beta_[0], beta_[1:], X)
func = partial(_L2loss, distr, glm.alpha,
glm.Tau, reg_lambda, X, y, glm.eta, glm.group)
grad = partial(_grad_L2loss, distr, glm.alpha, glm.Tau,
reg_lambda, X, y,
glm.eta)
approx_grad = approx_fprime(beta_, func, 1.5e-8)
analytical_grad = grad(beta_)
assert_allclose(approx_grad, analytical_grad, rtol=1e-5, atol=1e-3)
def test_tikhonov():
"""Tikhonov regularization test."""
n_samples, n_features = 100, 10
# design covariance matrix of parameters
Gam = 15.
PriorCov = np.zeros([n_features, n_features])
for i in np.arange(0, n_features):
for j in np.arange(i, n_features):
PriorCov[i, j] = np.exp(-Gam * 1. / (np.float(n_features) ** 2) *
(np.float(i) - np.float(j)) ** 2)
PriorCov[j, i] = PriorCov[i, j]
if i == j:
PriorCov[i, j] += 0.01
PriorCov = 1. / np.max(PriorCov) * PriorCov
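    # The prior is a squared-exponential kernel over coefficient indices,
    # PriorCov[i, j] = exp(-Gam * (i - j)**2 / n_features**2), with 0.01 added
    # on the diagonal for conditioning and the matrix rescaled to a maximum of 1.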
# sample parameters as multivariate normal
beta0 = np.random.randn()
beta = np.random.multivariate_normal(np.zeros(n_features), PriorCov)
# sample train and test data
glm_sim = GLM(distr='softplus', score_metric='pseudo_R2')
X = np.random.randn(n_samples, n_features)
y = simulate_glm(glm_sim.distr, beta0, beta, X)
from sklearn.cross_validation import train_test_split
Xtrain, Xtest, ytrain, ytest = \
train_test_split(X, y, test_size=0.5, random_state=42)
# design tikhonov matrix
[U, S, V] = np.linalg.svd(PriorCov, full_matrices=False)
Tau = np.dot(np.diag(1. / np.sqrt(S)), U)
Tau = 1. / np.sqrt(np.float(n_samples)) * Tau / Tau.max()
# fit model with batch gradient
glm_tikhonov = GLM(distr='softplus',
alpha=0.0,
Tau=Tau,
solver='batch-gradient',
tol=1e-5,
score_metric='pseudo_R2')
glm_tikhonov.fit(Xtrain, ytrain)
R2_train, R2_test = dict(), dict()
R2_train['tikhonov'] = glm_tikhonov.score(Xtrain, ytrain)
R2_test['tikhonov'] = glm_tikhonov.score(Xtest, ytest)
# fit model with cdfast
glm_tikhonov = GLM(distr='softplus',
alpha=0.0,
Tau=Tau,
solver='cdfast',
tol=1e-5,
score_metric='pseudo_R2')
glm_tikhonov.fit(Xtrain, ytrain)
R2_train, R2_test = dict(), dict()
R2_train['tikhonov'] = glm_tikhonov.score(Xtrain, ytrain)
R2_test['tikhonov'] = glm_tikhonov.score(Xtest, ytest)
def test_group_lasso():
"""Group Lasso test."""
n_samples, n_features = 100, 90
# assign group ids
groups = np.zeros(90)
groups[0:29] = 1
groups[30:59] = 2
groups[60:] = 3
# sample random coefficients
beta0 = np.random.normal(0.0, 1.0, 1)
beta = np.random.normal(0.0, 1.0, n_features)
beta[groups == 2] = 0.
# create an instance of the GLM class
glm_group = GLM(distr='softplus', alpha=1.)
# simulate training data
Xr = np.random.normal(0.0, 1.0, [n_samples, n_features])
yr = simulate_glm(glm_group.distr, beta0, beta, Xr)
# scale and fit
scaler = StandardScaler().fit(Xr)
glm_group.fit(scaler.transform(Xr), yr)
def test_glmnet():
"""Test glmnet."""
scaler = StandardScaler()
n_samples, n_features = 100, 10
# coefficients
beta0 = 1. / (np.float(n_features) + 1.) * \
np.random.normal(0.0, 1.0)
beta = 1. / (np.float(n_features) + 1.) * \
np.random.normal(0.0, 1.0, (n_features,))
distrs = ['softplus', 'gaussian', 'poisson', 'binomial', 'probit']
solvers = ['batch-gradient', 'cdfast']
score_metric = 'pseudo_R2'
learning_rate = 2e-1
for solver in solvers:
for distr in distrs:
glm = GLM(distr, learning_rate=learning_rate,
solver=solver, score_metric=score_metric)
assert_true(repr(glm))
np.random.seed(glm.random_state)
X_train = np.random.normal(0.0, 1.0, [n_samples, n_features])
y_train = simulate_glm(glm.distr, beta0, beta, X_train)
X_train = scaler.fit_transform(X_train)
glm.fit(X_train, y_train)
beta_ = glm.beta_
assert_allclose(beta, beta_, atol=0.5) # check fit
y_pred = glm.predict(scaler.transform(X_train))
assert_equal(y_pred.shape[0], X_train.shape[0])
# test fit_predict
glm_poisson = GLM(distr='softplus')
glm_poisson.fit_predict(X_train, y_train)
assert_raises(ValueError, glm_poisson.fit_predict,
X_train[None, ...], y_train)
def test_glmcv():
"""Test GLMCV class."""
scaler = StandardScaler()
n_samples, n_features = 100, 10
# coefficients
beta0 = 1. / (np.float(n_features) + 1.) * \
np.random.normal(0.0, 1.0)
beta = 1. / (np.float(n_features) + 1.) * \
np.random.normal(0.0, 1.0, (n_features,))
distrs = ['softplus', 'gaussian', 'poisson', 'binomial', 'probit', 'gamma']
solvers = ['batch-gradient', 'cdfast']
score_metric = 'pseudo_R2'
learning_rate = 2e-1
for solver in solvers:
for distr in distrs:
if distr == 'gamma' and solver == 'cdfast':
continue
glm = GLMCV(distr, learning_rate=learning_rate,
solver=solver, score_metric=score_metric)
assert_true(repr(glm))
np.random.seed(glm.random_state)
X_train = np.random.normal(0.0, 1.0, [n_samples, n_features])
y_train = simulate_glm(glm.distr, beta0, beta, X_train)
X_train = scaler.fit_transform(X_train)
glm.fit(X_train, y_train)
beta_ = glm.beta_
assert_allclose(beta, beta_, atol=0.5) # check fit
y_pred = glm.predict(scaler.transform(X_train))
assert_equal(y_pred.shape[0], X_train.shape[0])
def test_cv():
"""Simple CV check."""
# XXX: don't use scikit-learn for tests.
X, y = make_regression()
cv = KFold(X.shape[0], 5)
glm_normal = GLM(distr='gaussian', alpha=0.01, reg_lambda=0.1)
# check that it returns 5 scores
scores = cross_val_score(glm_normal, X, y, cv=cv)
assert_equal(len(scores), 5)
param_grid = [{'alpha': np.linspace(0.01, 0.99, 2)},
{'reg_lambda': np.logspace(np.log(0.5), np.log(0.01),
10, base=np.exp(1))}]
glmcv = GridSearchCV(glm_normal, param_grid, cv=cv)
glmcv.fit(X, y)
def test_cdfast():
"""Test all functionality related to fast coordinate descent"""
scaler = StandardScaler()
n_samples = 1000
n_features = 100
n_classes = 5
density = 0.1
distrs = ['softplus', 'gaussian', 'binomial', 'poisson', 'probit']
for distr in distrs:
glm = GLM(distr, solver='cdfast')
np.random.seed(glm.random_state)
# coefficients
beta0 = np.random.rand()
beta = sps.rand(n_features, 1, density=density).toarray()[:, 0]
# data
X = np.random.normal(0.0, 1.0, [n_samples, n_features])
X = scaler.fit_transform(X)
y = simulate_glm(glm.distr, beta0, beta, X)
# compute grad and hess
beta_ = np.zeros((n_features + 1,))
beta_[0] = beta0
beta_[1:] = beta
z = beta_[0] + np.dot(X, beta_[1:])
k = 1
xk = X[:, k - 1]
gk, hk = _gradhess_logloss_1d(glm.distr, xk, y, z, glm.eta)
# test grad and hess
if distr != 'multinomial':
assert_equal(np.size(gk), 1)
assert_equal(np.size(hk), 1)
assert_true(isinstance(gk, float))
assert_true(isinstance(hk, float))
else:
assert_equal(gk.shape[0], n_classes)
assert_equal(hk.shape[0], n_classes)
assert_true(isinstance(gk, np.ndarray))
assert_true(isinstance(hk, np.ndarray))
assert_equal(gk.ndim, 1)
assert_equal(hk.ndim, 1)
# test cdfast
ActiveSet = np.ones(n_features + 1)
beta_ret, z_ret = glm._cdfast(X, y, z,
ActiveSet, beta_, glm.reg_lambda)
assert_equal(beta_ret.shape, beta_.shape)
assert_equal(z_ret.shape, z.shape)
|
the872/pyglmnet
|
tests/test_pyglmnet.py
|
Python
|
mit
| 9,602
|
[
"Gaussian"
] |
d2f934f7cf554f4f5e48cb7bef1a9b7633e2c53c6d24258a456fda289b97d0e8
|
import numpy as np
import os
import cv2
import sys
#sys.setrecursionlimit(1000000)
import Config
def init_parts(bkg_path,em_it = 1):
emit_parts_root = Config.parts_root_folder%em_it
if not os.path.exists(emit_parts_root):
os.mkdir(emit_parts_root)
for cam in Config.cameras_list:
if not os.path.exists(emit_parts_root + 'c%d/'%cam):
os.mkdir(emit_parts_root + 'c%d/'%cam)
for fid in Config.img_index_list:
print cam,fid
im = cv2.imread(bkg_path%(cam,fid))[:,:,0]>0
H,W = im.shape[0:2]
parts_out = np.zeros((H,W,Config.n_parts))
parts_out[:,:,-1] = im
np.save(emit_parts_root+ 'c%d/%d.npy'%(cam,fid),parts_out)
#Initialize gaussian parts
gaussian_parts = np.zeros(8*(Config.n_parts-1))+5
np.savetxt(emit_parts_root + 'gaussian_params.txt', gaussian_parts)
|
pierrebaque/EM
|
EM_funcs.py
|
Python
|
gpl-3.0
| 910
|
[
"Gaussian"
] |
31552f2a3b3668950b3962179da346f0926cbb9e32e85ae10625a9ff40ee6992
|
# pylint: disable=bad-continuation
"""
Certificate HTML webview.
"""
import logging
import urllib
from datetime import datetime
from uuid import uuid4
import pytz
from django.conf import settings
from django.contrib.auth.models import User
from django.http import Http404, HttpResponse
from django.template import RequestContext
from django.utils.encoding import smart_str
from django.utils import translation
from eventtracking import tracker
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from badges.events.course_complete import get_completion_badge
from badges.utils import badges_enabled
from lms.djangoapps.certificates.api import (
emit_certificate_event,
get_active_web_certificate,
get_certificate_footer_context,
get_certificate_header_context,
get_certificate_template,
get_certificate_url
)
from lms.djangoapps.certificates.models import (
CertificateGenerationCourseSetting,
CertificateHtmlViewConfiguration,
CertificateSocialNetworks,
CertificateStatuses,
GeneratedCertificate
)
from courseware.access import has_access
from courseware.courses import get_course_by_id
from edxmako.shortcuts import render_to_response
from edxmako.template import Template
from openedx.core.djangoapps.catalog.utils import get_course_run_details
from openedx.core.djangoapps.lang_pref.api import get_closest_released_language
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.lib.courses import course_image_url
from openedx.core.djangoapps.certificates.api import display_date_for_certificate, certificates_viewable_for_course
from student.models import LinkedInAddToProfileConfiguration
from util import organizations_helpers as organization_api
from util.date_utils import strftime_localized
from util.views import handle_500
from openedx.features.student_certificates.helpers import override_update_certificate_context
log = logging.getLogger(__name__)
_ = translation.ugettext
INVALID_CERTIFICATE_TEMPLATE_PATH = 'certificates/invalid.html'
def get_certificate_description(mode, certificate_type, platform_name):
"""
:return certificate_type_description on the basis of current mode
"""
certificate_type_description = None
if mode == 'honor':
# Translators: This text describes the 'Honor' course certificate type.
certificate_type_description = _("An {cert_type} certificate signifies that a "
"learner has agreed to abide by the honor code established by {platform_name} "
"and has completed all of the required tasks for this course under its "
"guidelines.").format(cert_type=certificate_type,
platform_name=platform_name)
elif mode == 'verified':
# Translators: This text describes the 'ID Verified' course certificate type, which is a higher level of
# verification offered by edX. This type of verification is useful for professional education/certifications
certificate_type_description = _("A {cert_type} certificate signifies that a "
"learner has agreed to abide by the honor code established by {platform_name} "
"and has completed all of the required tasks for this course under its "
"guidelines. A {cert_type} certificate also indicates that the "
"identity of the learner has been checked and "
"is valid.").format(cert_type=certificate_type,
platform_name=platform_name)
elif mode == 'xseries':
# Translators: This text describes the 'XSeries' course certificate type. An XSeries is a collection of
# courses related to each other in a meaningful way, such as a specific topic or theme, or even an organization
certificate_type_description = _("An {cert_type} certificate demonstrates a high level of "
"achievement in a program of study, and includes verification of "
"the student's identity.").format(cert_type=certificate_type)
return certificate_type_description
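# Hedged illustration (not part of the original module): only the 'honor',
# 'verified' and 'xseries' modes produce a description above; any other mode
# falls through, so e.g.
#   get_certificate_description('audit', 'Audit', 'edX')  ->  None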
def _update_certificate_context(context, course, user_certificate, platform_name):
"""
Build up the certificate web view context using the provided values
(Helper method to keep the view clean)
"""
# Populate dynamic output values using the course/certificate data loaded above
certificate_type = context.get('certificate_type')
# Override the defaults with any mode-specific static values
context['certificate_id_number'] = user_certificate.verify_uuid
context['certificate_verify_url'] = "{prefix}{uuid}{suffix}".format(
prefix=context.get('certificate_verify_url_prefix'),
uuid=user_certificate.verify_uuid,
suffix=context.get('certificate_verify_url_suffix')
)
# Translators: The format of the date includes the full name of the month
date = display_date_for_certificate(course, user_certificate)
context['certificate_date_issued'] = _('{month} {day}, {year}').format(
month=strftime_localized(date, "%B"),
day=date.day,
year=date.year
)
# Translators: This text represents the verification of the certificate
context['document_meta_description'] = _('This is a valid {platform_name} certificate for {user_name}, '
'who participated in {partner_short_name} {course_number}').format(
platform_name=platform_name,
user_name=context['accomplishment_copy_name'],
partner_short_name=context['organization_short_name'],
course_number=context['course_number']
)
# Translators: This text is bound to the HTML 'title' element of the page and appears in the browser title bar
context['document_title'] = _("{partner_short_name} {course_number} Certificate | {platform_name}").format(
partner_short_name=context['organization_short_name'],
course_number=context['course_number'],
platform_name=platform_name
)
# Translators: This text fragment appears after the student's name (displayed in a large font) on the certificate
# screen. The text describes the accomplishment represented by the certificate information displayed to the user
context['accomplishment_copy_description_full'] = _("successfully completed, received a passing grade, and was "
"awarded this {platform_name} {certificate_type} "
"Certificate of Completion in ").format(
platform_name=platform_name,
certificate_type=context.get("certificate_type"))
certificate_type_description = get_certificate_description(user_certificate.mode, certificate_type, platform_name)
if certificate_type_description:
context['certificate_type_description'] = certificate_type_description
# Translators: This text describes the purpose (and therefore, value) of a course certificate
context['certificate_info_description'] = _("{platform_name} acknowledges achievements through "
"certificates, which are awarded for course activities "
"that {platform_name} students complete.").format(
platform_name=platform_name,
tos_url=context.get('company_tos_url'),
verified_cert_url=context.get('company_verified_certificate_url'))
def _update_context_with_basic_info(context, course_id, platform_name, configuration):
"""
Updates context dictionary with basic info required before rendering simplest
certificate templates.
"""
context['platform_name'] = platform_name
context['course_id'] = course_id
# Update the view context with the default ConfigurationModel settings
context.update(configuration.get('default', {}))
# Translators: 'All rights reserved' is a legal term used in copyrighting to protect published content
reserved = _("All rights reserved")
context['copyright_text'] = u'© {year} {platform_name}. {reserved}.'.format(
year=datetime.now(pytz.timezone(settings.TIME_ZONE)).year,
platform_name=platform_name,
reserved=reserved
)
# Translators: This text is bound to the HTML 'title' element of the page and appears
# in the browser title bar when a requested certificate is not found or recognized
context['document_title'] = _("Invalid Certificate")
context['company_tos_urltext'] = _("Terms of Service & Honor Code")
# Translators: A 'Privacy Policy' is a legal document/statement describing a website's use of personal information
context['company_privacy_urltext'] = _("Privacy Policy")
# Translators: This line appears as a byline to a header image and describes the purpose of the page
context['logo_subtitle'] = _("Certificate Validation")
# Translators: Accomplishments describe the awards/certifications obtained by students on this platform
context['accomplishment_copy_about'] = _('About {platform_name} Accomplishments').format(
platform_name=platform_name
)
# Translators: This line appears on the page just before the generation date for the certificate
context['certificate_date_issued_title'] = _("Issued On:")
# Translators: The Certificate ID Number is an alphanumeric value unique to each individual certificate
context['certificate_id_number_title'] = _('Certificate ID Number')
context['certificate_info_title'] = _('About {platform_name} Certificates').format(
platform_name=platform_name
)
context['certificate_verify_title'] = _("How {platform_name} Validates Student Certificates").format(
platform_name=platform_name
)
# Translators: This text describes the validation mechanism for a certificate file (known as GPG security)
context['certificate_verify_description'] = _('Certificates issued by {platform_name} are signed by a gpg key so '
'that they can be validated independently by anyone with the '
'{platform_name} public key. For independent verification, '
'{platform_name} uses what is called a '
'"detached signature""".').format(platform_name=platform_name)
context['certificate_verify_urltext'] = _("Validate this certificate for yourself")
# Translators: This text describes (at a high level) the mission and charter the edX platform and organization
context['company_about_description'] = _("{platform_name} offers interactive online classes and MOOCs.").format(
platform_name=platform_name)
context['company_about_title'] = _("About {platform_name}").format(platform_name=platform_name)
context['company_about_urltext'] = _("Learn more about {platform_name}").format(platform_name=platform_name)
context['company_courselist_urltext'] = _("Learn with {platform_name}").format(platform_name=platform_name)
context['company_careers_urltext'] = _("Work at {platform_name}").format(platform_name=platform_name)
context['company_contact_urltext'] = _("Contact {platform_name}").format(platform_name=platform_name)
# Translators: This text appears near the top of the certficate and describes the guarantee provided by edX
context['document_banner'] = _("{platform_name} acknowledges the following student accomplishment").format(
platform_name=platform_name
)
def _update_course_context(request, context, course, course_key, platform_name):
"""
Updates context dictionary with course info.
"""
context['full_course_image_url'] = request.build_absolute_uri(course_image_url(course))
course_title_from_cert = context['certificate_data'].get('course_title', '')
accomplishment_copy_course_name = course_title_from_cert if course_title_from_cert else course.display_name
context['accomplishment_copy_course_name'] = accomplishment_copy_course_name
course_number = course.display_coursenumber if course.display_coursenumber else course.number
context['course_number'] = course_number
if context['organization_long_name']:
# Translators: This text represents the description of course
context['accomplishment_copy_course_description'] = _('a course of study offered by {partner_short_name}, '
'an online learning initiative of '
'{partner_long_name}.').format(
partner_short_name=context['organization_short_name'],
partner_long_name=context['organization_long_name'],
platform_name=platform_name)
else:
# Translators: This text represents the description of course
context['accomplishment_copy_course_description'] = _('a course of study offered by '
'{partner_short_name}.').format(
partner_short_name=context['organization_short_name'],
platform_name=platform_name)
def _update_social_context(request, context, course, user, user_certificate, platform_name):
"""
Updates context dictionary with info required for social sharing.
"""
share_settings = configuration_helpers.get_value("SOCIAL_SHARING_SETTINGS", settings.SOCIAL_SHARING_SETTINGS)
context['facebook_share_enabled'] = share_settings.get('CERTIFICATE_FACEBOOK', False)
context['facebook_app_id'] = configuration_helpers.get_value("FACEBOOK_APP_ID", settings.FACEBOOK_APP_ID)
context['facebook_share_text'] = share_settings.get(
'CERTIFICATE_FACEBOOK_TEXT',
_("I completed the {course_title} course on {platform_name}.").format(
course_title=context['accomplishment_copy_course_name'],
platform_name=platform_name
)
)
context['twitter_share_enabled'] = share_settings.get('CERTIFICATE_TWITTER', False)
context['twitter_share_text'] = share_settings.get(
'CERTIFICATE_TWITTER_TEXT',
_("I completed a course at {platform_name}. Take a look at my certificate.").format(
platform_name=platform_name
)
)
share_url = request.build_absolute_uri(get_certificate_url(course_id=course.id, uuid=user_certificate.verify_uuid))
context['share_url'] = share_url
twitter_url = ''
if context.get('twitter_share_enabled', False):
twitter_url = 'https://twitter.com/intent/tweet?text={twitter_share_text}&url={share_url}'.format(
twitter_share_text=smart_str(context['twitter_share_text']),
share_url=urllib.quote_plus(smart_str(share_url))
)
context['twitter_url'] = twitter_url
context['linked_in_url'] = None
# If enabled, show the LinkedIn "add to profile" button
# Clicking this button sends the user to LinkedIn where they
# can add the certificate information to their profile.
linkedin_config = LinkedInAddToProfileConfiguration.current()
linkedin_share_enabled = share_settings.get('CERTIFICATE_LINKEDIN', linkedin_config.enabled)
if linkedin_share_enabled:
context['linked_in_url'] = linkedin_config.add_to_profile_url(
course.id,
course.display_name,
user_certificate.mode,
smart_str(share_url)
)
def _update_context_with_user_info(context, user, user_certificate):
"""
Updates context dictionary with user related info.
"""
user_fullname = user.profile.name
context['username'] = user.username
context['course_mode'] = user_certificate.mode
context['accomplishment_user_id'] = user.id
context['accomplishment_copy_name'] = user_fullname
context['accomplishment_copy_username'] = user.username
context['accomplishment_more_title'] = _("More Information About {user_name}'s Certificate:").format(
user_name=user_fullname
)
# Translators: This line is displayed to a user who has completed a course and achieved a certification
context['accomplishment_banner_opening'] = _("{fullname}, you earned a certificate!").format(
fullname=user_fullname
)
# Translators: This line congratulates the user and instructs them to share their accomplishment on social networks
context['accomplishment_banner_congrats'] = _("Congratulations! This page summarizes what "
"you accomplished. Show it off to family, friends, and colleagues "
"in your social and professional networks.")
# Translators: This line leads the reader to understand more about the certificate that a student has been awarded
context['accomplishment_copy_more_about'] = _("More about {fullname}'s accomplishment").format(
fullname=user_fullname
)
def _get_user_certificate(request, user, course_key, course, preview_mode=None):
"""
Retrieves user's certificate from db. Creates one in case of preview mode.
Returns None if there is no certificate generated for given user
otherwise returns `GeneratedCertificate` instance.
"""
user_certificate = None
if preview_mode:
# certificate is being previewed from studio
if has_access(request.user, 'instructor', course) or has_access(request.user, 'staff', course):
if course.certificate_available_date and not course.self_paced:
modified_date = course.certificate_available_date
else:
modified_date = datetime.now().date()
user_certificate = GeneratedCertificate(
mode=preview_mode,
verify_uuid=unicode(uuid4().hex),
modified_date=modified_date
)
elif certificates_viewable_for_course(course):
# certificate is being viewed by learner or public
try:
user_certificate = GeneratedCertificate.eligible_certificates.get(
user=user,
course_id=course_key,
status=CertificateStatuses.downloadable
)
except GeneratedCertificate.DoesNotExist:
pass
return user_certificate
def _track_certificate_events(request, context, course, user, user_certificate):
"""
Tracks web certificate view related events.
"""
# Badge Request Event Tracking Logic
course_key = course.location.course_key
if 'evidence_visit' in request.GET:
badge_class = get_completion_badge(course_key, user)
if not badge_class:
log.warning('Visit to evidence URL for badge, but badges not configured for course "%s"', course_key)
badges = []
else:
badges = badge_class.get_for_user(user)
if badges:
# There should only ever be one of these.
badge = badges[0]
tracker.emit(
'edx.badge.assertion.evidence_visited',
{
'badge_name': badge.badge_class.display_name,
'badge_slug': badge.badge_class.slug,
'badge_generator': badge.backend,
'issuing_component': badge.badge_class.issuing_component,
'user_id': user.id,
'course_id': unicode(course_key),
'enrollment_mode': badge.badge_class.mode,
'assertion_id': badge.id,
'assertion_image_url': badge.image_url,
'assertion_json_url': badge.assertion_url,
'issuer': badge.data.get('issuer'),
}
)
else:
log.warn(
"Could not find badge for %s on course %s.",
user.id,
course_key,
)
# track certificate evidence_visited event for analytics when certificate_user and accessing_user are different
if request.user and request.user.id != user.id:
emit_certificate_event('evidence_visited', user, unicode(course.id), course, {
'certificate_id': user_certificate.verify_uuid,
'enrollment_mode': user_certificate.mode,
'social_network': CertificateSocialNetworks.linkedin
})
def _update_configuration_context(context, configuration):
"""
Site Configuration will need to be able to override any hard coded
content that was put into the context in the
_update_certificate_context() call above. For example the
'company_about_description' talks about edX, which we most likely
do not want to keep in configurations.
So we need to re-apply any configuration/content that
we are sourcing from the database. This is somewhat duplicative of
the code at the beginning of this method, but we
need the configuration at the top as some error code paths
require that to be set up early on in the pipeline
"""
config_key = configuration_helpers.get_value('domain_prefix')
config = configuration.get("microsites", {})
if config_key and config:
context.update(config.get(config_key, {}))
def _update_badge_context(context, course, user):
"""
Updates context with badge info.
"""
badge = None
if badges_enabled() and course.issue_badges:
badges = get_completion_badge(course.location.course_key, user).get_for_user(user)
if badges:
badge = badges[0]
context['badge'] = badge
def _update_organization_context(context, course):
"""
Updates context with organization related info.
"""
partner_long_name, organization_logo = None, None
partner_short_name = course.display_organization if course.display_organization else course.org
organizations = organization_api.get_course_organizations(course_id=course.id)
if organizations:
        # TODO: Need to add support for multiple organizations. Currently we are interested in the first one.
organization = organizations[0]
partner_long_name = organization.get('name', partner_long_name)
partner_short_name = organization.get('short_name', partner_short_name)
organization_logo = organization.get('logo', None)
context['organization_long_name'] = partner_long_name
context['organization_short_name'] = partner_short_name
context['accomplishment_copy_course_org'] = partner_short_name
context['organization_logo'] = organization_logo
def render_cert_by_uuid(request, certificate_uuid):
"""
This public view generates an HTML representation of the specified certificate
"""
try:
certificate = GeneratedCertificate.eligible_certificates.get(
verify_uuid=certificate_uuid,
status=CertificateStatuses.downloadable
)
return render_html_view(request, certificate.user.id, unicode(certificate.course_id))
except GeneratedCertificate.DoesNotExist:
raise Http404
@handle_500(
template_path="certificates/server-error.html",
test_func=lambda request: request.GET.get('preview', None)
)
def render_html_view(request, user_id, course_id):
"""
This public view generates an HTML representation of the specified user and course
If a certificate is not available, we display a "Sorry!" screen instead
"""
try:
user_id = int(user_id)
except ValueError:
raise Http404
preview_mode = request.GET.get('preview', None)
platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
configuration = CertificateHtmlViewConfiguration.get_config()
# Kick the user back to the "Invalid" screen if the feature is disabled globally
if not settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
return _render_invalid_certificate(course_id, platform_name, configuration)
# Load the course and user objects
try:
course_key = CourseKey.from_string(course_id)
user = User.objects.get(id=user_id)
course = get_course_by_id(course_key)
# For any course or user exceptions, kick the user back to the "Invalid" screen
except (InvalidKeyError, User.DoesNotExist, Http404) as exception:
error_str = (
"Invalid cert: error finding course %s or user with id "
"%d. Specific error: %s"
)
log.info(error_str, course_id, user_id, str(exception))
return _render_invalid_certificate(course_id, platform_name, configuration)
# Kick the user back to the "Invalid" screen if the feature is disabled for the course
if not course.cert_html_view_enabled:
log.info(
"Invalid cert: HTML certificates disabled for %s. User id: %d",
course_id,
user_id,
)
return _render_invalid_certificate(course_id, platform_name, configuration)
# Load user's certificate
user_certificate = _get_user_certificate(request, user, course_key, course, preview_mode)
if not user_certificate:
log.info(
"Invalid cert: User %d does not have eligible cert for %s.",
user_id,
course_id,
)
return _render_invalid_certificate(course_id, platform_name, configuration)
# Get the active certificate configuration for this course
# If we do not have an active certificate, we'll need to send the user to the "Invalid" screen
# Passing in the 'preview' parameter, if specified, will return a configuration, if defined
active_configuration = get_active_web_certificate(course, preview_mode)
if active_configuration is None:
log.info(
"Invalid cert: course %s does not have an active configuration. User id: %d",
course_id,
user_id,
)
return _render_invalid_certificate(course_id, platform_name, configuration)
# Get data from Discovery service that will be necessary for rendering this Certificate.
catalog_data = _get_catalog_data_for_course(course_key)
# Determine whether to use the standard or custom template to render the certificate.
custom_template = None
custom_template_language = None
if settings.FEATURES.get('CUSTOM_CERTIFICATE_TEMPLATES_ENABLED', False):
custom_template, custom_template_language = _get_custom_template_and_language(
course.id,
user_certificate.mode,
catalog_data.pop('content_language', None)
)
# Determine the language that should be used to render the certificate.
# For the standard certificate template, use the user language. For custom templates, use
# the language associated with the template.
user_language = translation.get_language()
certificate_language = custom_template_language if custom_template else user_language
# Generate the certificate context in the correct language, then render the template.
with translation.override(certificate_language):
context = {'user_language': user_language}
_update_context_with_basic_info(context, course_id, platform_name, configuration)
context['certificate_data'] = active_configuration
# Append/Override the existing view context values with any mode-specific ConfigurationModel values
context.update(configuration.get(user_certificate.mode, {}))
# Append organization info
_update_organization_context(context, course)
# Append course info
_update_course_context(request, context, course, course_key, platform_name)
# Append course run info from discovery
context.update(catalog_data)
# Append user info
_update_context_with_user_info(context, user, user_certificate)
# Append PhilU related context
override_update_certificate_context(request, context, course, user_certificate, preview_mode)
# Append/Override the existing view context values with certificate specific values
_update_certificate_context(context, course, user_certificate, platform_name)
# Append badge info
_update_badge_context(context, course, user)
# Append site configuration overrides
_update_configuration_context(context, configuration)
# Add certificate header/footer data to current context
context.update(get_certificate_header_context(is_secure=request.is_secure()))
context.update(get_certificate_footer_context())
# Append/Override the existing view context values with any course-specific static values from Advanced Settings
context.update(course.cert_html_view_overrides)
# Track certificate view events
_track_certificate_events(request, context, course, user, user_certificate)
# Render the certificate
return _render_valid_certificate(request, context, custom_template)
def _get_catalog_data_for_course(course_key):
"""
Retrieve data from the Discovery service necessary for rendering a certificate for a specific course.
"""
course_certificate_settings = CertificateGenerationCourseSetting.get(course_key)
if not course_certificate_settings:
return {}
catalog_data = {}
course_run_fields = []
if course_certificate_settings.language_specific_templates_enabled:
course_run_fields.append('content_language')
if course_certificate_settings.include_hours_of_effort:
course_run_fields.extend(['weeks_to_complete', 'max_effort'])
if course_run_fields:
course_run_data = get_course_run_details(course_key, course_run_fields)
if course_run_data.get('weeks_to_complete') and course_run_data.get('max_effort'):
try:
weeks_to_complete = int(course_run_data['weeks_to_complete'])
max_effort = int(course_run_data['max_effort'])
catalog_data['hours_of_effort'] = weeks_to_complete * max_effort
except ValueError:
log.exception('Error occurred while parsing course run details')
catalog_data['content_language'] = course_run_data.get('content_language')
return catalog_data
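# Hedged illustration (example values are assumed, not from the original file):
# with weeks_to_complete = 6 and max_effort = 5 returned by Discovery, the block
# above sets catalog_data['hours_of_effort'] = 6 * 5 = 30.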
def _get_custom_template_and_language(course_id, course_mode, course_language):
"""
Return the custom certificate template, if any, that should be rendered for the provided course/mode/language
combination, along with the language that should be used to render that template.
"""
closest_released_language = get_closest_released_language(course_language) if course_language else None
template = get_certificate_template(course_id, course_mode, closest_released_language)
if template and template.language:
return (template, closest_released_language)
elif template:
return (template, settings.LANGUAGE_CODE)
else:
return (None, None)
def _render_invalid_certificate(course_id, platform_name, configuration):
context = {}
_update_context_with_basic_info(context, course_id, platform_name, configuration)
return render_to_response(INVALID_CERTIFICATE_TEMPLATE_PATH, context)
def _render_valid_certificate(request, context, custom_template=None):
if custom_template:
template = Template(
custom_template.template,
output_encoding='utf-8',
input_encoding='utf-8',
default_filters=['decode.utf8'],
encoding_errors='replace',
)
context = RequestContext(request, context)
return HttpResponse(template.render(context))
else:
return render_to_response("certificates/valid.html", context)
|
philanthropy-u/edx-platform
|
lms/djangoapps/certificates/views/webview.py
|
Python
|
agpl-3.0
| 32,149
|
[
"VisIt"
] |
3c3a88e600a8321d2adf2acc1310a3d7d48c06fe8f3514cdf26b3cc58efb97b2
|
import pysam
import argparse
import sys
import logging
import os
import re
def write_reads(fout, refmap, reads, counts):
hostReads = []
feedReads = []
hasSecondary = False
for read in reads:
if read.is_secondary:
hasSecondary = True
continue
if refmap[read.reference_id]:
hostReads.append(read)
else:
feedReads.append(read)
hasBoth = False
savedReads = []
if len(hostReads) > 0:
savedReads = hostReads
        if len(feedReads) > 0:
            hasBoth = True
            counts["both"] += 1
else:
counts["host"] += 1
else:
savedReads = feedReads
counts["feed"] += 1
if hasBoth or hasSecondary:
readCount = {True:0, False:0}
for read in savedReads:
readCount[read.is_read1] += 1
numberOfHit = max(readCount.values())
#oldNumberOfHit = read.get_tag("NH")
#print("hasSecondary:%s, hasBoth:%s, old:%d, new:%d" % (str(hasSecondary), str(hasBoth), oldNumberOfHit, numberOfHit))
for read in savedReads:
read.set_tag("NH", numberOfHit)
for read in savedReads:
fout.write(read)
DEBUG = False
NOT_DEBUG= not DEBUG
parser = argparse.ArgumentParser(description="filter bam by chromosome priority using pattern.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', action='store', nargs='?', help='Input BAM file', required=NOT_DEBUG)
parser.add_argument('-o', '--output', action='store', nargs='?', help="Output BAM file", required=NOT_DEBUG)
parser.add_argument('--host_prefix', action='store', nargs='?', help="Input chromosome prefix for host genome (such as mm10_)", required=NOT_DEBUG)
args = parser.parse_args()
if DEBUG:
args.input="/scratch/vickers_lab/projects/20200805_5057_AD_rnaseq_hsammu_combined_byMars.tiger.bam/sort_by_name/result/Blank.sortedByName.bam"
args.output="/scratch/vickers_lab/projects/20200805_5057_AD_rnaseq_hsammu_combined_byMars.tiger.bam/Blank.name.filtered.bam"
args.host_prefix="mm10_"
# args.input="/scratch/vickers_lab/projects/20200805_5057_AD_rnaseq_hsammu_combined_byMars.tiger.bam/chr1.name.bam"
# args.output="/scratch/vickers_lab/projects/20200805_5057_AD_rnaseq_hsammu_combined_byMars.tiger.bam/chr1.name.filtered.bam"
# args.host_prefix="mm10_"
logger = logging.getLogger('filterMixBam')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
tmpfile = args.output + ".tmp.bam"
counts={"host":0, "feed":0, "both":0}
refmap = {}
with pysam.Samfile(args.input, "rb") as sam:
for nf in range(0, sam.nreferences):
if(sam.get_reference_name(nf) != sam.references[nf]):
raise Exception("%s != %s" % (sam.get_reference_name(nf), sam.references[nf]))
refmap[nf] = sam.references[nf].startswith(args.host_prefix)
header = sam.header
with pysam.AlignmentFile(tmpfile, "wb", header=header) as fout:
processed = 0
accepted = 0
reads = []
lastQuery = ""
for read in sam.fetch(until_eof=True):
if read.is_unmapped:
continue
if read.qname != lastQuery:
processed += 1
if processed % 100000 == 0:
logger.info("processed %d, host %d, feed %d, both %d" % (processed, counts["host"], counts["feed"], counts["both"]))
write_reads(fout, refmap, reads, counts)
lastQuery = read.qname
reads = [read]
else:
reads.append(read)
write_reads(fout, refmap, reads, counts)
logger.info("processed %d, host %d, feed %d, both %d" % (processed, counts["host"], counts["feed"], counts["both"]))
txtfile = os.path.splitext(args.output)[0]+'.txt'
with open(txtfile, "wt") as flog:
flog.write("Category\tCount\n")
flog.write("processed\t%d\n" % processed)
flog.write("host\t%d\n" % counts["host"])
flog.write("feed\t%d\n" % counts["feed"])
flog.write("both\t%d\n" % counts["both"])
os.rename(tmpfile, args.output)
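# Hedged usage sketch (file names are hypothetical): filter a name-sorted BAM that
# mixes host and feed chromosomes, preferring host (mm10_*) alignments when a
# query maps to both genomes.
#   python filterMixBam.py -i sample.sortedByName.bam -o sample.filtered.bam --host_prefix mm10_
# A per-category count summary is then written next to the output as sample.filtered.txt.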
|
shengqh/ngsperl
|
lib/Alignment/filterMixBam.py
|
Python
|
apache-2.0
| 3,954
|
[
"pysam"
] |
4cd901994a880cd51f7732cfd8a40ac8adfe1ec62f5b111089648b03889183bc
|
#! /usr/bin/env python
# This file comes originally from: https://github.com/Kentzo/git-archive-all
#
# coding=utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2010 Ilya Kulakov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
from __future__ import unicode_literals
import logging
from os import extsep, path, readlink, curdir
from subprocess import CalledProcessError, Popen, PIPE
import sys
import tarfile
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED
import re
__version__ = "1.17"
class GitArchiver(object):
"""
GitArchiver
Scan a git repository and export all tracked files, and submodules.
Checks for .gitattributes files in each directory and uses 'export-ignore'
pattern entries for ignore files in the archive.
>>> archiver = GitArchiver(main_repo_abspath='my/repo/path')
>>> archiver.create('output.zip')
"""
LOG = logging.getLogger('GitArchiver')
def __init__(self, prefix='', exclude=True, force_sub=False, extra=None, main_repo_abspath=None):
"""
@param prefix: Prefix used to prepend all paths in the resulting archive.
Extra file paths are only prefixed if they are not relative.
E.g. if prefix is 'foo' and extra is ['bar', '/baz'] the resulting archive will look like this:
/
baz
foo/
bar
@type prefix: str
@param exclude: Determines whether archiver should follow rules specified in .gitattributes files.
@type exclude: bool
@param force_sub: Determines whether submodules are initialized and updated before archiving.
@type force_sub: bool
@param extra: List of extra paths to include in the resulting archive.
@type extra: list
@param main_repo_abspath: Absolute path to the main repository (or one of subdirectories).
If given path is path to a subdirectory (but not a submodule directory!) it will be replaced
with abspath to top-level directory of the repository.
If None, current cwd is used.
@type main_repo_abspath: str
"""
if extra is None:
extra = []
if main_repo_abspath is None:
main_repo_abspath = path.abspath('')
elif not path.isabs(main_repo_abspath):
raise ValueError("main_repo_abspath must be an absolute path")
try:
main_repo_abspath = path.abspath(self.run_git_shell('git rev-parse --show-toplevel', main_repo_abspath).rstrip())
except CalledProcessError:
raise ValueError("{0} is not part of a git repository".format(main_repo_abspath))
self.prefix = prefix
self.exclude = exclude
self.extra = extra
self.force_sub = force_sub
self.main_repo_abspath = main_repo_abspath
def create(self, output_path, dry_run=False, output_format=None):
"""
Create the archive at output_file_path.
Type of the archive is determined either by extension of output_file_path or by output_format.
Supported formats are: gz, zip, bz2, xz, tar, tgz, txz
@param output_path: Output file path.
@type output_path: str
@param dry_run: Determines whether create should do nothing but print what it would archive.
@type dry_run: bool
@param output_format: Determines format of the output archive. If None, format is determined from extension
of output_file_path.
@type output_format: str
"""
if output_format is None:
file_name, file_ext = path.splitext(output_path)
output_format = file_ext[len(extsep):].lower()
self.LOG.debug("Output format is not explicitly set, determined format is {0}.".format(output_format))
if not dry_run:
if output_format == 'zip':
archive = ZipFile(path.abspath(output_path), 'w')
def add_file(file_path, arcname):
if not path.islink(file_path):
archive.write(file_path, arcname, ZIP_DEFLATED)
else:
i = ZipInfo(arcname)
i.create_system = 3
i.external_attr = 0xA1ED0000
archive.writestr(i, readlink(file_path))
elif output_format in ['tar', 'bz2', 'gz', 'xz', 'tgz', 'txz']:
if output_format == 'tar':
t_mode = 'w'
elif output_format == 'tgz':
t_mode = 'w:gz'
elif output_format == 'txz':
t_mode = 'w:xz'
else:
t_mode = 'w:{0}'.format(output_format)
archive = tarfile.open(path.abspath(output_path), t_mode)
def add_file(file_path, arcname):
archive.add(file_path, arcname)
else:
raise RuntimeError("unknown format: {0}".format(output_format))
def archiver(file_path, arcname):
self.LOG.debug("Compressing {0} => {1}...".format(file_path, arcname))
add_file(file_path, arcname)
else:
archive = None
def archiver(file_path, arcname):
self.LOG.info("{0} => {1}".format(file_path, arcname))
self.archive_all_files(archiver)
if archive is not None:
archive.close()
def get_exclude_patterns(self, repo_abspath, repo_file_paths):
"""
Returns exclude patterns for a given repo. It looks for .gitattributes files in repo_file_paths.
Resulting dictionary will contain exclude patterns per path (relative to the repo_abspath).
E.g. {('.', 'Catalyst', 'Editions', 'Base'): ['Foo*', '*Bar']}
@param repo_abspath: Absolute path to the git repository.
@type repo_abspath: str
@param repo_file_paths: List of paths relative to the repo_abspath that are under git control.
@type repo_file_paths: list
@return: Dictionary representing exclude patterns.
Keys are tuples of strings. Values are lists of strings.
Returns None if self.exclude is not set.
@rtype: dict or None
"""
if not self.exclude:
return None
def read_attributes(attributes_abspath):
patterns = []
if path.isfile(attributes_abspath):
attributes = open(attributes_abspath, 'r').readlines()
patterns = []
for line in attributes:
tokens = line.strip().split()
if "export-ignore" in tokens[1:]:
patterns.append(tokens[0])
return patterns
exclude_patterns = {(): []}
# There may be no gitattributes.
try:
global_attributes_abspath = self.run_git_shell("git config --get core.attributesfile", repo_abspath).rstrip()
exclude_patterns[()] = read_attributes(global_attributes_abspath)
except:
# And it's valid to not have them.
pass
for attributes_abspath in [path.join(repo_abspath, f) for f in repo_file_paths if f.endswith(".gitattributes")]:
# Each .gitattributes affects only files within its directory.
key = tuple(self.get_path_components(repo_abspath, path.dirname(attributes_abspath)))
exclude_patterns[key] = read_attributes(attributes_abspath)
local_attributes_abspath = path.join(repo_abspath, ".git", "info", "attributes")
key = tuple(self.get_path_components(repo_abspath, repo_abspath))
if key in exclude_patterns:
exclude_patterns[key].extend(read_attributes(local_attributes_abspath))
else:
exclude_patterns[key] = read_attributes(local_attributes_abspath)
return exclude_patterns
def is_file_excluded(self, repo_abspath, repo_file_path, exclude_patterns):
"""
Checks whether file at a given path is excluded.
@param repo_abspath: Absolute path to the git repository.
@type repo_abspath: str
@param repo_file_path: Path to a file within repo_abspath.
@type repo_file_path: str
@param exclude_patterns: Exclude patterns with format specified for get_exclude_patterns.
@type exclude_patterns: dict
@return: True if file should be excluded. Otherwise False.
@rtype: bool
"""
if exclude_patterns is None or not len(exclude_patterns):
return False
from fnmatch import fnmatch
file_name = path.basename(repo_file_path)
components = self.get_path_components(repo_abspath, path.join(repo_abspath, path.dirname(repo_file_path)))
is_excluded = False
# We should check all patterns specified in intermediate directories to the given file.
# At the end we should also check for the global patterns (key '()' or empty tuple).
while not is_excluded:
key = tuple(components)
if key in exclude_patterns:
patterns = exclude_patterns[key]
for p in patterns:
if fnmatch(file_name, p) or fnmatch(repo_file_path, p):
self.LOG.debug("Exclude pattern matched {0}: {1}".format(p, repo_file_path))
is_excluded = True
if not len(components):
break
components.pop()
return is_excluded
def archive_all_files(self, archiver):
"""
Archive all files using archiver.
@param archiver: Callable that accepts 2 arguments:
abspath to file on the system and relative path within archive.
@type archiver: Callable
"""
for file_path in self.extra:
archiver(path.abspath(file_path), path.join(self.prefix, file_path))
for file_path in self.walk_git_files():
archiver(path.join(self.main_repo_abspath, file_path), path.join(self.prefix, file_path))
def walk_git_files(self, repo_path=''):
"""
An iterator method that yields a file path relative to main_repo_abspath
for each file that should be included in the archive.
Skips those that match the exclusion patterns found in
any discovered .gitattributes files along the way.
Recurs into submodules as well.
@param repo_path: Path to the git submodule repository relative to main_repo_abspath.
@type repo_path: str
@return: Iterator to traverse files under git control relative to main_repo_abspath.
@rtype: Iterable
"""
repo_abspath = path.join(self.main_repo_abspath, repo_path)
repo_file_paths = self.run_git_shell(
"git ls-files --cached --full-name --no-empty-directory",
repo_abspath
).splitlines()
exclude_patterns = self.get_exclude_patterns(repo_abspath, repo_file_paths)
for repo_file_path in repo_file_paths:
# Git puts path in quotes if file path has unicode characters.
repo_file_path = repo_file_path.strip('"') # file path relative to current repo
repo_file_abspath = path.join(repo_abspath, repo_file_path) # absolute file path
main_repo_file_path = path.join(repo_path, repo_file_path) # file path relative to the main repo
# Only list symlinks and files.
if not path.islink(repo_file_abspath) and path.isdir(repo_file_abspath):
continue
if self.is_file_excluded(repo_abspath, repo_file_path, exclude_patterns):
continue
yield main_repo_file_path
if self.force_sub:
self.run_git_shell("git submodule init", repo_abspath)
self.run_git_shell("git submodule update", repo_abspath)
try:
repo_gitmodules_abspath = path.join(repo_abspath, ".gitmodules")
with open(repo_gitmodules_abspath) as f:
lines = f.readlines()
for l in lines:
m = re.match("^\s*path\s*=\s*(.*)\s*$", l)
if m:
submodule_path = m.group(1)
submodule_abspath = path.join(repo_path, submodule_path)
if self.is_file_excluded(repo_abspath, submodule_path, exclude_patterns):
continue
for submodule_file_path in self.walk_git_files(submodule_abspath):
rel_file_path = submodule_file_path.replace(repo_path, "", 1).strip("/")
if self.is_file_excluded(repo_abspath, rel_file_path, exclude_patterns):
continue
yield submodule_file_path
except IOError:
pass
@staticmethod
def get_path_components(repo_abspath, abspath):
"""
Split given abspath into components relative to repo_abspath.
These components are primarily used as unique keys of files and folders within a repository.
E.g. if repo_abspath is '/Documents/Hobby/ParaView/' and abspath is
'/Documents/Hobby/ParaView/Catalyst/Editions/Base/', function will return:
['.', 'Catalyst', 'Editions', 'Base']
First element is always os.curdir (concrete symbol depends on OS).
@param repo_abspath: Absolute path to the git repository. Normalized via os.path.normpath.
@type repo_abspath: str
@param abspath: Absolute path to a file within repo_abspath. Normalized via os.path.normpath.
@type abspath: str
@return: List of path components.
@rtype: list
"""
repo_abspath = path.normpath(repo_abspath)
abspath = path.normpath(abspath)
if not path.isabs(repo_abspath):
raise ValueError("repo_abspath MUST be absolute path.")
if not path.isabs(abspath):
raise ValueError("abspath MUST be absoulte path.")
if not path.commonprefix([repo_abspath, abspath]):
raise ValueError(
"abspath (\"{0}\") MUST have common prefix with repo_abspath (\"{1}\")"
.format(abspath, repo_abspath)
)
components = []
while not abspath == repo_abspath:
abspath, tail = path.split(abspath)
if tail:
components.insert(0, tail)
components.insert(0, curdir)
return components
@staticmethod
def run_git_shell(cmd, cwd=None):
"""
Runs git shell command, reads output and decodes it into unicode string.
@param cmd: Command to be executed.
@type cmd: str
@type cwd: str
@param cwd: Working directory.
@rtype: str
@return: Output of the command.
@raise CalledProcessError: Raises exception if return code of the command is non-zero.
"""
p = Popen(cmd, shell=True, stdout=PIPE, cwd=cwd)
output, _ = p.communicate()
output = output.decode('unicode_escape').encode('raw_unicode_escape').decode('utf-8')
if p.returncode:
if sys.version_info > (2, 6):
raise CalledProcessError(returncode=p.returncode, cmd=cmd, output=output)
else:
raise CalledProcessError(returncode=p.returncode, cmd=cmd)
return output
def main():
from optparse import OptionParser
parser = OptionParser(
usage="usage: %prog [-v] [--prefix PREFIX] [--no-exclude] [--force-submodules]"
" [--extra EXTRA1 [EXTRA2]] [--dry-run] OUTPUT_FILE",
version="%prog {0}".format(__version__)
)
parser.add_option('--prefix',
type='string',
dest='prefix',
default=None,
help="""prepend PREFIX to each filename in the archive.
OUTPUT_FILE name is used by default to avoid tarbomb.
You can set it to '' in order to explicitly request tarbomb""")
parser.add_option('-v', '--verbose',
action='store_true',
dest='verbose',
help='enable verbose mode')
parser.add_option('--no-exclude',
action='store_false',
dest='exclude',
default=True,
help="don't read .gitattributes files for patterns containing export-ignore attrib")
parser.add_option('--force-submodules',
action='store_true',
dest='force_sub',
help='force a git submodule init && git submodule update at each level before iterating submodules')
parser.add_option('--extra',
action='append',
dest='extra',
default=[],
help="any additional files to include in the archive")
parser.add_option('--dry-run',
action='store_true',
dest='dry_run',
help="don't actually archive anything, just show what would be done")
options, args = parser.parse_args()
if len(args) != 1:
parser.error("You must specify exactly one output file")
output_file_path = args[0]
if path.isdir(output_file_path):
parser.error("You cannot use directory as output")
# avoid tarbomb
if options.prefix is not None:
options.prefix = path.join(options.prefix, '')
else:
import re
output_name = path.basename(output_file_path)
output_name = re.sub(
            r'(\.zip|\.tar|\.tgz|\.txz|\.gz|\.bz2|\.xz|\.tar\.gz|\.tar\.bz2|\.tar\.xz)$',
'',
output_name
) or "Archive"
options.prefix = path.join(output_name, '')
try:
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('%(message)s'))
GitArchiver.LOG.addHandler(handler)
GitArchiver.LOG.setLevel(logging.DEBUG if options.verbose else logging.INFO)
archiver = GitArchiver(options.prefix,
options.exclude,
options.force_sub,
options.extra)
archiver.create(output_file_path, options.dry_run)
except Exception as e:
parser.exit(2, "{0}\n".format(e))
sys.exit(0)
if __name__ == '__main__':
main()
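# Hedged usage sketch (archive name is hypothetical): archive the current repository,
# forcing submodule init/update first, with paths placed under a prefix derived from
# the output file name.
#   python git-archive-all.py --force-submodules myproject.tar.gz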
|
jorisv/Tasks
|
cmake/git-archive-all.py
|
Python
|
lgpl-3.0
| 19,609
|
[
"ParaView"
] |
08f802926083c5d3bd3a82477596bf76d627199f0e106a9b5ef6783ba56d5e35
|
"""Integrate stochastic master equations in vectorized form.
.. py:module:: integrate.py
:synopsis: Integrate stochastic master equations in vectorized form.
.. moduleauthor:: Jonathan Gross <jarthurgross@gmail.com>
"""
from functools import partial
import itertools as it
import numpy as np
from scipy.integrate import solve_ivp
import sparse
import pysme.system_builder as sb
import pysme.sparse_system_builder as ssb
import pysme.sde as sde
import pysme.gellmann as gm
def process_default_kwargs(kwargs, default_kwargs):
"""Update a default kwarg dict with user-supplied values
"""
if kwargs is None:
kwargs = {}
for kwarg, value in kwargs.items():
default_kwargs[kwarg] = value
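# Hedged illustration (values are made up): the update happens in place, so
#   defaults = {'method': 'BDF', 't_eval': None}
#   process_default_kwargs({'method': 'RK45'}, defaults)
# leaves defaults == {'method': 'RK45', 't_eval': None}.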
def b_dx_b(G2, k_T_G, G, k_T, rho):
r"""A term in Taylor integration methods.
Function to return the :math:`\left(\vec{b}(\vec{\rho})\cdot
\vec{\nabla}_{\vec{\rho}}\right)\vec{b}(\vec{\rho})` term for Milstein
integration.
Parameters
----------
G2: numpy.array
:math:`G^2`.
k_T_G: numpy.array
:math:`\vec{k}^TG`.
G: numpy.array
:math:`G`.
k_T: numpy.array
:math:`\vec{k}^T`.
rho: numpy.array
:math:`\rho`.
Returns
-------
numpy.array
:math:`\left(\vec{b}(\vec{\rho})\cdot
\vec{\nabla}_{\vec{\rho}}\right)\vec{b}(\vec{\rho})`.
"""
k_rho_dot = np.dot(k_T, rho)
return ((np.dot(k_T_G, rho) + 2*k_rho_dot**2)*rho +
np.dot(G2 + 2*k_rho_dot*G, rho))
def b_dx_b_tr_dec(G2, rho):
r"""Same as :func:`b_dx_b`, but for the linear differential equation.
Because the nonlinear terms are discarded, this function requires fewer
arguments.
"""
return np.dot(G2, rho)
def b_dx_a(QG, k_T, Q, rho):
r"""A term in Taylor integration methods.
Function to return the :math:`\left(\vec{b}(\vec{\rho})\cdot
\vec{\nabla}_{\vec{\rho}}\right)\vec{a}(\vec{\rho})` term for stochastic
integration.
Parameters
----------
QG: numpy.array
:math:`QG`.
k_T: numpy.array
:math:`\vec{k}^T`.
Q: numpy.array
:math:`Q`.
rho: numpy.array
:math:`\rho`.
Returns
-------
numpy.array
:math:`\left(\vec{b}(\vec{\rho})\cdot
\vec{\nabla}_{\vec{\rho}}\right)\vec{a}(\vec{\rho})`.
"""
return np.dot(QG + np.dot(k_T, rho)*Q, rho)
def b_dx_a_tr_dec(QG, rho):
r"""Same as :func:`b_dx_a`, but for the linear differential equation.
Because the nonlinear terms are discarded, this function requires fewer
arguments.
"""
return np.dot(QG, rho)
def a_dx_b(GQ, k_T, Q, k_T_Q, rho):
r"""A term in Taylor integration methods.
Function to return the :math:`\left(\vec{a}(\vec{\rho})\cdot
\vec{\nabla}_{\vec{\rho}}\right)\vec{b}(\vec{\rho})` term for stochastic
integration.
    Parameters
    ----------
    GQ: numpy.array
:math:`GQ`.
k_T: numpy.array
:math:`\vec{k}^T`.
Q: numpy.array
:math:`Q`.
k_T_Q: numpy.array
:math:`\vec{k}^TQ`.
rho: numpy.array
:math:`\rho`.
Returns
-------
numpy.array
:math:`\left(\vec{a}(\vec{\rho})\cdot
\vec{\nabla}_{\vec{\rho}}\right)\vec{b}(\vec{\rho})`.
"""
return np.dot(GQ + np.dot(k_T, rho)*Q, rho) + np.dot(k_T_Q, rho)*rho
def a_dx_b_tr_dec(GQ, rho):
r"""Same as :func:`a_dx_b`, but for the linear differential equation.
Because the nonlinear terms are discarded, this function requires fewer
arguments.
"""
return np.dot(GQ, rho)
def a_dx_a(Q2, rho):
r"""A term in Taylor integration methods.
Function to return the :math:`\left(\vec{a}(\vec{\rho})\cdot
\vec{\nabla}_{\vec{\rho}}\right)\vec{a}(\vec{\rho})` term for stochastic
integration.
Parameters
----------
Q2: numpy.array
:math:`Q^2`.
rho: numpy.array
:math:`\rho`.
Returns
-------
numpy.array
:math:`\left(\vec{a}(\vec{\rho})\cdot
\vec{\nabla}_{\vec{\rho}}\right)\vec{a}(\vec{\rho})`.
"""
return np.dot(Q2, rho)
def b_dx_b_dx_b(G3, G2, G, k_T, k_T_G, k_T_G2, rho):
r"""A term in Taylor integration methods.
Function to return the :math:`\left(\vec{b}(\vec{\rho})\cdot
\vec{\nabla}_{\vec{\rho}}\right)^2\vec{b}(\vec{\rho})` term for stochastic
integration.
Parameters
----------
G3: numpy.array
:math:`G^3`.
G2: numpy.array
:math:`G^2`.
G: numpy.array
:math:`G`.
k_T: numpy.array
:math:`\vec{k}^T`.
k_T_G: numpy.array
:math:`\vec{k}^TG`.
k_T_G2: numpy.array
:math:`\vec{k}^TG^2`.
rho: numpy.array
:math:`\rho`.
Returns
-------
numpy.array
:math:`\left(\vec{b}(\vec{\rho})\cdot
\vec{\nabla}_{\vec{\rho}}\right)^2\vec{b}(\vec{\rho})`.
"""
k_rho_dot = np.dot(k_T, rho)
k_T_G_rho_dot = np.dot(k_T_G, rho)
k_T_G2_rho_dot = np.dot(k_T_G2, rho)
return (np.dot(G3 + 3*k_rho_dot*G2 + 3*(k_T_G_rho_dot + 2*k_rho_dot**2)*G,
rho) + (k_T_G2_rho_dot + 6*k_rho_dot*k_T_G_rho_dot +
6*k_rho_dot**3)*rho)
def b_dx_b_dx_b_tr_dec(G3, rho):
r"""Same as :func:`b_dx_b_dx_b`, but for the linear differential equation.
Because the nonlinear terms are discarded, this function requires fewer
arguments.
"""
return np.dot(G3, rho)
def b_b_dx_dx_b(G, k_T, k_T_G, rho):
r"""A term in Taylor integration methods.
Function to return the :math:`b^\nu b^\sigma\partial_\nu\partial_\sigma
b^\mu\hat{e}_\mu` term for stochastic integration.
Parameters
----------
G: numpy.array
:math:`G`.
k_T: numpy.array
:math:`\vec{k}^T`.
k_T_G: numpy.array
:math:`\vec{k}^TG`.
rho: numpy.array
:math:`\rho`.
Returns
-------
numpy.array
:math:`b^\nu b^\sigma\partial_\nu\partial_\sigma b^\mu\hat{e}_\mu`
"""
k_rho_dot = np.dot(k_T, rho)
k_T_G_rho_dot = np.dot(k_T_G, rho)
return 2*(k_T_G_rho_dot + k_rho_dot**2)*(np.dot(G, rho) + k_rho_dot*rho)
class Solution:
r"""Integrated solution to a differential equation.
Packages the vectorized solution with the basis it is vectorized with
respect to along with providing convenient functions for returning
properties of the solution a user might care about (such as expectation
value of an observable) without requiring the user to know anything about
the particular representation used for numerical integration.
"""
def __init__(self, vec_soln, basis):
self.vec_soln = vec_soln
self.basis = basis
def get_expectations(self, observable, idx_slice=None, hermitian=True):
r"""Calculate the expectation value of an observable for all times.
Returns
-------
numpy.array
The expectation values of an observable for all the calculated
times.
"""
# For an expectation, I want the trace with the observable. Dualize is
# used for calculating traces with the adjoint of the operator, so I
# need to preemptively adjoint here. This becomes important when taking
        # traces with non-hermitian observables.
if idx_slice is None:
idx_slice = np.s_[:]
dual = sb.dualize(observable.conj().T, self.basis)
if hermitian:
dual = dual.real
return np.dot(self.vec_soln[idx_slice], dual)
def get_purities(self, idx_slice=None):
r"""Calculate the purity of the state for all times.
Returns
-------
numpy.array
The purity :math:`\operatorname{Tr}[\rho^2]` at each calculated
time.
"""
if idx_slice is None:
idx_slice = np.s_[:]
if isinstance(self.basis, sparse.COO):
basis_dual = np.array([np.trace(np.dot(op.conj().T, op)).real
for op in self.basis.todense()])
else:
basis_dual = np.array([np.trace(np.dot(op.conj().T, op)).real
for op in self.basis])
return np.dot(self.vec_soln[idx_slice]**2, basis_dual)
def get_density_matrices(self, idx_slice=None):
r"""Represent the solution as a sequence of Hermitian arrays.
Returns
-------
list of numpy.array
The density matrix at each calculated time.
"""
if idx_slice is None:
idx_slice = np.s_[:]
return np.tensordot(self.vec_soln[idx_slice], self.basis, ([-1], [0]))
def get_density_matrices_slow(self):
r"""Represent the solution as a sequence of Hermitian arrays.
Returns
-------
list of numpy.array
The density matrix at each calculated time.
"""
return [sum([comp*op for comp, op in zip(state, self.basis)])
for state in self.vec_soln]
def save(self, outfile):
np.savez_compressed(outfile, vec_soln=self.vec_soln,
basis=self.basis)
def load_solution(infile):
loaded = np.load(infile)
return Solution(loaded['vec_soln'], loaded['basis'])
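# Hedged usage sketch (file name is hypothetical): a computed Solution can be
# persisted and restored without re-integrating, e.g.
#   soln.save('qubit_decay.npz')
#   soln_again = load_solution('qubit_decay.npz')
#   purities = soln_again.get_purities()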
class LindbladIntegrator:
r"""Template class for Lindblad integrators.
Defines the most basic constructor shared by all integrators of Lindblad
ordinary and stochastic master equations.
Parameters
----------
Ls : [numpy.array]
List-like collection of Lindblad operators
H : numpy.array
The plant Hamiltonian
basis : list of numpy.array, optional
The Hermitian basis to vectorize the operators in terms of (with the
component proportional to the identity in last place). If no basis is
provided the generalized Gell-Mann basis will be used.
drift_rep : numpy.array, optional
The real matrix Q that acts on the vectorized rho as the deterministic
evolution operator. Will save computation time if already known and
don't need to calculate from `Ls`, and `H`.
"""
def __init__(self, Ls, H, basis=None, drift_rep=None):
dim = H.shape[0]
self.basis = ssb.SparseBasis(dim, basis)
if drift_rep is None:
self.L_vecs = [self.basis.vectorize(L) for L in Ls]
self.h_vec = self.basis.vectorize(H)
self.Q = (self.basis.make_hamil_comm_matrix(self.h_vec)
+ sum([self.basis.make_diff_op_matrix(L_vec)
for L_vec in self.L_vecs]))
else:
self.Q = drift_rep
def a_fn(self, t, rho_vec):
return np.dot(self.Q, rho_vec)
def integrate(self, rho_0, times):
raise NotImplementedError()
class UncondLindbladIntegrator(LindbladIntegrator):
r"""Integrator for an unconditional Lindblad master equation.
Parameters
----------
Ls : list of numpy.array
Collection of Lindblad operators
H : numpy.array
The plant Hamiltonian
basis : list of numpy.array, optional
The Hermitian basis to vectorize the operators in terms of (with the
component proportional to the identity in last place). If no basis is
provided the generalized Gell-Mann basis will be used.
drift_rep : numpy.array, optional
The real matrix Q that acts on the vectorized rho as the deterministic
evolution operator. Will save computation time if already known and
don't need to calculate from `Ls`, and `H`.
"""
def jac(self, t, rho_vec):
return self.Q
def integrate(self, rho_0, times, solve_ivp_kwargs=None):
r"""Integrate the equation for a list of times with given initial
conditions.
:param rho_0: The initial state of the system
:type rho_0: `numpy.array`
:param times: A sequence of time points for which to solve for rho
:type times: `list(real)`
:param solve_ivp_kwargs: kwargs for scipy.integrate.solve_ivp
:type solve_ivp_kwargs: dict
        :returns: The components of the vectorized :math:`\rho` for all
specified times
:rtype: `Solution`
"""
rho_0_vec = self.basis.vectorize(rho_0, dense=True).real
default_solve_ivp_kwargs = {'method': 'BDF',
't_eval': times,
'jac': self.jac}
process_default_kwargs(solve_ivp_kwargs, default_solve_ivp_kwargs)
ivp_soln = solve_ivp(self.a_fn, (times[0], times[-1]), rho_0_vec,
**default_solve_ivp_kwargs)
return Solution(ivp_soln.y.T, self.basis.basis.todense())
def integrate_non_herm(self, rho_0, times, solve_ivp_kwargs=None):
r"""Integrate the equation for a list of times with given initial
conditions that may be non hermitian (useful for applications involving
the quantum regression theorem).
:param rho_0: The initial state of the system
:type rho_0: `numpy.array`
:param times: A sequence of time points for which to solve for rho
:type times: `list(real)`
:param solve_ivp_kwargs: kwargs for scipy.integrate.solve_ivp
:type solve_ivp_kwargs: dict
        :returns: The components of the vectorized :math:`\rho` for all
specified times
:rtype: `Solution`
"""
rho_0_vec = self.basis.vectorize(rho_0, dense=True)
default_solve_ivp_kwargs = {'method': 'BDF',
't_eval': times,
'jac': self.jac}
process_default_kwargs(solve_ivp_kwargs, default_solve_ivp_kwargs)
ivp_soln = solve_ivp(self.a_fn, (times[0], times[-1]), rho_0_vec,
**default_solve_ivp_kwargs)
return Solution(ivp_soln.y.T, self.basis.basis.todense())
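# --- Usage sketch (illustrative only, not executed) ---------------------------
# A minimal example of driving the unconditional integrator; the operators,
# initial state, and time grid below are hypothetical choices, not fixed by
# this module:
#
#     import numpy as np
#     sm = np.array([[0., 1.], [0., 0.]], dtype=complex)    # qubit lowering operator
#     H = 0.5 * np.array([[1., 0.], [0., -1.]], dtype=complex)
#     rho_0 = np.array([[1., 0.], [0., 0.]], dtype=complex)
#     times = np.linspace(0., 10., 101)
#     integrator = UncondLindbladIntegrator([sm], H)
#     soln = integrator.integrate(rho_0, times)
#     # soln.vec_soln holds one row of vectorized-rho components per requested time.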
class UncondTimeDepLindInt(UncondLindbladIntegrator):
r"""Integrator for an unconditional Lindblad master equation with
time-dependent Hamiltonian and Lindblad operators.
Parameters
----------
Ls : list of lists of form [numpy.array, (numpy.array, callable), ...]
Collection of Lindblad operators, each expressed as a list whose first
element contains the constant part of the operator and whose subsequent
elements are pairs whose first element is an operator and whose second
element is the time-dependent coefficient of that operator.
E.g., a Lindblad operator expressed algebraically as
L_j = sum_m f_{j,m}(t) L_{j,m}
is put into the list form
[L_{j,0}, (L_{j,1}, f_{j,1}), ...]
H : [numpy.array, (numpy.array, callable), ...]
The plant Hamiltonian expressed as a list whose first element contains
the constant part of the operator and whose subsequent elements are
pairs whose first element is an operator and whose second element is the
time-dependent coefficient of that operator.
basis : list of numpy.array, optional
The Hermitian basis to vectorize the operators in terms of (with the
component proportional to the identity in last place). If no basis is
provided the generalized Gell-Mann basis will be used.
drift_rep : numpy.array, optional
The real matrix Q that acts on the vectorized rho as the deterministic
time-independent evolution operator. Will save computation time if
already known and don't need to calculate from `Ls`, and `H`. Sort of a
holdover from the time-independent case right now. Can't imagine it
being useful in its current state for the time-dependent case.
"""
def __init__(self, Ls, H, basis=None, drift_rep=None):
if type(H[0]) is not tuple:
zero = np.zeros(H[0].shape, dtype=np.complex)
else:
zero = np.zeros(H[0][0].shape, dtype=np.complex)
super().__init__([], zero, basis, drift_rep)
self.time_dep_L_vecs = []
self.time_dep_L_coeffs = []
# Loop over different Lindblad operators
for L in Ls:
lvecs = []
fs = []
if type(L[0]) is not tuple:
lvecs.append(self.basis.vectorize(L[0]))
fs.append(lambda x: 1)
for k in L[1:]:
lvecs.append(self.basis.vectorize(k[0]))
fs.append(k[1])
else:
for k in L:
lvecs.append(self.basis.vectorize(k[0]))
fs.append(k[1])
self.time_dep_L_vecs.append(lvecs)
self.time_dep_L_coeffs.append(fs)
self.diagonal_L_supops = [[self.basis.make_real_comm_matrix(lvec, lvec)
for lvec in lvecs]
for lvecs in self.time_dep_L_vecs]
self.re_off_diag_L_supops = [[self.basis.make_real_comm_matrix(lvec1, lvec2)
+ self.basis.make_real_comm_matrix(lvec2, lvec1)
for lvec1, lvec2 in it.combinations(lvecs, 2)]
for lvecs in self.time_dep_L_vecs]
self.im_off_diag_L_supops = [[self.basis.make_real_comm_matrix(1.j*lvec1, lvec2)
+ self.basis.make_real_comm_matrix(lvec2, 1.j*lvec1)
for lvec1, lvec2 in it.combinations(lvecs, 2)]
for lvecs in self.time_dep_L_vecs]
self.time_dep_H_supops = []
self.time_dep_H_coeffs = []
# Loop over different H factors
if type(H[0]) is not tuple:
hvec = self.basis.vectorize(H[0])
self.time_dep_H_supops.append(self.basis.make_hamil_comm_matrix(hvec))
self.time_dep_H_coeffs.append(lambda x: 1)
for k in H[1:]:
hvec = self.basis.vectorize(k[0])
self.time_dep_H_supops.append(self.basis.make_hamil_comm_matrix(hvec))
self.time_dep_H_coeffs.append(k[1])
else:
for k in H:
hvec = self.basis.vectorize(k[0])
self.time_dep_H_supops.append(self.basis.make_hamil_comm_matrix(hvec))
self.time_dep_H_coeffs.append(k[1])
def jac(self, t, rho_vec):
# Need |f_{j,m}(t)|^2
abs_coeffs = [[np.abs(fn(t))**2 for fn in fs] for fs in self.time_dep_L_coeffs]
diag_supop = sum([sum([coeff*supop for coeff, supop in zip(coeff_list, supop_list)])
for coeff_list, supop_list in zip(abs_coeffs, self.diagonal_L_supops)])
# Need re and im parts of f_{j,m}(t) f_{j,n}(t)^*
re_coeffs = []
im_coeffs = []
for fn_list in self.time_dep_L_coeffs:
re_coeff_list = []
im_coeff_list = []
for fn1, fn2 in it.combinations(fn_list, 2):
coeff_val = fn1(t)*np.conj(fn2(t))
re_coeff_list.append(np.real(coeff_val))
im_coeff_list.append(np.imag(coeff_val))
re_coeffs.append(re_coeff_list)
im_coeffs.append(im_coeff_list)
re_off_diag_supop = sum([sum([coeff*supop for coeff, supop in zip(coeff_list, supop_list)])
for coeff_list, supop_list
in zip(re_coeffs, self.re_off_diag_L_supops)])
im_off_diag_supop = sum([sum([coeff*supop for coeff, supop in zip(coeff_list, supop_list)])
for coeff_list, supop_list
in zip(im_coeffs, self.im_off_diag_L_supops)])
H_supop = sum([fn(t)*supop for fn, supop
in zip(self.time_dep_H_coeffs, self.time_dep_H_supops)])
return diag_supop + re_off_diag_supop + im_off_diag_supop + H_supop
def a_fn(self, t, rho_vec):
return np.dot(self.jac(t, rho_vec), rho_vec)
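# --- Usage sketch (illustrative only) ------------------------------------------
# How the list form described in the docstring might look for a constant decay
# channel plus a sinusoidally modulated drive (operator names are hypothetical;
# sm, rho_0, and times as in the sketch above):
#
#     sx = np.array([[0., 1.], [1., 0.]], dtype=complex)
#     sz = np.array([[1., 0.], [0., -1.]], dtype=complex)
#     Ls = [[np.sqrt(0.1) * sm]]                       # constant Lindblad operator only
#     H = [0.5 * sz, (sx, lambda t: np.cos(2. * t))]   # constant part + driven term
#     td_integrator = UncondTimeDepLindInt(Ls, H)
#     td_soln = td_integrator.integrate(rho_0, times)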
class HomodyneLindbladIntegrator(UncondLindbladIntegrator):
def __init__(self, Ls, H, meas_L_idx, basis=None, drift_rep=None, **kwargs):
super().__init__(Ls, H, basis, drift_rep, **kwargs)
L_meas_vec = self.L_vecs[meas_L_idx]
L_meas = Ls[meas_L_idx]
Id_vec = self.basis.vectorize(np.eye(L_meas.shape[0]))
self.G = 2 * self.basis.make_real_sand_matrix(L_meas_vec, Id_vec)
self.k_T = -2 * self.basis.dualize(L_meas).real
def a_fn(self, t, rho_vec):
return self.Q @ rho_vec
def b_fn(self, t, rho_vec):
return (self.k_T @ rho_vec) * rho_vec + self.G @ rho_vec
def b_fn_tr_non_pres(self, t, rho_vec):
return self.G @ rho_vec
def dW_fn(self, dM, dt, t, rho_vec):
'''Convert measurement increment dM to Wiener increment dW.
'''
return dM + np.dot(self.k_T, rho_vec) * dt
def integrate(self, rho_0, times, U1s=None, U2s=None):
rho_0_vec = self.basis.vectorize(rho_0, dense=True).real
if U1s is None:
U1s = np.random.randn(len(times) -1)
vec_soln = sde.euler(self.a_fn, self.b_fn, rho_0_vec, times, U1s)
return Solution(vec_soln, self.basis.basis.todense())
def integrate_tr_non_pres(self, rho_0, times, U1s=None, U2s=None):
rho_0_vec = self.basis.vectorize(rho_0, dense=True).real
if U1s is None:
U1s = np.random.randn(len(times) -1)
vec_soln = sde.euler(self.a_fn, self.b_fn_tr_non_pres, rho_0_vec, times,
U1s)
# TODO: Having a difference between the basis stored by the Lindblad
# integrators and that stored by the Gaussian integrators is also not
# ideal. Eventually it would be nice for everything to be one of these
# Lindblad integrators and have methods for constructing the Gaussian
# versions from the relevant Gaussian parameters.
return Solution(vec_soln, self.basis.basis.todense())
def integrate_measurements(self, rho_0, times, dMs):
r"""Integrate system evolution conditioned on a measurement record.
Parameters
----------
rho_0: numpy.array
The initial state of the system
times: numpy.array
A sequence of time points for which to solve for rho
dMs: numpy.array(len(times) - 1)
Incremental measurement outcomes used to drive the SDE.
Returns
-------
Solution
            The components of the vectorized :math:`\rho` for all specified
times
"""
rho_0_vec = self.basis.vectorize(rho_0, dense=True).real
vec_soln = sde.meas_euler(self.a_fn, self.b_fn, self.dW_fn, rho_0_vec,
times, dMs)
return Solution(vec_soln, self.basis.basis.todense())
def gen_meas_record(self, rho_0, times, U1s=None):
r"""Simulate a measurement record.
Integrate for a sequence of times with a given initial condition (and
optionally specified white noise), returning a measurement record along
with the trajectory.
The incremental measurement outcomes making up the measurement record
are related to the white noise increments and instantaneous state in
the following way:
.. math::
            dM_t = dW_t + \operatorname{tr}[(c+c^\dagger)\rho_t]\,dt
Parameters
----------
rho_0 : numpy.array of complex float
The initial state of the system as a Hermitian matrix
times : numpy.array of real float
A sequence of time points for which to solve for rho
U1s: numpy.array of real float
Samples from a standard-normal distribution used to construct
Wiener increments :math:`\Delta W` for each time interval. Multiple
rows may be included for independent trajectories. ``U1s.shape`` is
assumed to be ``(len(times) - 1,)``.
Returns
-------
tuple of Solution and numpy.array
            The components of the vectorized :math:`\rho` for all specified
times and an array of incremental measurement outcomes
"""
if U1s is None:
U1s = np.random.randn(len(times) -1)
soln = self.integrate(rho_0, times, U1s)
dts = times[1:] - times[:-1]
dWs = np.sqrt(dts) * U1s
tr_c_c_rs = np.array([-np.dot(self.k_T, rho_vec)
for rho_vec in soln.vec_soln[:-1]])
dMs = dWs + tr_c_c_rs * dts
return soln, dMs
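# --- Usage sketch (illustrative only) ------------------------------------------
# Simulating a homodyne trajectory together with its measurement record, then
# reconstructing the same trajectory from that record (sm, H, rho_0, and times
# are the hypothetical names from the sketches above):
#
#     hom_integrator = HomodyneLindbladIntegrator([sm], H, meas_L_idx=0)
#     hom_soln, dMs = hom_integrator.gen_meas_record(rho_0, times)
#     hom_soln_again = hom_integrator.integrate_measurements(rho_0, times, dMs)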
class JumpLindbladIntegrator(UncondLindbladIntegrator):
def __init__(self, Ls, H, meas_L_idx, basis=None, drift_rep=None, **kwargs):
super().__init__(Ls, H, basis, drift_rep, **kwargs)
L_meas_vec = self.L_vecs[meas_L_idx]
self.G = -self.basis.make_real_sand_matrix(L_meas_vec, L_meas_vec)
L_meas = Ls[meas_L_idx]
self.kT = self.basis.dualize(L_meas.conj().T @ L_meas).real
# Add the appropriate operator to convert the D operator into the
# no-jump operator (-1/2) (L L† rho + rho L L†)
self.lin_no_jump_op = self.Q + self.G
# The jump operator without renormalization: L† rho L
self.lin_jump_op = -self.G
self.tr_fnctnl = self.basis.dualize(np.eye(L_meas.shape[0],
dtype=L_meas.dtype)).real
def lin_no_jump_jac(self, t, rho_vec):
return self.lin_no_jump_op
def lin_no_jump_a_fn(self, t, rho_vec):
return self.lin_no_jump_op @ rho_vec
def jump_event(self, t, rho_vec, jump_threshold):
return self.tr_fnctnl @ rho_vec - jump_threshold
def integrate(self, rho_0, times, Us=None, return_meas_rec=False,
method='BDF'):
rho_0_vec = self.basis.vectorize(rho_0, dense=True).real
if Us is None:
jump_thresholds = iter([])
else:
jump_thresholds = iter(Us)
meas_rec = []
start_idx = 0
vec_soln_segments = []
jump_occurred = True
while jump_occurred and start_idx < len(times) - 1:
try:
jump_threshold = next(jump_thresholds)
except StopIteration:
# If no jump thresholds are provided or we run out, generate new
# random thresholds.
jump_threshold = np.random.uniform()
jump = partial(self.jump_event, jump_threshold=jump_threshold)
jump.terminal = True
ivp_soln = solve_ivp(self.lin_no_jump_a_fn,
[times[start_idx], times[-1]], rho_0_vec,
t_eval=times[start_idx:], events=jump,
method=method, jac=self.lin_no_jump_jac)
traces = np.tensordot(ivp_soln.y, self.tr_fnctnl, [0, 0])
vec_soln_segments.append(ivp_soln.y.T / traces[:,np.newaxis])
if ivp_soln.status == 1:
# A jump occurred
meas_rec.append(ivp_soln.t_events[0][0])
start_idx += len(ivp_soln.t)
rho_0_vec = self.lin_jump_op @ ivp_soln.y.T[-1]
rho_0_vec = rho_0_vec / (self.tr_fnctnl @ rho_0_vec)
else:
jump_occurred = False
soln = Solution(np.vstack(vec_soln_segments),
self.basis.basis.todense())
return (soln, meas_rec) if return_meas_rec else soln
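# --- Usage sketch (illustrative only) ------------------------------------------
# A photodetection (jump) trajectory; with return_meas_rec=True the second return
# value is the list of jump times resolved by the event detection:
#
#     jump_integrator = JumpLindbladIntegrator([sm], H, meas_L_idx=0)
#     jump_soln, jump_times = jump_integrator.integrate(rho_0, times,
#                                                       return_meas_rec=True)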
class GaussIntegrator:
r"""Template class for Gaussian integrators.
Defines the most basic constructor shared by all integrators of Gaussian
ordinary and stochastic master equations.
Parameters
----------
c_op : numpy.array
The coupling operator
M_sq : complex float
The squeezing parameter
N : non-negative float
The thermal parameter
H : numpy.array
The plant Hamiltonian
basis : list of numpy.array, optional
The Hermitian basis to vectorize the operators in terms of (with the
component proportional to the identity in last place). If no basis is
provided the generalized Gell-Mann basis will be used.
drift_rep : numpy.array, optional
The real matrix Q that acts on the vectorized rho as the deterministic
evolution operator. Will save computation time if already known and
don't need to calculate from `c_op`, `M_sq`, `N`, and `H`.
"""
def __init__(self, c_op, M_sq, N, H, basis=None, drift_rep=None, **kwargs):
if basis is None:
d = c_op.shape[0]
self.basis = gm.get_basis(d)
else:
self.basis = basis
if drift_rep is None:
self.Q = sb.construct_Q(c_op, M_sq, N, H, self.basis[:-1])
else:
self.Q = drift_rep
def a_fn(self, t, rho_vec):
return np.dot(self.Q, rho_vec)
def integrate(self, rho_0, times):
raise NotImplementedError()
class UncondGaussIntegrator(GaussIntegrator):
r"""Integrator for an unconditional Gaussian master equation.
Parameters
----------
c_op : numpy.array
The coupling operator
M_sq : complex float
The squeezing parameter
N : non-negative float
The thermal parameter
H : numpy.array
The plant Hamiltonian
basis : list of numpy.array, optional
The Hermitian basis to vectorize the operators in terms of (with the
component proportional to the identity in last place). If no basis is
provided the generalized Gell-Mann basis will be used.
drift_rep : numpy.array, optional
The real matrix Q that acts on the vectorized rho as the deterministic
evolution operator. Will save computation time if already known and
don't need to calculate from `c_op`, `M_sq`, `N`, and `H`.
"""
def jac(self, t, rho_vec):
return self.Q
def integrate(self, rho_0, times, method='BDF'):
r"""Integrate the equation for a list of times with given initial
conditions.
:param rho_0: The initial state of the system
:type rho_0: `numpy.array`
:param times: A sequence of time points for which to solve for rho
:type times: `list(real)`
        :returns: The components of the vectorized :math:`\rho` for all
specified times
:rtype: `Solution`
"""
rho_0_vec = sb.vectorize(rho_0, self.basis).real
ivp_soln = solve_ivp(self.a_fn, (times[0], times[-1]),
rho_0_vec, method=method, t_eval=times,
jac=self.jac)
return Solution(ivp_soln.y.T, self.basis)
def integrate_non_herm(self, rho_0, times, solve_ivp_kwargs=None):
r"""Integrate the equation for a list of times with given initial
        conditions that may be non-Hermitian (useful for applications involving
the quantum regression theorem).
:param rho_0: The initial state of the system
:type rho_0: `numpy.array`
:param times: A sequence of time points for which to solve for rho
:type times: `list(real)`
        :param solve_ivp_kwargs: kwargs for scipy.integrate.solve_ivp
        :type solve_ivp_kwargs: dict
        :returns: The components of the vectorized :math:`\rho` for all
specified times
:rtype: `Solution`
"""
rho_0_vec = sb.vectorize(rho_0, self.basis)
default_solve_ivp_kwargs = {'method': 'BDF',
't_eval': times,
'jac': self.jac}
process_default_kwargs(solve_ivp_kwargs, default_solve_ivp_kwargs)
ivp_soln = solve_ivp(self.a_fn, (times[0], times[-1]),
rho_0_vec, **default_solve_ivp_kwargs)
return Solution(ivp_soln.y.T, self.basis)
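# --- Usage sketch (illustrative only) ------------------------------------------
# The Gaussian-bath integrators take the coupling operator and bath parameters
# directly; M_sq = 0 and N = 0 recover the vacuum-bath case (sm, H, rho_0, and
# times are the hypothetical names from the earlier sketches):
#
#     gauss_integrator = UncondGaussIntegrator(sm, 0., 0., H)
#     gauss_soln = gauss_integrator.integrate(rho_0, times)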
class Strong_0_5_HomodyneIntegrator(GaussIntegrator):
r"""Template class for integrators of strong order >= 0.5.
Defines the most basic constructor shared by all integrators of Gaussian
homodyne stochastic master equations of strong order >= 0.5.
Parameters
----------
c_op : numpy.array
The coupling operator
M_sq : complex float
The squeezing parameter
N : non-negative float
The thermal parameter
H : numpy.array
The plant Hamiltonian
basis : list of numpy.array, optional
The Hermitian basis to vectorize the operators in terms of (with the
component proportional to the identity in last place). If no basis is
provided the generalized Gell-Mann basis will be used.
drift_rep : numpy.array, optional
The real matrix Q that acts on the vectorized rho as the deterministic
evolution operator. Will save computation time if already known and
don't need to calculate from `c_op`, `M_sq`, `N`, and `H`.
diffusion_reps : dict of numpy.array, optional
The real matrix G and row vector k_T that act on the vectorized rho as
the stochastic evolution operator. Will save computation time if
already known and don't need to calculate from `c_op`, `M_sq`, and `N`.
"""
def __init__(self, c_op, M_sq, N, H, basis=None, drift_rep=None,
diffusion_reps=None, **kwargs):
super(Strong_0_5_HomodyneIntegrator, self).__init__(c_op, M_sq, N, H,
basis, drift_rep,
**kwargs)
if diffusion_reps is None:
self.G, self.k_T = sb.construct_G_k_T(c_op, M_sq, N, H,
self.basis[:-1])
else:
self.G = diffusion_reps['G']
self.k_T = diffusion_reps['k_T']
def b_fn(self, t, rho_vec):
return np.dot(self.k_T, rho_vec)*rho_vec + np.dot(self.G, rho_vec)
def b_fn_tr_dec(self, t, rho_vec):
# For use with a trace-decreasing linear integration function
return np.dot(self.G, rho_vec)
def dW_fn(self, dM, dt, t, rho_vec):
'''Convert measurement increment dM to Wiener increment dW.
'''
return dM + np.dot(self.k_T, rho_vec) * dt
def integrate(self, rho_0, times, U1s=None, U2s=None):
raise NotImplementedError()
def integrate_tr_dec(self, rho_0, times, U1s=None, U2s=None):
raise NotImplementedError()
def gen_meas_record(self, rho_0, times, U1s=None):
r"""Simulate a measurement record.
Integrate for a sequence of times with a given initial condition (and
optionally specified white noise), returning a measurement record along
with the trajectory.
The incremental measurement outcomes making up the measurement record
are related to the white noise increments and instantaneous state in
the following way:
.. math::
            dM_t = dW_t + \operatorname{tr}[(c+c^\dagger)\rho_t]\,dt
Parameters
----------
rho_0 : numpy.array of complex float
The initial state of the system as a Hermitian matrix
times : numpy.array of real float
A sequence of time points for which to solve for rho
U1s: numpy.array of real float
Samples from a standard-normal distribution used to construct
Wiener increments :math:`\Delta W` for each time interval. Multiple
rows may be included for independent trajectories. ``U1s.shape`` is
assumed to be ``(len(times) - 1,)``.
Returns
-------
tuple of Solution and numpy.array
            The components of the vectorized :math:`\rho` for all specified
times and an array of incremental measurement outcomes
"""
if U1s is None:
U1s = np.random.randn(len(times) -1)
soln = self.integrate(rho_0, times, U1s)
dts = times[1:] - times[:-1]
dWs = np.sqrt(dts) * U1s
tr_c_c_rs = np.array([-np.dot(self.k_T, rho_vec)
for rho_vec in soln.vec_soln[:-1]])
dMs = dWs + tr_c_c_rs * dts
return soln, dMs
class Strong_1_0_HomodyneIntegrator(Strong_0_5_HomodyneIntegrator):
r"""Template class for integrators of strong order >= 1.
Defines the most basic constructor shared by all integrators of Gaussian
homodyne stochastic master equations of strong order >= 1.
Parameters
----------
c_op : numpy.array
The coupling operator
M_sq : complex float
The squeezing parameter
N : non-negative float
The thermal parameter
H : numpy.array
The plant Hamiltonian
basis : list of numpy.array, optional
The Hermitian basis to vectorize the operators in terms of (with the
component proportional to the identity in last place). If no basis is
provided the generalized Gell-Mann basis will be used.
drift_rep : numpy.array, optional
The real matrix Q that acts on the vectorized rho as the deterministic
evolution operator. Will save computation time if already known and
don't need to calculate from `c_op`, `M_sq`, `N`, and `H`.
diffusion_reps : dict of numpy.array, optional
The real matrix G and row vector k_T that act on the vectorized rho as
the stochastic evolution operator. Will save computation time if
already known and don't need to calculate from `c_op`, `M_sq`, and `N`.
"""
def __init__(self, c_op, M_sq, N, H, basis=None, drift_rep=None,
diffusion_reps=None, **kwargs):
super(Strong_1_0_HomodyneIntegrator, self).__init__(c_op, M_sq, N, H,
basis, drift_rep,
diffusion_reps,
**kwargs)
self.k_T_G = np.dot(self.k_T, self.G)
self.G2 = np.dot(self.G, self.G)
class Strong_1_5_HomodyneIntegrator(Strong_1_0_HomodyneIntegrator):
r"""Template class for integrators of strong order >= 1.5.
Defines the most basic constructor shared by all integrators of Gaussian
homodyne stochastic master equations of strong order >= 1.5.
Parameters
----------
c_op : numpy.array
The coupling operator
M_sq : complex float
The squeezing parameter
N : non-negative float
The thermal parameter
H : numpy.array
The plant Hamiltonian
basis : list of numpy.array, optional
The Hermitian basis to vectorize the operators in terms of (with the
component proportional to the identity in last place). If no basis is
provided the generalized Gell-Mann basis will be used.
drift_rep : numpy.array, optional
The real matrix Q that acts on the vectorized rho as the deterministic
evolution operator. Will save computation time if already known and
don't need to calculate from `c_op`, `M_sq`, `N`, and `H`.
diffusion_reps : dict of numpy.array, optional
The real matrix G and row vector k_T that act on the vectorized rho as
the stochastic evolution operator. Will save computation time if
already known and don't need to calculate from `c_op`, `M_sq`, and `N`.
"""
def __init__(self, c_op, M_sq, N, H, basis=None, drift_rep=None,
diffusion_reps=None, **kwargs):
super(Strong_1_5_HomodyneIntegrator, self).__init__(c_op, M_sq, N, H,
basis, drift_rep,
diffusion_reps,
**kwargs)
self.G3 = np.dot(self.G2, self.G)
self.Q2 = np.dot(self.Q, self.Q)
self.QG = np.dot(self.Q, self.G)
self.GQ = np.dot(self.G, self.Q)
self.k_T_G2 = np.dot(self.k_T, self.G2)
self.k_T_Q = np.dot(self.k_T, self.Q)
class EulerHomodyneIntegrator(Strong_0_5_HomodyneIntegrator):
r"""Euler integrator for the conditional Gaussian master equation.
Parameters
----------
c_op : numpy.array
The coupling operator
M_sq : complex float
The squeezing parameter
N : non-negative float
The thermal parameter
H : numpy.array
The plant Hamiltonian
basis : list of numpy.array, optional
The Hermitian basis to vectorize the operators in terms of (with the
component proportional to the identity in last place). If no basis is
provided the generalized Gell-Mann basis will be used.
drift_rep : numpy.array, optional
The real matrix Q that acts on the vectorized rho as the deterministic
evolution operator. Will save computation time if already known and
don't need to calculate from `c_op`, `M_sq`, `N`, and `H`.
diffusion_reps : dict of numpy.array, optional
The real matrix G and row vector k_T that act on the vectorized rho as
the stochastic evolution operator. Will save computation time if
already known and don't need to calculate from `c_op`, `M_sq`, and `N`.
"""
def integrate(self, rho_0, times, U1s=None, U2s=None):
r"""Integrate the initial value problem.
Integrate for a sequence of times with a given initial condition (and
optionally specified white noise).
Parameters
----------
rho_0: numpy.array
The initial state of the system
times: numpy.array
A sequence of time points for which to solve for rho
U1s: numpy.array(len(times) - 1)
Samples from a standard-normal distribution used to construct
Wiener increments :math:`\Delta W` for each time interval. Multiple
rows may be included for independent trajectories.
U2s: numpy.array(len(times) - 1)
Unused, included to make the argument list uniform with
higher-order integrators.
Returns
-------
Solution
The state of :math:`\rho` for all specified times
"""
rho_0_vec = sb.vectorize(rho_0, self.basis).real
if U1s is None:
U1s = np.random.randn(len(times) -1)
vec_soln = sde.euler(self.a_fn, self.b_fn, rho_0_vec, times, U1s)
return Solution(vec_soln, self.basis)
def integrate_tr_dec(self, rho_0, times, U1s=None, U2s=None):
r"""Integrate the initial value problem.
Integrate for a sequence of times with a given initial condition (and
optionally specified white noise). Integrates the linear equation,
which ignores the tr[(c + cdag) rho] rho term and therefore decreases
the trace.
Parameters
----------
rho_0: numpy.array
The initial state of the system
times: numpy.array
A sequence of time points for which to solve for rho
U1s: numpy.array(len(times) - 1)
Samples from a standard-normal distribution used to construct
Wiener increments :math:`\Delta W` for each time interval. Multiple
rows may be included for independent trajectories.
U2s: numpy.array(len(times) - 1)
Unused, included to make the argument list uniform with
higher-order integrators.
Returns
-------
Solution
The state of :math:`\rho` for all specified times
"""
rho_0_vec = sb.vectorize(rho_0, self.basis).real
if U1s is None:
U1s = np.random.randn(len(times) -1)
vec_soln = sde.euler(self.a_fn, self.b_fn_tr_dec, rho_0_vec, times,
U1s)
return Solution(vec_soln, self.basis)
def integrate_measurements(self, rho_0, times, dMs):
r"""Integrate system evolution conditioned on a measurement record.
Parameters
----------
rho_0: numpy.array
The initial state of the system
times: numpy.array
A sequence of time points for which to solve for rho
dMs: numpy.array(len(times) - 1)
Incremental measurement outcomes used to drive the SDE.
Returns
-------
Solution
            The components of the vectorized :math:`\rho` for all specified
times
"""
rho_0_vec = sb.vectorize(rho_0, self.basis).real
vec_soln = sde.meas_euler(self.a_fn, self.b_fn, self.dW_fn, rho_0_vec,
times, dMs)
return Solution(vec_soln, self.basis)
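# --- Usage sketch (illustrative only) ------------------------------------------
# Euler integration of the homodyne SME with explicitly supplied standard-normal
# draws, so the same noise realization can be reused across integrators (names
# as in the earlier sketches are hypothetical):
#
#     euler_integrator = EulerHomodyneIntegrator(sm, 0., 0., H)
#     U1s = np.random.randn(len(times) - 1)
#     euler_soln = euler_integrator.integrate(rho_0, times, U1s)
#     euler_soln_rec, dMs = euler_integrator.gen_meas_record(rho_0, times, U1s)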
class MilsteinHomodyneIntegrator(Strong_1_0_HomodyneIntegrator):
r"""Milstein integrator for the conditional Gaussian master equation.
Parameters
----------
c_op : numpy.array
The coupling operator
M_sq : complex float
The squeezing parameter
N : non-negative float
The thermal parameter
H : numpy.array
The plant Hamiltonian
basis : list of numpy.array, optional
The Hermitian basis to vectorize the operators in terms of (with the
component proportional to the identity in last place). If no basis is
provided the generalized Gell-Mann basis will be used.
drift_rep : numpy.array, optional
The real matrix Q that acts on the vectorized rho as the deterministic
evolution operator. Will save computation time if already known and
don't need to calculate from `c_op`, `M_sq`, `N`, and `H`.
diffusion_reps : dict of numpy.array, optional
The real matrix G and row vector k_T that act on the vectorized rho as
the stochastic evolution operator. Will save computation time if
already known and don't need to calculate from `c_op`, `M_sq`, and `N`.
"""
def b_dx_b_fn(self, t, rho_vec):
# TODO: May want this to be defined by the constructor to facilitate
# numba optimization.
return b_dx_b(self.G2, self.k_T_G, self.G, self.k_T, rho_vec)
def b_dx_b_fn_tr_dec(self, t, rho_vec):
        # Used by trace decreasing integrator
return b_dx_b_tr_dec(self.G2, rho_vec)
def integrate(self, rho_0, times, U1s=None, U2s=None):
r"""Integrate the initial value problem.
Integrate for a sequence of times with a given initial condition (and
optionally specified white noise).
Parameters
----------
rho_0: numpy.array
The initial state of the system
times: numpy.array
A sequence of time points for which to solve for rho
U1s: numpy.array(len(times) - 1)
Samples from a standard-normal distribution used to construct
Wiener increments :math:`\Delta W` for each time interval. Multiple
rows may be included for independent trajectories.
U2s: numpy.array(len(times) - 1)
Unused, included to make the argument list uniform with
higher-order integrators.
Returns
-------
Solution
The state of :math:`\rho` for all specified times
"""
rho_0_vec = sb.vectorize(rho_0, self.basis).real
if U1s is None:
U1s = np.random.randn(len(times) -1)
vec_soln = sde.milstein(self.a_fn, self.b_fn, self.b_dx_b_fn, rho_0_vec,
times, U1s)
return Solution(vec_soln, self.basis)
def integrate_tr_dec(self, rho_0, times, U1s=None, U2s=None):
r"""Integrate the initial value problem.
Integrate for a sequence of times with a given initial condition (and
optionally specified white noise). Integrates the linear equation,
which ignores the tr[(c + cdag) rho] rho term and therefore decreases
the trace.
Parameters
----------
rho_0: numpy.array
The initial state of the system
times: numpy.array
A sequence of time points for which to solve for rho
U1s: numpy.array(len(times) - 1)
Samples from a standard-normal distribution used to construct
Wiener increments :math:`\Delta W` for each time interval. Multiple
rows may be included for independent trajectories.
U2s: numpy.array(len(times) - 1)
Unused, included to make the argument list uniform with
higher-order integrators.
Returns
-------
Solution
The state of :math:`\rho` for all specified times
"""
rho_0_vec = sb.vectorize(rho_0, self.basis).real
if U1s is None:
U1s = np.random.randn(len(times) -1)
vec_soln = sde.milstein(self.a_fn, self.b_fn_tr_dec,
self.b_dx_b_fn_tr_dec, rho_0_vec, times, U1s)
return Solution(vec_soln, self.basis)
def integrate_measurements(self, rho_0, times, dMs):
r"""Integrate system evolution conditioned on a measurement record.
Parameters
----------
rho_0: numpy.array
The initial state of the system
times: numpy.array
A sequence of time points for which to solve for rho
dMs: numpy.array(len(times) - 1)
Incremental measurement outcomes used to drive the SDE.
Returns
-------
Solution
            The components of the vectorized :math:`\rho` for all specified
times
"""
rho_0_vec = sb.vectorize(rho_0, self.basis).real
vec_soln = sde.meas_milstein(self.a_fn, self.b_fn, self.b_dx_b_fn,
self.dW_fn, rho_0_vec, times, dMs)
return Solution(vec_soln, self.basis)
class Taylor_1_5_HomodyneIntegrator(Strong_1_5_HomodyneIntegrator):
r"""Order 1.5 Taylor ntegrator for the conditional Gaussian master equation.
Parameters
----------
c_op : numpy.array
The coupling operator
M_sq : complex float
The squeezing parameter
N : non-negative float
The thermal parameter
H : numpy.array
The plant Hamiltonian
basis : list of numpy.array, optional
The Hermitian basis to vectorize the operators in terms of (with the
component proportional to the identity in last place). If no basis is
provided the generalized Gell-Mann basis will be used.
drift_rep : numpy.array, optional
The real matrix Q that acts on the vectorized rho as the deterministic
evolution operator. Will save computation time if already known and
don't need to calculate from `c_op`, `M_sq`, `N`, and `H`.
diffusion_reps : dict of numpy.array, optional
The real matrix G and row vector k_T that act on the vectorized rho as
the stochastic evolution operator. Will save computation time if
already known and don't need to calculate from `c_op`, `M_sq`, and `N`.
"""
def a_fn(self, rho_vec):
return np.dot(self.Q, rho_vec)
def b_fn(self, rho_vec):
return np.dot(self.k_T, rho_vec)*rho_vec + np.dot(self.G, rho_vec)
def b_fn_tr_dec(self, rho_vec):
return np.dot(self.G, rho_vec)
def b_dx_b_fn(self, rho_vec):
return b_dx_b(self.G2, self.k_T_G, self.G, self.k_T, rho_vec)
def b_dx_b_fn_tr_dec(self, rho_vec):
return b_dx_b_tr_dec(self.G2, rho_vec)
def b_dx_a_fn(self, rho_vec):
return b_dx_a(self.QG, self.k_T, self.Q, rho_vec)
def b_dx_a_fn_tr_dec(self, rho_vec):
return b_dx_a_tr_dec(self.QG, rho_vec)
def a_dx_b_fn(self, rho_vec):
return a_dx_b(self.GQ, self.k_T, self.Q, self.k_T_Q, rho_vec)
def a_dx_b_fn_tr_dec(self, rho_vec):
return a_dx_b_tr_dec(self.GQ, rho_vec)
def a_dx_a_fn(self, rho_vec):
return a_dx_a(self.Q2, rho_vec)
def b_dx_b_dx_b_fn(self, rho_vec):
return b_dx_b_dx_b(self.G3, self.G2, self.G, self.k_T, self.k_T_G,
self.k_T_G2, rho_vec)
def b_dx_b_dx_b_fn_tr_dec(self, rho_vec):
return b_dx_b_dx_b_tr_dec(self.G3, rho_vec)
def b_b_dx_dx_b_fn(self, rho_vec):
return b_b_dx_dx_b(self.G, self.k_T, self.k_T_G, rho_vec)
def b_b_dx_dx_b_fn_tr_dec(self, rho_vec):
return 0
def b_b_dx_dx_a_fn(self, rho_vec):
return 0
def integrate(self, rho_0, times, U1s=None, U2s=None):
r"""Integrate the initial value problem.
Integrate for a sequence of times with a given initial condition (and
optionally specified white noise).
Parameters
----------
rho_0: numpy.array
The initial state of the system
times: numpy.array
A sequence of time points for which to solve for rho
U1s: numpy.array(len(times) - 1)
Samples from a standard-normal distribution used to construct
Wiener increments :math:`\Delta W` for each time interval. Multiple
rows may be included for independent trajectories.
U2s: numpy.array(len(times) - 1)
Unused, included to make the argument list uniform with
higher-order integrators.
Returns
-------
Solution
The state of :math:`\rho` for all specified times
"""
rho_0_vec = sb.vectorize(rho_0, self.basis).real
if U1s is None:
U1s = np.random.randn(len(times) -1)
if U2s is None:
U2s = np.random.randn(len(times) -1)
vec_soln = sde.time_ind_taylor_1_5(self.a_fn, self.b_fn, self.b_dx_b_fn,
self.b_dx_a_fn, self.a_dx_b_fn,
self.a_dx_a_fn, self.b_dx_b_dx_b_fn,
self.b_b_dx_dx_b_fn,
self.b_b_dx_dx_a_fn,
rho_0_vec, times, U1s, U2s)
return Solution(vec_soln, self.basis)
def integrate_tr_dec(self, rho_0, times, U1s=None, U2s=None):
r"""Integrate the initial value problem.
Integrate for a sequence of times with a given initial condition (and
optionally specified white noise). Integrates the linear equation,
which ignores the tr[(c + cdag) rho] rho term and therefore decreases
the trace.
Parameters
----------
rho_0: numpy.array
The initial state of the system
times: numpy.array
A sequence of time points for which to solve for rho
U1s: numpy.array(len(times) - 1)
Samples from a standard-normal distribution used to construct
Wiener increments :math:`\Delta W` for each time interval. Multiple
rows may be included for independent trajectories.
U2s: numpy.array(len(times) - 1)
Unused, included to make the argument list uniform with
higher-order integrators.
Returns
-------
Solution
The state of :math:`\rho` for all specified times
"""
rho_0_vec = sb.vectorize(rho_0, self.basis).real
if U1s is None:
U1s = np.random.randn(len(times) -1)
if U2s is None:
U2s = np.random.randn(len(times) -1)
vec_soln = sde.time_ind_taylor_1_5(self.a_fn, self.b_fn_tr_dec,
self.b_dx_b_fn_tr_dec,
self.b_dx_a_fn_tr_dec,
self.a_dx_b_fn_tr_dec,
self.a_dx_a_fn,
self.b_dx_b_dx_b_fn_tr_dec,
self.b_b_dx_dx_b_fn_tr_dec,
self.b_b_dx_dx_a_fn,
rho_0_vec, times, U1s, U2s)
return Solution(vec_soln, self.basis)
class TrDecMilsteinHomodyneIntegrator(MilsteinHomodyneIntegrator):
"""Milstein integrator that does not preserve trace.
Only does the linear evolution, where the decrease in trace now encodes
the likelihood of the particular trajectory. Might be more appropriate to
    include as a particular `integrate_tr_dec` method in preëxisting integrator
classes.
"""
def __init__(self, c_op, M_sq, N, H, basis=None, drift_rep=None,
diffusion_reps=None, **kwargs):
super(TrDecMilsteinHomodyneIntegrator, self).__init__(c_op, M_sq, N, H,
basis, drift_rep,
diffusion_reps,
**kwargs)
self.k_T = 0
self.k_T_G = np.zeros(self.G.shape)
class IntegratorFactory:
r"""Factory that pre-computes things for other integrators.
A class that pre-computes some of the things in common to a family of
integrators one wants to construct instances of.
Parameters
----------
IntClass : Class of integrator
A class inheriting from :class:`GaussIntegrator` that you want to
create many instances of.
precomp_data
Data needed by the `IntClass` constructor common across all integrators
in the family of interest in the form that can be passed to the
`parameter_fn` as `precomp_data`.
parameter_fn
Function that takes the parameters defining the instance of the family
to be generated and the precomputed data and returns ``**kwargs`` to pass
to the constructor of `IntClass`.
"""
def __init__(self, IntClass, precomp_data, parameter_fn):
self.precomp_data = precomp_data
self.parameter_fn = parameter_fn
self.IntClass = IntClass
def make_integrator(self, params):
"""Create a new integrator.
Create a new instance of `IntClass`, feeding `params` and
`precomp_data` to the constructor.
"""
constructor_kwargs = self.parameter_fn(params, self.precomp_data)
return self.IntClass(**constructor_kwargs)
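# --- Usage sketch (illustrative only) ------------------------------------------
# A possible parameter_fn: `params` is interpreted here as a hypothetical Rabi
# frequency and `precomp_data` as a shared coupling operator (sm as above):
#
#     def parameter_fn(Omega, c_op):
#         H = 0.5 * Omega * (c_op + c_op.conj().T)
#         return dict(c_op=c_op, M_sq=0., N=0., H=H)
#
#     factory = IntegratorFactory(EulerHomodyneIntegrator, sm, parameter_fn)
#     integrator_1 = factory.make_integrator(1.0)
#     integrator_2 = factory.make_integrator(2.5)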
class QuasiMarkoff2LvlIntegrator(UncondLindbladIntegrator):
r'''QuasiMarkoff equation for squeezed fields
Assumes the memory time associated with the squeezing is very much less than
the atomic decay rate but can be comparable to or even longer than the Rabi
period. See Equations 1 through 25 in reference:
[YB96] Influence of squeezing bandwidths on resonance fluorescence
G. Yeoman and S. M. Barnett, Journal of Modern Optics 43, 2037 (1996).
https://doi.org/10.1080/09500349608232870
Parameters
----------
gamma : non-negative float
Atomic linewidth.
N_A : non-negative float
The thermal parameter evaluated at atomic transition frequency. See
the text below Eqn 14 in [YB96].
N_Om : non-negative float
The thermal parameter evaluated at atomic transition frequency minus
        the Rabi frequency. See the text below Eqn 14 in [YB96].
M_A : complex float
The squeezing parameter evaluated at atomic transition frequency. See
the text below Eqn 14 in [YB96].
M_Om : complex float
The squeezing parameter evaluated at atomic transition frequency minus
        the Rabi frequency. See the text below Eqn 14 in [YB96].
Delta_AL : float
        Atomic frequency minus carrier (laser) frequency, i.e.,
Delta_AL = omega_A - omega_L.
Omega : float
The Rabi frequency.
phi_L : float
Carrier (laser) frequency phase, see Eq 4 in [YB96].
F_A : complex float
Eq. 18 in [YB96].
G_A : complex float
Eq. 19 in [YB96].
'''
def __init__(self, gamma, N_A, N_Om, M_A, M_Om, Delta_AL, Omega, phi_L, F_A, G_A):
dim = 2
basis = ssb.SparseBasis(dim)
self.basis = basis
sx = np.array([[0, 1], [1, 0]], dtype=np.complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=np.complex)
sz = np.array([[1, 0], [0, -1]], dtype=np.complex)
sp = (sx + 1j*sy)/2
sm = (sx - 1j*sy)/2
# Eqn 16 and 17 in [YB96]
Sm = sm*np.exp(-1j*phi_L)
Sp = sp*np.exp(1j*phi_L)
Y = 1j*sz@(Sp + Sm)
sz_vec = self.basis.vectorize(sz)
Sm_vec = self.basis.vectorize(Sm)
Sp_vec = self.basis.vectorize(Sp)
Y_vec = self.basis.vectorize(Y)
H_vec = ((Omega/2 + 1j*F_A)*(Sp_vec + Sm_vec)
+ (Delta_AL/2)*sz_vec + G_A*Y_vec)
self.Q = -gamma/4*(
(1 + N_A)*(basis.make_double_comm_matrix(Sm_vec, 1)
- 2*basis.make_diff_op_matrix(Sm_vec))
+ N_A*(basis.make_double_comm_matrix(Sp_vec, 1)
- 2*basis.make_diff_op_matrix(Sp_vec))
+ (1 + N_Om)*(-basis.make_double_comm_matrix(Sm_vec, 1)
- 2*basis.make_diff_op_matrix(Sm_vec))
+ N_Om*(-basis.make_double_comm_matrix(Sp_vec, 1)
- 2*basis.make_diff_op_matrix(Sp_vec))
- 2*basis.make_double_comm_matrix(Sm_vec, M_A*np.exp(-2j*phi_L))
+ 2*basis.make_real_comm_matrix(M_A*np.exp(-2j*phi_L)*Sp_vec,
Sp_vec)
+ 2*basis.make_real_comm_matrix(M_A.conj()*np.exp(2j*phi_L)
*Sm_vec, Sm_vec)
- 2*basis.make_double_comm_matrix(Sm_vec,
M_Om*np.exp(-2j*phi_L))
- 2*basis.make_real_comm_matrix(M_Om*np.exp(-2j*phi_L)*Sp_vec,
Sp_vec)
- 2*basis.make_real_comm_matrix(M_Om.conj()*np.exp(2j*phi_L)
*Sm_vec, Sm_vec))
self.Q += basis.make_hamil_comm_matrix(H_vec)
self.Q += -2*basis.make_real_sand_matrix(F_A*(Sp_vec - Sm_vec), sz_vec)
self.Q += -2*basis.make_real_sand_matrix(G_A*(Sp_vec + Sm_vec), sz_vec)
|
CQuIC/pysme
|
src/pysme/integrate.py
|
Python
|
mit
| 62,208
|
[
"Gaussian"
] |
078969c5af5f72b74f6b69d48ce07687d9bf7f8806257d6b6c71bb5d72459731
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
---
module: svc
author: Brian Coca
version_added:
short_description: Manage daemontools services.
description:
- Controls daemontools services on remote hosts using the svc utility.
options:
name:
required: true
description:
- Name of the service to manage.
state:
required: false
    choices: [ started, stopped, restarted, killed, reloaded, once ]
description:
- C(Started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
svc (svc -t) and C(killed) will always bounce the svc (svc -k).
        C(reloaded) will send a SIGUSR1 (svc -1).
C(once) will run a normally downed svc once (svc -o), not really
an idempotent operation.
downed:
required: false
choices: [ "yes", "no" ]
default: no
description:
      - Whether a 'down' file should exist or not; if it exists it disables auto startup.
        Defaults to no. Downed does not imply stopped.
enabled:
required: false
choices: [ "yes", "no" ]
description:
      - Whether the service is enabled or not; if disabled it also implies stopped.
        Note that a service can be enabled and downed (no auto restart).
service_dir:
required: false
default: /service
description:
      - Directory that svscan watches for services.
service_src:
required: false
description:
      - Directory where services are defined; the source of symlinks to service_dir.
'''
EXAMPLES = '''
# Example action to start svc dnscache, if not running
- svc: name=dnscache state=started
# Example action to stop svc dnscache, if running
- svc: name=dnscache state=stopped
# Example action to kill svc dnscache, in all cases
- svc : name=dnscache state=killed
# Example action to restart svc dnscache, in all cases
- svc : name=dnscache state=restarted
# Example action to reload svc dnscache, in all cases
- svc: name=dnscache state=reloaded
# Example using alt svc directory location
- svc: name=dnscache state=reloaded service_dir=/var/service
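# Illustrative example combining the enabled and downed options (values hypothetical)
- svc: name=dnscache enabled=yes downed=yes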
'''
import platform
import shlex
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Svc(object):
"""
    Main class that handles daemontools, can be subclassed and overridden in case
we want to use a 'derivative' like encore, s6, etc
"""
#def __new__(cls, *args, **kwargs):
# return _load_dist_subclass(cls, args, kwargs)
def __init__(self, module):
self.extra_paths = [ '/command', '/usr/local/bin' ]
self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
self.module = module
self.name = module.params['name']
self.service_dir = module.params['service_dir']
self.service_src = module.params['service_src']
self.enabled = None
self.downed = None
self.full_state = None
self.state = None
self.pid = None
self.duration = None
self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths)
self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths)
self.svc_full = '/'.join([ self.service_dir, self.name ])
self.src_full = '/'.join([ self.service_src, self.name ])
self.enabled = os.path.lexists(self.svc_full)
if self.enabled:
self.downed = os.path.lexists('%s/down' % self.svc_full)
self.get_status()
else:
self.downed = os.path.lexists('%s/down' % self.src_full)
self.state = 'stopped'
def enable(self):
if os.path.exists(self.src_full):
try:
os.symlink(self.src_full, self.svc_full)
except OSError, e:
self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e))
else:
self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
def disable(self):
try:
os.unlink(self.svc_full)
except OSError, e:
self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e))
self.execute_command([self.svc_cmd,'-dx',self.src_full])
src_log = '%s/log' % self.src_full
if os.path.exists(src_log):
self.execute_command([self.svc_cmd,'-dx',src_log])
def get_status(self):
(rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])
if err is not None and err:
self.full_state = self.state = err
else:
self.full_state = out
m = re.search('\(pid (\d+)\)', out)
if m:
self.pid = m.group(1)
m = re.search('(\d+) seconds', out)
if m:
self.duration = m.group(1)
if re.search(' up ', out):
self.state = 'start'
elif re.search(' down ', out):
self.state = 'stopp'
else:
self.state = 'unknown'
return
if re.search(' want ', out):
self.state += 'ing'
else:
self.state += 'ed'
def start(self):
return self.execute_command([self.svc_cmd, '-u', self.svc_full])
def stopp(self):
return self.stop()
def stop(self):
return self.execute_command([self.svc_cmd, '-d', self.svc_full])
def once(self):
return self.execute_command([self.svc_cmd, '-o', self.svc_full])
def reload(self):
return self.execute_command([self.svc_cmd, '-1', self.svc_full])
def restart(self):
return self.execute_command([self.svc_cmd, '-t', self.svc_full])
def kill(self):
return self.execute_command([self.svc_cmd, '-k', self.svc_full])
def execute_command(self, cmd):
try:
(rc, out, err) = self.module.run_command(' '.join(cmd))
except Exception, e:
self.module.fail_json(msg="failed to execute: %s" % str(e))
return (rc, out, err)
def report(self):
self.get_status()
states = {}
for k in self.report_vars:
states[k] = self.__dict__[k]
return states
# ===========================================
# Main control flow
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']),
enabled = dict(required=False, type='bool', choices=BOOLEANS),
downed = dict(required=False, type='bool', choices=BOOLEANS),
dist = dict(required=False, default='daemontools'),
service_dir = dict(required=False, default='/service'),
service_src = dict(required=False, default='/etc/service'),
),
supports_check_mode=True,
)
state = module.params['state']
enabled = module.params['enabled']
downed = module.params['downed']
svc = Svc(module)
changed = False
orig_state = svc.report()
if enabled is not None and enabled != svc.enabled:
changed = True
if not module.check_mode:
try:
if enabled:
svc.enable()
else:
svc.disable()
except (OSError, IOError), e:
module.fail_json(msg="Could change service link: %s" % str(e))
if state is not None and state != svc.state:
changed = True
if not module.check_mode:
getattr(svc,state[:-2])()
if downed is not None and downed != svc.downed:
changed = True
if not module.check_mode:
d_file = "%s/down" % svc.svc_full
try:
if downed:
open(d_file, "a").close()
else:
os.unlink(d_file)
except (OSError, IOError), e:
module.fail_json(msg="Could change downed file: %s " % (str(e)))
module.exit_json(changed=changed, svc=svc.report())
# this is magic, not normal python include
from ansible.module_utils.basic import *
main()
|
ravello/ansible-modules-extras
|
system/svc.py
|
Python
|
gpl-3.0
| 8,918
|
[
"Brian"
] |
53b5ad0a95022e4d7bd2c9a79d0c427721d77fe0a541c89ae5a356432f4bc869
|
import chemistry
from openmoltools import cirpy
import mdtraj as md
import pymbar
import os
import pandas as pd
import glob
import dipole_errorbars
from density_simulation_parameters import DATA_PATH
num_bootstrap = 100
fixed_block_length = 20 # 200 ps blocks for dielectric error bar block averaging.
prmtop_filenames = glob.glob(DATA_PATH + "/tleap/*.prmtop")
filename_munger = lambda filename: os.path.splitext(os.path.split(filename)[1])[0].split("_")
data = []
for prmtop_filename in prmtop_filenames:
cas, n_molecules, temperature = filename_munger(prmtop_filename)
print(cas, temperature)
dcd_filename = DATA_PATH + "/production/%s_%s_%s_production.dcd" % (cas, n_molecules, temperature)
csv_filename = DATA_PATH + "/production/%s_%s_%s_production.csv" % (cas, n_molecules, temperature)
try:
traj = md.load(dcd_filename, top=prmtop_filename)
except IOError:
continue
if traj.unitcell_lengths is None: continue
rho = pd.read_csv(csv_filename)["Density (g/mL)"].values * 1000. # g / mL -> kg /m3
initial_traj_length = len(traj)
initial_density_length = len(rho)
[t0, g, Neff] = pymbar.timeseries.detectEquilibration(rho)
mu = rho[t0:].mean()
sigma = rho[t0:].std() * Neff ** -0.5
prmtop = chemistry.load_file(prmtop_filename)
charges = prmtop.to_dataframe().charge.values
temperature = float(temperature)
    traj = traj[t0 * len(traj) // len(rho):]  # drop un-equilibrated frames; integer division keeps the index valid
dielectric = md.geometry.static_dielectric(traj, charges, temperature)
dielectric_sigma_fixedblock = dipole_errorbars.bootstrap_old(traj, charges, temperature, fixed_block_length)[1]
block_length = dipole_errorbars.find_block_size(traj, charges, temperature)
dielectric_sigma = dipole_errorbars.bootstrap(traj, charges, temperature, block_length, num_bootstrap)
formula = cirpy.resolve(cas, "formula")
data.append(dict(cas=cas, temperature=temperature, n_trimmed=t0, inefficiency=g, initial_traj_length=initial_traj_length, initial_density_length=initial_density_length, density=mu, density_sigma=sigma, Neff=Neff, n_frames=traj.n_frames, dielectric=dielectric, dielectric_sigma=dielectric_sigma, dielectric_sigma_fixedblock=dielectric_sigma_fixedblock, block_length=block_length, formula=formula))
print(data[-1])
data = pd.DataFrame(data)
data.to_csv("./tables/predictions.csv")
|
choderalab/LiquidBenchmark
|
src/munge_output_amber.py
|
Python
|
gpl-2.0
| 2,349
|
[
"MDTraj"
] |
8a865290296c56867207dba16af4610a4af9903d5acb2f523673220906988352
|
#!/usr/bin/env python
"""
=================
dMRI: Camino, DTI
=================
Introduction
============
This script, dmri_camino_dti.py, demonstrates the ability to perform basic diffusion analysis
in a Nipype pipeline.
python dmri_camino_dti.py
We perform this analysis using the FSL course data, which can be acquired from here:
http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
Import necessary modules from nipype.
"""
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.interfaces.camino as camino
import nipype.interfaces.fsl as fsl
import nipype.interfaces.camino2trackvis as cam2trk
import nipype.algorithms.misc as misc
import os # system functions
"""
We use the following functions to scrape the voxel and data dimensions of the input images. This allows the
pipeline to be flexible enough to accept and process images of varying size. The SPM Face tutorial
(fmri_spm_face.py) also implements this inference of voxel size from the data.
"""
def get_vox_dims(volume):
import nibabel as nb
if isinstance(volume, list):
volume = volume[0]
nii = nb.load(volume)
hdr = nii.get_header()
voxdims = hdr.get_zooms()
return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])]
def get_data_dims(volume):
import nibabel as nb
if isinstance(volume, list):
volume = volume[0]
nii = nb.load(volume)
hdr = nii.get_header()
datadims = hdr.get_data_shape()
return [int(datadims[0]), int(datadims[1]), int(datadims[2])]
def get_affine(volume):
import nibabel as nb
nii = nb.load(volume)
return nii.get_affine()
subject_list = ['subj1']
fsl.FSLCommand.set_default_output_type('NIFTI')
"""
Map field names to individual subject runs
"""
info = dict(dwi=[['subject_id', 'data']],
bvecs=[['subject_id','bvecs']],
bvals=[['subject_id','bvals']])
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),
name="infosource")
"""Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``datasource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""
infosource.iterables = ('subject_id', subject_list)
"""
Now we create a :class:`nipype.interfaces.io.DataGrabber` object and
fill in the information from above about the layout of our data. The
:class:`nipype.pipeline.engine.Node` module wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""
datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
outfields=info.keys()),
name = 'datasource')
datasource.inputs.template = "%s/%s"
# This needs to point to the fdt folder you can find after extracting
# http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
datasource.inputs.base_directory = os.path.abspath('fsl_course_data/fdt/')
datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
"""
An inputnode is used to pass the data obtained by the data grabber to the actual processing functions
"""
inputnode = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode")
"""
Setup for Diffusion Tensor Computation
--------------------------------------
In this section we create the nodes necessary for diffusion analysis.
First, the diffusion image is converted to voxel order.
"""
image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel")
fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme")
fsl2scheme.inputs.usegradmod = True
"""
Second, diffusion tensors are fit to the voxel-order data.
"""
dtifit = pe.Node(interface=camino.DTIFit(),name='dtifit')
"""
Next, a lookup table is generated from the schemefile and the
signal-to-noise ratio (SNR) of the unweighted (q=0) data.
"""
dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen")
dtlutgen.inputs.snr = 16.0
dtlutgen.inputs.inversion = 1
"""
In this tutorial we implement probabilistic tractography using the PICo algorithm.
PICo tractography requires an estimate of the fibre direction and a model of its
uncertainty in each voxel; this is produced using the following node.
"""
picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs")
picopdfs.inputs.inputmodel = 'dt'
"""
An FSL BET node creates a brain mask from the diffusion image for seeding the PICo tractography.
"""
bet = pe.Node(interface=fsl.BET(), name="bet")
bet.inputs.mask = True
"""
Finally, tractography is performed.
First DT streamline tractography.
"""
trackdt = pe.Node(interface=camino.TrackDT(), name="trackdt")
"""
Now camino's Probabilistic Index of Connectivity algorithm.
In this tutorial, we will use only 1 iteration for time-saving purposes.
"""
trackpico = pe.Node(interface=camino.TrackPICo(), name="trackpico")
trackpico.inputs.iterations = 1
"""
Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to
convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse.
"""
cam2trk_dt = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_dt")
cam2trk_dt.inputs.min_length = 30
cam2trk_dt.inputs.voxel_order = 'LAS'
cam2trk_pico = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_pico")
cam2trk_pico.inputs.min_length = 30
cam2trk_pico.inputs.voxel_order = 'LAS'
trk2camino = pe.Node(interface=cam2trk.Trackvis2Camino(), name="trk2camino")
"""
Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview,
using the following two nodes. For VTK use VtkStreamlines.
"""
procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines")
procstreamlines.inputs.outputtracts = 'oogl'
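# Sketch only (not connected into the workflow below): VTK output could be produced
# with an analogous node, assuming the camino.VtkStreamlines interface is available
# in this nipype version.
# vtkstreamlines = pe.Node(interface=camino.VtkStreamlines(), name="vtkstreamlines")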
"""
We can also produce a variety of scalar values from our fitted tensors. The following nodes generate the
fractional anisotropy and diffusivity trace maps and their associated headers.
"""
fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(),name='fa')
trace = pe.Node(interface=camino.ComputeTensorTrace(),name='trace')
dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig')
analyzeheader_fa = pe.Node(interface= camino.AnalyzeHeader(), name = "analyzeheader_fa")
analyzeheader_fa.inputs.datatype = "double"
analyzeheader_trace = analyzeheader_fa.clone('analyzeheader_trace')
fa2nii = pe.Node(interface=misc.CreateNifti(),name='fa2nii')
trace2nii = fa2nii.clone("trace2nii")
"""
Since we have now created all our nodes, we can define our workflow and start making connections.
"""
tractography = pe.Workflow(name='tractography')
tractography.connect([(inputnode, bet,[("dwi","in_file")])])
"""
File format conversion
"""
tractography.connect([(inputnode, image2voxel, [("dwi", "in_file")]),
(inputnode, fsl2scheme, [("bvecs", "bvec_file"),
("bvals", "bval_file")])
])
"""
Tensor fitting
"""
tractography.connect([(image2voxel, dtifit,[['voxel_order','in_file']]),
(fsl2scheme, dtifit,[['scheme','scheme_file']])
])
"""
Workflow for applying DT streamline tractography
"""
tractography.connect([(bet, trackdt,[("mask_file","seed_file")])])
tractography.connect([(dtifit, trackdt,[("tensor_fitted","in_file")])])
"""
Workflow for applying PICo
"""
tractography.connect([(bet, trackpico,[("mask_file","seed_file")])])
tractography.connect([(fsl2scheme, dtlutgen,[("scheme","scheme_file")])])
tractography.connect([(dtlutgen, picopdfs,[("dtLUT","luts")])])
tractography.connect([(dtifit, picopdfs,[("tensor_fitted","in_file")])])
tractography.connect([(picopdfs, trackpico,[("pdfs","in_file")])])
# ProcStreamlines might throw memory errors - comment this line out in such case
tractography.connect([(trackdt, procstreamlines,[("tracked","in_file")])])
"""
Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the
tensor fitting.
This is also where our voxel- and data-grabbing functions come in. We pass these functions, along with
the original DWI image from the input node, to the header-generating nodes. This ensures that the files
will be correct and readable.
"""
tractography.connect([(dtifit, fa,[("tensor_fitted","in_file")])])
tractography.connect([(fa, analyzeheader_fa,[("fa","in_file")])])
tractography.connect([(inputnode, analyzeheader_fa,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(fa, fa2nii,[('fa','data_file')])])
tractography.connect([(inputnode, fa2nii,[(('dwi', get_affine), 'affine')])])
tractography.connect([(analyzeheader_fa, fa2nii,[('header', 'header_file')])])
tractography.connect([(dtifit, trace,[("tensor_fitted","in_file")])])
tractography.connect([(trace, analyzeheader_trace,[("trace","in_file")])])
tractography.connect([(inputnode, analyzeheader_trace,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(trace, trace2nii,[('trace','data_file')])])
tractography.connect([(inputnode, trace2nii,[(('dwi', get_affine), 'affine')])])
tractography.connect([(analyzeheader_trace, trace2nii,[('header', 'header_file')])])
tractography.connect([(dtifit, dteig,[("tensor_fitted","in_file")])])
tractography.connect([(trackpico, cam2trk_pico, [('tracked','in_file')])])
tractography.connect([(trackdt, cam2trk_dt, [('tracked','in_file')])])
tractography.connect([(inputnode, cam2trk_pico,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(inputnode, cam2trk_dt,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
"""
Finally, we create another higher-level workflow to connect our tractography workflow with the info and datagrabbing nodes
declared at the beginning. Our tutorial is now extensible to an arbitrary number of subjects by simply adding
their names to the subject list and their data to the proper folders.
"""
workflow = pe.Workflow(name="workflow")
workflow.base_dir = os.path.abspath('camino_dti_tutorial')
workflow.connect([(infosource,datasource,[('subject_id', 'subject_id')]),
(datasource,tractography,[('dwi','inputnode.dwi'),
('bvals','inputnode.bvals'),
('bvecs','inputnode.bvecs')
])
])
"""
The following code runs the whole workflow and produces a .dot and .png graph of the processing pipeline.
"""
if __name__ == '__main__':
workflow.run()
workflow.write_graph()
"""
You can choose the format of the exported graph with the ``format`` option. For example ``workflow.write_graph(format='eps')``
"""
|
rameshvs/nipype
|
examples/dmri_camino_dti.py
|
Python
|
bsd-3-clause
| 11,489
|
[
"ParaView",
"VTK"
] |
6845237fa0e42981febf08eb82271db5b16eb04c71c1f705c5132cf592ffa293
|
import datetime
import faker
import re
import threading
import sqlalchemy
import sqlalchemy.orm
from sqlalchemy import or_, and_
from sqlalchemy.sql import expression
from typing import Collection
# Note: leaving for future use if we go back to using a relationship to PatientStatus table.
# from sqlalchemy.orm import selectinload
from werkzeug.exceptions import BadRequest, NotFound
from rdr_service import clock, config
from rdr_service.api_util import (
format_json_code,
format_json_date,
format_json_enum,
format_json_hpo,
format_json_org,
format_json_site,
parse_json_enum
)
from rdr_service.app_util import is_care_evo_and_not_prod
from rdr_service.code_constants import BIOBANK_TESTS, ORIGINATING_SOURCES, PMI_SKIP_CODE, PPI_SYSTEM, UNSET
from rdr_service.dao.base_dao import UpdatableDao
from rdr_service.dao.code_dao import CodeDao
from rdr_service.dao.database_utils import get_sql_and_params_for_array, replace_null_safe_equals
from rdr_service.dao.hpo_dao import HPODao
from rdr_service.dao.organization_dao import OrganizationDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.dao.participant_incentives_dao import ParticipantIncentivesDao
from rdr_service.dao.patient_status_dao import PatientStatusDao
from rdr_service.dao.site_dao import SiteDao
from rdr_service.model.config_utils import from_client_biobank_id, to_client_biobank_id
from rdr_service.model.consent_file import ConsentType
from rdr_service.model.retention_eligible_metrics import RetentionEligibleMetrics
from rdr_service.model.participant_summary import (
ParticipantGenderAnswers,
ParticipantRaceAnswers,
ParticipantSummary,
WITHDRAWN_PARTICIPANT_FIELDS,
WITHDRAWN_PARTICIPANT_VISIBILITY_TIME
)
from rdr_service.model.patient_status import PatientStatus
from rdr_service.model.utils import get_property_type, to_client_participant_id
from rdr_service.participant_enums import (
BiobankOrderStatus,
EhrStatus,
EnrollmentStatus,
DeceasedStatus,
ConsentExpireStatus,
GenderIdentity,
ParticipantCohort,
PatientStatusFlag,
PhysicalMeasurementsStatus,
QuestionnaireStatus,
Race,
SampleCollectionMethod,
SampleStatus,
SuspensionStatus,
WithdrawalStatus,
get_bucketed_age
)
from rdr_service.query import FieldFilter, FieldJsonContainsFilter, Operator, OrderBy, PropertyType
# By default / secondarily order by last name, first name, DOB, and participant ID
_ORDER_BY_ENDING = ("lastName", "firstName", "dateOfBirth", "participantId")
# The default ordering of results for queries for withdrawn participants.
_WITHDRAWN_ORDER_BY_ENDING = ("withdrawalTime", "participantId")
_CODE_FILTER_FIELDS = ("organization", "site", "awardee")
_SITE_FIELDS = (
"physicalMeasurementsCreatedSite",
"physicalMeasurementsFinalizedSite",
"biospecimenSourceSite",
"biospecimenCollectedSite",
"biospecimenProcessedSite",
"biospecimenFinalizedSite",
"site",
"enrollmentSite",
)
# Lazy caches of property names for client JSON conversion.
_DATE_FIELDS = set()
_ENUM_FIELDS = set()
_CODE_FIELDS = set()
_fields_lock = threading.RLock()
# Query used to update the enrollment status for all participant summaries after
# a Biobank samples import.
# TODO(DA-631): This should likely be a conditional update (e.g. see
# baseline/dna updates) which updates last modified.
_ENROLLMENT_STATUS_CASE_SQL = """
CASE WHEN (consent_for_study_enrollment = :submitted
AND consent_for_electronic_health_records = :submitted
AND (consent_cohort != :cohort_3 OR
(consent_for_genomics_ror BETWEEN :submitted AND :submitted_not_sure)
)
AND num_completed_baseline_ppi_modules = :num_baseline_ppi_modules
AND physical_measurements_status = :completed
AND samples_to_isolate_dna = :received) OR
(consent_for_study_enrollment = :submitted
AND consent_for_electronic_health_records = :unset
AND consent_for_dv_electronic_health_records_sharing = :submitted
AND (consent_cohort != :cohort_3 OR
(consent_for_genomics_ror BETWEEN :submitted AND :submitted_not_sure)
)
AND num_completed_baseline_ppi_modules = :num_baseline_ppi_modules
AND physical_measurements_status = :completed
AND samples_to_isolate_dna = :received)
THEN :full_participant
WHEN (consent_for_study_enrollment = :submitted
AND consent_for_electronic_health_records = :submitted
AND (consent_cohort != :cohort_3 OR
(consent_for_genomics_ror BETWEEN :submitted AND :submitted_not_sure)
)
AND num_completed_baseline_ppi_modules = :num_baseline_ppi_modules
AND physical_measurements_status != :completed
AND samples_to_isolate_dna = :received) OR
(consent_for_study_enrollment = :submitted
AND consent_for_electronic_health_records = :unset
AND consent_for_dv_electronic_health_records_sharing = :submitted
AND (consent_cohort != :cohort_3 OR
(consent_for_genomics_ror BETWEEN :submitted AND :submitted_not_sure)
)
AND num_completed_baseline_ppi_modules = :num_baseline_ppi_modules
AND physical_measurements_status != :completed
AND samples_to_isolate_dna = :received)
THEN :core_minus_pm
WHEN (consent_for_study_enrollment = :submitted
AND consent_for_electronic_health_records = :submitted) OR
(consent_for_study_enrollment = :submitted
AND consent_for_electronic_health_records = :unset
AND consent_for_dv_electronic_health_records_sharing = :submitted
)
THEN :member
ELSE :interested
END
"""
_ENROLLMENT_STATUS_SQL = """
UPDATE
participant_summary
SET
enrollment_status = {enrollment_status_case_sql},
last_modified = :now
WHERE
(
(enrollment_status != :full_participant and enrollment_status != :core_minus_pm)
OR
(enrollment_status = :core_minus_pm AND :full_participant = {enrollment_status_case_sql})
)
AND enrollment_status != {enrollment_status_case_sql}
""".format(
enrollment_status_case_sql=_ENROLLMENT_STATUS_CASE_SQL
)
# DA-614 - Notes: Because there can be multiple distinct samples with the same test for a
# participant, we can't show them all in the participant summary. The HealthPro team
# wants to see the status and timestamp of received records over disposed records. Currently
# this SQL sets a generic disposed status instead of the specific disposal status. The
# HealthPro team wants a new API that queries biobank_stored_samples and gets the specific
# disposed status there instead of from the participant summary.
_SAMPLE_SQL = """,
sample_status_%(test)s =
CASE WHEN EXISTS(SELECT * FROM biobank_stored_sample bss
WHERE bss.biobank_id = ps.biobank_id
AND bss.test = %(sample_param_ref)s)
THEN
# DA-614 - Only set disposed status when ALL samples for this test are disposed of.
CASE WHEN (SELECT MIN(bss.status) FROM biobank_stored_sample bss
WHERE bss.biobank_id = ps.biobank_id
AND bss.test = %(sample_param_ref)s) >= :disposed_bad
THEN :disposed
ELSE :received END
ELSE :unset END,
sample_status_%(test)s_time =
CASE WHEN EXISTS(SELECT * FROM biobank_stored_sample bss
WHERE bss.biobank_id = ps.biobank_id AND bss.test = %(sample_param_ref)s)
THEN
# DA-614 - Only use disposed datetime when ALL samples for this test are disposed of.
CASE WHEN (SELECT MIN(bss.status) FROM biobank_stored_sample bss
WHERE bss.biobank_id = ps.biobank_id
AND bss.test = %(sample_param_ref)s) >= :disposed_bad
THEN (SELECT MAX(disposed) from biobank_stored_sample bss
WHERE bss.biobank_id = ps.biobank_id
AND bss.test = %(sample_param_ref)s)
ELSE (SELECT MAX(confirmed) from biobank_stored_sample bss
WHERE bss.biobank_id = ps.biobank_id and (bss.status < :disposed_bad or bss.status is null)
AND bss.test = %(sample_param_ref)s)
END
ELSE NULL END
"""
_COLLECTION_METHOD_CASE_SQL = f"""
# Results in NULL if an order wasn't found (since we're unsure how the order was made)
CASE
WHEN bmko.id IS NOT NULL
# there's a mail-kit order tied to the sample
THEN {int(SampleCollectionMethod.MAIL_KIT)}
WHEN bo.biobank_order_id IS NOT NULL
# there's an order created for the sample, but no mail-kit order tied to it
THEN {int(SampleCollectionMethod.ON_SITE)}
ELSE
# there's no order for the sample
{int(SampleCollectionMethod.UNSET)}
END
"""
_SAMPLE_COLLECTION_METHOD_SQL = f""",
sample_%(test)s_collection_method =
CASE WHEN EXISTS(SELECT * FROM biobank_stored_sample bss
WHERE bss.biobank_id = ps.biobank_id AND bss.test = %(sample_param_ref)s)
THEN
# Use the same sample that is used to set the status and time fields
CASE WHEN (SELECT MIN(bss.status) FROM biobank_stored_sample bss
WHERE bss.biobank_id = ps.biobank_id
AND bss.test = %(sample_param_ref)s) >= :disposed_bad
THEN (
SELECT {_COLLECTION_METHOD_CASE_SQL}
FROM biobank_stored_sample bss
LEFT JOIN biobank_order_identifier boi on boi.value = bss.biobank_order_identifier
LEFT JOIN biobank_order bo on bo.biobank_order_id = boi.biobank_order_id
LEFT JOIN biobank_mail_kit_order bmko on bmko.biobank_order_id = bo.biobank_order_id
WHERE bss.biobank_id = ps.biobank_id AND bss.test = %(sample_param_ref)s
ORDER BY disposed DESC
LIMIT 1)
ELSE (
SELECT {_COLLECTION_METHOD_CASE_SQL}
FROM biobank_stored_sample bss
LEFT JOIN biobank_order_identifier boi on boi.value = bss.biobank_order_identifier
LEFT JOIN biobank_order bo on bo.biobank_order_id = boi.biobank_order_id
LEFT JOIN biobank_mail_kit_order bmko on bmko.biobank_order_id = bo.biobank_order_id
WHERE bss.biobank_id = ps.biobank_id and (bss.status < :disposed_bad or bss.status is null)
AND bss.test = %(sample_param_ref)s
ORDER BY confirmed DESC
LIMIT 1)
END
ELSE NULL END
"""
_WHERE_SQL = """
not ps.sample_status_%(test)s_time <=>
(SELECT MAX(bss.confirmed) FROM biobank_stored_sample bss
WHERE bss.biobank_id = ps.biobank_id
AND bss.test = %(sample_param_ref)s)
"""
def _get_sample_sql_and_params(now):
"""Gets SQL and params needed to update status and time fields on the participant summary for
each biobank sample.
"""
sql = """
UPDATE
participant_summary ps
SET
ps.last_modified = :now
"""
params = {
"received": int(SampleStatus.RECEIVED),
"unset": int(SampleStatus.UNSET),
"disposed": int(SampleStatus.DISPOSED),
# DA-871: use first bad disposed reason code value.
"disposed_bad": int(SampleStatus.SAMPLE_NOT_RECEIVED),
"now": now,
}
where_sql = ""
for i in range(0, len(BIOBANK_TESTS)):
sample_param = "sample%d" % i
sample_param_ref = ":%s" % sample_param
lower_test = BIOBANK_TESTS[i].lower()
sql += _SAMPLE_SQL % {"test": lower_test, "sample_param_ref": sample_param_ref}
if lower_test == '1sal2':
sql += _SAMPLE_COLLECTION_METHOD_SQL % {"test": lower_test, "sample_param_ref": sample_param_ref}
params[sample_param] = BIOBANK_TESTS[i]
if where_sql != "":
where_sql += " or "
where_sql += _WHERE_SQL % {"test": lower_test, "sample_param_ref": sample_param_ref}
sql += " WHERE " + where_sql
return sql, params
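# Hedged usage sketch (not part of the original module): the statement built above is
# executed inside ParticipantSummaryDao.update_from_biobank_stored_samples further down,
# roughly as:
#   sample_sql, sample_params = _get_sample_sql_and_params(clock.CLOCK.now())
#   session.execute(replace_null_safe_equals(sample_sql), sample_params)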
def _get_baseline_sql_and_params():
tests_sql, params = get_sql_and_params_for_array(
config.getSettingList(config.BASELINE_SAMPLE_TEST_CODES), "baseline"
)
return (
"""
(
SELECT
COUNT(*)
FROM
biobank_stored_sample
WHERE
biobank_stored_sample.biobank_id = participant_summary.biobank_id
AND biobank_stored_sample.confirmed IS NOT NULL
AND biobank_stored_sample.test IN %s
)
"""
% (tests_sql),
params,
)
def _get_dna_isolates_sql_and_params():
tests_sql, params = get_sql_and_params_for_array(config.getSettingList(config.DNA_SAMPLE_TEST_CODES), "dna")
params.update({"received": int(SampleStatus.RECEIVED), "unset": int(SampleStatus.UNSET)})
return (
"""
(
CASE WHEN EXISTS(SELECT * FROM biobank_stored_sample
WHERE biobank_stored_sample.biobank_id = participant_summary.biobank_id
AND biobank_stored_sample.confirmed IS NOT NULL
AND biobank_stored_sample.test IN %s)
THEN :received ELSE :unset END
)
"""
% (tests_sql),
params,
)
def _get_status_time_sql():
dns_test_list = config.getSettingList(config.DNA_SAMPLE_TEST_CODES)
status_time_sql = "%s" % ",".join(
["""COALESCE(sample_status_%s_time, '3000-01-01')""" % item for item in dns_test_list]
)
return status_time_sql
def _get_baseline_ppi_module_sql():
baseline_ppi_module_fields = config.getSettingList(config.BASELINE_PPI_QUESTIONNAIRE_FIELDS, [])
baseline_ppi_module_sql = "%s" % ",".join(
["""%s_time""" % re.sub("(?<!^)(?=[A-Z])", "_", item).lower() for item in baseline_ppi_module_fields]
)
return baseline_ppi_module_sql
def _get_sample_status_time_sql_and_params():
"""Gets SQL that to update enrollmentStatusCoreStoredSampleTime field
on the participant summary.
"""
status_time_sql = _get_status_time_sql()
baseline_ppi_module_sql = _get_baseline_ppi_module_sql()
sub_sql = """
SELECT
participant_id,
GREATEST(
CASE WHEN enrollment_status_member_time IS NOT NULL THEN enrollment_status_member_time
ELSE consent_for_electronic_health_records_time
END,
physical_measurements_finalized_time,
{baseline_ppi_module_sql},
CASE WHEN
LEAST(
{status_time_sql}
) = '3000-01-01' THEN NULL
ELSE LEAST(
{status_time_sql}
)
END
) AS new_core_stored_sample_time
FROM
participant_summary
""".format(
status_time_sql=status_time_sql, baseline_ppi_module_sql=baseline_ppi_module_sql
)
sql = """
UPDATE
participant_summary AS a
INNER JOIN ({sub_sql}) AS b ON a.participant_id = b.participant_id
SET
a.enrollment_status_core_stored_sample_time = b.new_core_stored_sample_time
WHERE a.enrollment_status = 3
AND a.enrollment_status_core_stored_sample_time IS NULL
""".format(
sub_sql=sub_sql
)
return sql
def _get_core_minus_pm_time_sql_and_params():
"""
    Gets SQL to update the enrollmentStatusCoreMinusPMTime field on the participant summary.
"""
status_time_sql = _get_status_time_sql()
baseline_ppi_module_sql = _get_baseline_ppi_module_sql()
sub_sql = """
SELECT
participant_id,
GREATEST(
CASE WHEN enrollment_status_member_time IS NOT NULL THEN enrollment_status_member_time
ELSE consent_for_electronic_health_records_time
END,
{baseline_ppi_module_sql},
CASE WHEN
LEAST(
{status_time_sql}
) = '3000-01-01' THEN NULL
ELSE LEAST(
{status_time_sql}
)
END
) AS core_minus_pm_time
FROM
participant_summary
""".format(
status_time_sql=status_time_sql, baseline_ppi_module_sql=baseline_ppi_module_sql
)
sql = """
UPDATE
participant_summary AS a
INNER JOIN ({sub_sql}) AS b ON a.participant_id = b.participant_id
SET
a.enrollment_status_core_minus_pm_time = b.core_minus_pm_time
WHERE a.enrollment_status = 4
AND a.enrollment_status_core_minus_pm_time IS NULL
""".format(
sub_sql=sub_sql
)
return sql
class ParticipantSummaryDao(UpdatableDao):
def __init__(self):
super(ParticipantSummaryDao, self).__init__(ParticipantSummary, order_by_ending=_ORDER_BY_ENDING)
self.hpo_dao = HPODao()
self.code_dao = CodeDao()
self.site_dao = SiteDao()
self.organization_dao = OrganizationDao()
self.patient_status_dao = PatientStatusDao()
self.participant_dao = ParticipantDao()
self.incentive_dao = ParticipantIncentivesDao()
self.faker = faker.Faker()
self.hpro_consents = []
self.participant_incentives = []
# pylint: disable=unused-argument
def from_client_json(self, resource, participant_id, client_id):
column_names = self.to_dict(self.model_type)
participant = self.participant_dao.get(participant_id)
static_keys = ["participantId", "biobankId"]
payload_attrs = {key: value for key, value in resource.items()
if key in column_names and key not in
static_keys}
default_attrs = {
"participantId": participant.participantId,
"biobankId": participant.biobankId,
"hpoId": participant.hpoId,
"firstName": self.faker.first_name(),
"lastName": self.faker.first_name(),
"withdrawalStatus": WithdrawalStatus.NOT_WITHDRAWN,
"suspensionStatus": SuspensionStatus.NOT_SUSPENDED,
"participantOrigin": participant.participantOrigin,
"isEhrDataAvailable": False,
}
default_attrs.update(payload_attrs)
self.parse_resource_enums(default_attrs)
return self.model_type(**default_attrs)
def get_id(self, obj):
return obj.participantId
def get_with_children(self, obj_id):
with self.session() as session:
# Note: leaving for future use if we go back to using a relationship to PatientStatus table.
# return self.get_with_session(session, obj_id,
# options=self.get_eager_child_loading_query_options())
return self.get_with_session(session, obj_id)
@classmethod
def get_by_ids_with_session(cls, session: sqlalchemy.orm.Session,
obj_ids: Collection) -> Collection[ParticipantSummary]:
return session.query(
ParticipantSummary
).filter(
ParticipantSummary.participantId.in_(obj_ids)
).all()
def get_by_participant_id(self, participant_id):
with self.session() as session:
return session.query(
ParticipantSummary
).filter(
ParticipantSummary.participantId == participant_id
).one_or_none()
def _validate_update(self, session, obj, existing_obj): # pylint: disable=unused-argument
"""Participant summaries don't have a version value; drop it from validation logic."""
if not existing_obj:
raise NotFound(f"{self.model_type.__name__} with id {id} does not exist")
def parse_resource_enums(self, resource):
for key in resource.keys():
if key in self.to_dict(self.model_type):
_type = getattr(self.model_type, key)
if _type.expression.type.__class__.__name__.lower() == 'enum':
_cls = _type.expression.type.enum_type
parse_json_enum(resource, key, _cls)
return resource
def _has_withdrawn_filter(self, query):
for field_filter in query.field_filters:
if field_filter.field_name == "withdrawalStatus" and field_filter.value == WithdrawalStatus.NO_USE:
return True
if field_filter.field_name == "withdrawalTime" and field_filter.value is not None:
return True
return False
def _get_non_withdrawn_filter_field(self, query):
"""Returns the first field referenced in query filters which isn't in
WITHDRAWN_PARTICIPANT_FIELDS."""
for field_filter in query.field_filters:
if not field_filter.field_name in WITHDRAWN_PARTICIPANT_FIELDS:
return field_filter.field_name
return None
def _initialize_query(self, session, query_def):
filter_client = False
non_withdrawn_field = self._get_non_withdrawn_filter_field(query_def)
client_id = self.get_client_id()
# Care evolution can GET participants from PTSC if env < prod.
if client_id in ORIGINATING_SOURCES and not is_care_evo_and_not_prod():
filter_client = True
if self._has_withdrawn_filter(query_def):
if non_withdrawn_field:
raise BadRequest(f"Can't query on {non_withdrawn_field} for withdrawn participants")
# When querying for withdrawn participants, ensure that the only fields being filtered on or
# ordered by are in WITHDRAWN_PARTICIPANT_FIELDS.
return super(ParticipantSummaryDao, self)._initialize_query(session, query_def)
else:
query = super(ParticipantSummaryDao, self)._initialize_query(session, query_def)
withdrawn_visible_start = clock.CLOCK.now() - WITHDRAWN_PARTICIPANT_VISIBILITY_TIME
if filter_client and non_withdrawn_field:
return query.filter(ParticipantSummary.participantOrigin == client_id,
or_(
ParticipantSummary.withdrawalStatus != WithdrawalStatus.NO_USE,
ParticipantSummary.withdrawalTime >= withdrawn_visible_start,
)
)
elif filter_client:
return query.filter(
ParticipantSummary.participantOrigin == client_id
)
elif non_withdrawn_field:
# When querying on fields that aren't available for withdrawn participants,
# ensure that we only return participants
# who have not withdrawn or withdrew in the past 48 hours.
return query.filter(
or_(
ParticipantSummary.withdrawalStatus != WithdrawalStatus.NO_USE,
ParticipantSummary.withdrawalTime >= withdrawn_visible_start,
)
)
else:
# When querying on fields that are available for withdrawn participants, return everybody;
# withdrawn participants will have all but WITHDRAWN_PARTICIPANT_FIELDS cleared out 48
# hours after withdrawing.
return query
def _get_order_by_ending(self, query):
if self._has_withdrawn_filter(query):
return _WITHDRAWN_ORDER_BY_ENDING
return self.order_by_ending
def _add_order_by(self, query, order_by, field_names, fields):
if order_by.field_name in _CODE_FILTER_FIELDS:
return super(ParticipantSummaryDao, self)._add_order_by(
query, OrderBy(order_by.field_name + "Id", order_by.ascending), field_names, fields
)
return super(ParticipantSummaryDao, self)._add_order_by(query, order_by, field_names, fields)
def _make_query(self, session, query_def):
query, order_by_field_names = super(ParticipantSummaryDao, self)._make_query(session, query_def)
# Note: leaving for future use if we go back to using a relationship to PatientStatus table.
# query.options(selectinload(ParticipantSummary.patientStatus))
# sql = self.query_to_text(query)
return query, order_by_field_names
def make_query_filter(self, field_name, value):
"""Handle HPO and code values when parsing filter values."""
if field_name == "biobankId":
value = from_client_biobank_id(value, log_exception=True)
if field_name == "hpoId" or field_name == "awardee":
hpo = self.hpo_dao.get_by_name(value)
if not hpo:
raise BadRequest(f"No HPO found with name {value}")
if field_name == "awardee":
field_name = "hpoId"
return super(ParticipantSummaryDao, self).make_query_filter(field_name, hpo.hpoId)
if field_name == "organization":
if value == UNSET:
return super(ParticipantSummaryDao, self).make_query_filter(field_name + "Id", None)
organization = self.organization_dao.get_by_external_id(value)
if not organization:
raise BadRequest(f"No organization found with name {value}")
return super(ParticipantSummaryDao, self).make_query_filter(field_name + "Id", organization.organizationId)
if field_name in _SITE_FIELDS:
if value == UNSET:
return super(ParticipantSummaryDao, self).make_query_filter(field_name + "Id", None)
site = self.site_dao.get_by_google_group(value)
if not site:
raise BadRequest(f"No site found with google group {value}")
return super(ParticipantSummaryDao, self).make_query_filter(field_name + "Id", site.siteId)
if field_name in _CODE_FILTER_FIELDS:
if value == UNSET:
return super(ParticipantSummaryDao, self).make_query_filter(field_name + "Id", None)
# Note: we do not at present support querying for UNMAPPED code values.
code = self.code_dao.get_code(PPI_SYSTEM, value)
if not code:
raise BadRequest(f"No code found: {value}")
return super(ParticipantSummaryDao, self).make_query_filter(field_name + "Id", code.codeId)
if field_name == "patientStatus":
return self._make_patient_status_field_filter(field_name, value)
if field_name == "participantOrigin":
if value not in ORIGINATING_SOURCES:
raise BadRequest(f"No origin source found for {value}")
return super(ParticipantSummaryDao, self).make_query_filter(field_name, value)
return super(ParticipantSummaryDao, self).make_query_filter(field_name, value)
def _make_patient_status_field_filter(self, field_name, value):
try:
organization_external_id, status_text = value.split(":")
except ValueError:
raise BadRequest(
("Invalid patientStatus parameter: `{}`. It must be in the format `ORGANIZATION:VALUE`").format(value)
)
try:
status = PatientStatusFlag(status_text)
except (KeyError, TypeError):
raise BadRequest(
("Invalid patientStatus parameter: `{}`. `VALUE` must be one of {}").format(
value, list(PatientStatusFlag.to_dict().keys())
)
)
organization = self.organization_dao.get_by_external_id(organization_external_id)
if not organization:
raise BadRequest(f"No organization found with name {organization_external_id}")
# Note: leaving for future use if we go back to using a relationship to PatientStatus table.
# return PatientStatusFieldFilter(field_name, Operator.EQUALS, value,
# organization=organization,
# status=status)
if status == PatientStatusFlag.UNSET:
filter_value = '{{"organization": "{0}"}}'.format(organization.externalId)
filter_obj = FieldJsonContainsFilter(field_name, Operator.NOT_EQUALS, filter_value)
else:
filter_value = '{{"organization": "{0}", "status": "{1}"}}'.format(organization.externalId, str(status))
filter_obj = FieldJsonContainsFilter(field_name, Operator.EQUALS, filter_value)
return filter_obj
def update_from_biobank_stored_samples(self, participant_id=None):
"""Rewrites sample-related summary data. Call this after updating BiobankStoredSamples.
If participant_id is provided, only that participant will have their summary updated."""
now = clock.CLOCK.now()
sample_sql, sample_params = _get_sample_sql_and_params(now)
baseline_tests_sql, baseline_tests_params = _get_baseline_sql_and_params()
dna_tests_sql, dna_tests_params = _get_dna_isolates_sql_and_params()
sample_status_time_sql = _get_sample_status_time_sql_and_params()
sample_status_time_params = {}
core_minus_pm_time_sql = _get_core_minus_pm_time_sql_and_params()
core_minus_pm_time_params = {}
counts_sql = """
UPDATE
participant_summary
SET
num_baseline_samples_arrived = {baseline_tests_sql},
samples_to_isolate_dna = {dna_tests_sql},
last_modified = :now
WHERE
num_baseline_samples_arrived != {baseline_tests_sql} OR
samples_to_isolate_dna != {dna_tests_sql}
""".format(
baseline_tests_sql=baseline_tests_sql, dna_tests_sql=dna_tests_sql
)
counts_params = {"now": now}
counts_params.update(baseline_tests_params)
counts_params.update(dna_tests_params)
enrollment_status_sql = _ENROLLMENT_STATUS_SQL
enrollment_status_params = {
"submitted": int(QuestionnaireStatus.SUBMITTED),
"submitted_not_sure": int(QuestionnaireStatus.SUBMITTED_NOT_SURE),
"unset": int(QuestionnaireStatus.UNSET),
"num_baseline_ppi_modules": self._get_num_baseline_ppi_modules(),
"completed": int(PhysicalMeasurementsStatus.COMPLETED),
"received": int(SampleStatus.RECEIVED),
"full_participant": int(EnrollmentStatus.FULL_PARTICIPANT),
"core_minus_pm": int(EnrollmentStatus.CORE_MINUS_PM),
"member": int(EnrollmentStatus.MEMBER),
"interested": int(EnrollmentStatus.INTERESTED),
"cohort_3": int(ParticipantCohort.COHORT_3),
"now": now,
}
# If participant_id is provided, add the participant ID filter to all update statements.
if participant_id:
sample_sql += " AND participant_id = :participant_id"
sample_params["participant_id"] = participant_id
counts_sql += " AND participant_id = :participant_id"
counts_params["participant_id"] = participant_id
enrollment_status_sql += " AND participant_id = :participant_id"
enrollment_status_params["participant_id"] = participant_id
sample_status_time_sql += " AND a.participant_id = :participant_id"
sample_status_time_params["participant_id"] = participant_id
core_minus_pm_time_sql += " AND a.participant_id = :participant_id"
core_minus_pm_time_params["participant_id"] = participant_id
sample_sql = replace_null_safe_equals(sample_sql)
counts_sql = replace_null_safe_equals(counts_sql)
with self.session() as session:
session.execute(sample_sql, sample_params)
session.execute(counts_sql, counts_params)
session.execute(enrollment_status_sql, enrollment_status_params)
session.commit()
# TODO: Change this to the optimized sql in _update_dv_stored_samples()
session.execute(sample_status_time_sql, sample_status_time_params)
session.execute(core_minus_pm_time_sql, core_minus_pm_time_params)
def _get_num_baseline_ppi_modules(self):
return len(config.getSettingList(config.BASELINE_PPI_QUESTIONNAIRE_FIELDS))
def update_enrollment_status(self, summary):
"""Updates the enrollment status field on the provided participant summary to
the correct value based on the other fields on it. Called after
a questionnaire response or physical measurements are submitted."""
consent = (
summary.consentForStudyEnrollment == QuestionnaireStatus.SUBMITTED
and summary.consentForElectronicHealthRecords == QuestionnaireStatus.SUBMITTED
) or (
summary.consentForStudyEnrollment == QuestionnaireStatus.SUBMITTED
and summary.consentForElectronicHealthRecords is None
and summary.consentForDvElectronicHealthRecordsSharing == QuestionnaireStatus.SUBMITTED
)
enrollment_status = self.calculate_enrollment_status(
consent,
summary.numCompletedBaselinePPIModules,
summary.physicalMeasurementsStatus,
summary.samplesToIsolateDNA,
summary.consentCohort,
summary.consentForGenomicsROR,
summary.ehrConsentExpireStatus
)
summary.enrollmentStatusCoreOrderedSampleTime = self.calculate_core_ordered_sample_time(consent, summary)
summary.enrollmentStatusCoreStoredSampleTime = self.calculate_core_stored_sample_time(consent, summary)
summary.enrollmentStatusCoreMinusPMTime = self.calculate_core_minus_pm_time(consent, summary)
# [DA-1623] Participants that have 'Core' status should never lose it
# CORE_MINUS_PM status can not downgrade, but can upgrade to FULL_PARTICIPANT
if summary.enrollmentStatus not in (EnrollmentStatus.FULL_PARTICIPANT, EnrollmentStatus.CORE_MINUS_PM) \
or (summary.enrollmentStatus == EnrollmentStatus.CORE_MINUS_PM
and enrollment_status == EnrollmentStatus.FULL_PARTICIPANT):
# Update last modified date if status changes
if summary.enrollmentStatus != enrollment_status:
summary.lastModified = clock.CLOCK.now()
summary.enrollmentStatus = enrollment_status
summary.enrollmentStatusMemberTime = self.calculate_member_time(consent, summary)
def calculate_enrollment_status(
self, consent, num_completed_baseline_ppi_modules, physical_measurements_status, samples_to_isolate_dna,
consent_cohort, gror_consent, consent_expire_status=ConsentExpireStatus.NOT_EXPIRED
):
"""
2021-07 Note on enrollment status calculations and GROR:
Per NIH Analytics Data Glossary and confirmation on requirements for Core participants:
Cohort 3 participants need any GROR response (yes/no/not sure) to elevate to Core or Core Minus PM status
"""
if consent:
if (
num_completed_baseline_ppi_modules == self._get_num_baseline_ppi_modules()
and physical_measurements_status == PhysicalMeasurementsStatus.COMPLETED
and samples_to_isolate_dna == SampleStatus.RECEIVED
and (consent_cohort != ParticipantCohort.COHORT_3 or
# All response status enum values other than UNSET or SUBMITTED_INVALID meet the GROR requirement
(gror_consent and gror_consent != QuestionnaireStatus.UNSET
and gror_consent != QuestionnaireStatus.SUBMITTED_INVALID))
):
return EnrollmentStatus.FULL_PARTICIPANT
elif (
num_completed_baseline_ppi_modules == self._get_num_baseline_ppi_modules()
and physical_measurements_status != PhysicalMeasurementsStatus.COMPLETED
and samples_to_isolate_dna == SampleStatus.RECEIVED
and (consent_cohort != ParticipantCohort.COHORT_3 or
(gror_consent and gror_consent != QuestionnaireStatus.UNSET
and gror_consent != QuestionnaireStatus.SUBMITTED_INVALID))
):
return EnrollmentStatus.CORE_MINUS_PM
elif consent_expire_status != ConsentExpireStatus.EXPIRED:
return EnrollmentStatus.MEMBER
return EnrollmentStatus.INTERESTED
@staticmethod
def calculate_member_time(consent, participant_summary):
if consent and participant_summary.enrollmentStatusMemberTime is not None:
return participant_summary.enrollmentStatusMemberTime
elif consent:
if (
participant_summary.consentForElectronicHealthRecords is None
and participant_summary.consentForDvElectronicHealthRecordsSharing == QuestionnaireStatus.SUBMITTED
):
return participant_summary.consentForDvElectronicHealthRecordsSharingAuthored
return participant_summary.consentForElectronicHealthRecordsAuthored
else:
return None
def calculate_core_minus_pm_time(self, consent, participant_summary):
if (
consent
and participant_summary.numCompletedBaselinePPIModules == self._get_num_baseline_ppi_modules()
and participant_summary.physicalMeasurementsStatus != PhysicalMeasurementsStatus.COMPLETED
and participant_summary.samplesToIsolateDNA == SampleStatus.RECEIVED
and (participant_summary.consentForGenomicsROR == QuestionnaireStatus.SUBMITTED
or participant_summary.consentCohort != ParticipantCohort.COHORT_3)
) or participant_summary.enrollmentStatus == EnrollmentStatus.CORE_MINUS_PM:
max_core_sample_time = self.calculate_max_core_sample_time(
participant_summary, field_name_prefix="sampleStatus"
)
if max_core_sample_time and participant_summary.enrollmentStatusCoreStoredSampleTime:
return participant_summary.enrollmentStatusCoreStoredSampleTime
else:
return max_core_sample_time
else:
return None
def calculate_core_stored_sample_time(self, consent, participant_summary):
if (
consent
and participant_summary.numCompletedBaselinePPIModules == self._get_num_baseline_ppi_modules()
and participant_summary.physicalMeasurementsStatus == PhysicalMeasurementsStatus.COMPLETED
and participant_summary.samplesToIsolateDNA == SampleStatus.RECEIVED
) or participant_summary.enrollmentStatus == EnrollmentStatus.FULL_PARTICIPANT:
max_core_sample_time = self.calculate_max_core_sample_time(
participant_summary, field_name_prefix="sampleStatus"
)
if max_core_sample_time and participant_summary.enrollmentStatusCoreStoredSampleTime:
return participant_summary.enrollmentStatusCoreStoredSampleTime
else:
return max_core_sample_time
else:
return None
def calculate_core_ordered_sample_time(self, consent, participant_summary):
if (
consent
and participant_summary.numCompletedBaselinePPIModules == self._get_num_baseline_ppi_modules()
and participant_summary.physicalMeasurementsStatus == PhysicalMeasurementsStatus.COMPLETED
) or participant_summary.enrollmentStatus == EnrollmentStatus.FULL_PARTICIPANT:
max_core_sample_time = self.calculate_max_core_sample_time(
participant_summary, field_name_prefix="sampleOrderStatus"
)
if max_core_sample_time and participant_summary.enrollmentStatusCoreOrderedSampleTime:
return participant_summary.enrollmentStatusCoreOrderedSampleTime
else:
return max_core_sample_time
else:
return None
def calculate_max_core_sample_time(self, participant_summary, field_name_prefix="sampleStatus"):
keys = [field_name_prefix + "%sTime" % test for test in config.getSettingList(config.DNA_SAMPLE_TEST_CODES)]
sample_time_list = [v for k, v in participant_summary if k in keys and v is not None]
sample_time = min(sample_time_list) if sample_time_list else None
if sample_time is not None:
return max([time for time in
[
sample_time,
participant_summary.enrollmentStatusMemberTime,
participant_summary.questionnaireOnTheBasicsTime,
participant_summary.questionnaireOnLifestyleTime,
participant_summary.questionnaireOnOverallHealthTime,
participant_summary.physicalMeasurementsFinalizedTime,
] if time is not None]
)
else:
return None
def calculate_distinct_visits(self, pid, finalized_time, id_, amendment=False):
""" Participants may get PM or biobank samples on same day. This should be considered as
a single visit in terms of program payment to participant.
return Boolean: true if there has not been an order on same date."""
from rdr_service.dao.biobank_order_dao import BiobankOrderDao
from rdr_service.dao.physical_measurements_dao import PhysicalMeasurementsDao
day_has_order, day_has_measurement = False, False
existing_orders = BiobankOrderDao().get_biobank_orders_for_participant(pid)
ordered_samples = BiobankOrderDao().get_ordered_samples_for_participant(pid)
existing_measurements = PhysicalMeasurementsDao().get_measuremnets_for_participant(pid)
order_id_to_finalized_date = {
sample.biobankOrderId: sample.finalized.date() for sample in ordered_samples if sample.finalized
}
if existing_orders and finalized_time:
for order in existing_orders:
order_finalized_date = order_id_to_finalized_date.get(order.biobankOrderId)
if (
order_finalized_date == finalized_time.date()
and order.biobankOrderId != id_
and order.orderStatus != BiobankOrderStatus.CANCELLED
):
day_has_order = True
elif order.biobankOrderId == id_ and amendment:
day_has_order = True
elif not finalized_time and amendment:
day_has_order = True
if existing_measurements and finalized_time:
for measurement in existing_measurements:
if not measurement.finalized:
continue
if measurement.finalized.date() == finalized_time.date() and measurement.physicalMeasurementsId != id_:
day_has_measurement = True
is_distinct_visit = not (day_has_order or day_has_measurement)
return is_distinct_visit
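    # Hedged usage sketch (not part of the original module): callers typically ask whether a
    # newly finalized order or measurement counts as a new paid visit, roughly:
    #   dao = ParticipantSummaryDao()
    #   is_new_visit = dao.calculate_distinct_visits(pid, finalized_time, order_id)
    # where pid, finalized_time and order_id are hypothetical caller-side variables.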
@staticmethod
def get_client_id():
from rdr_service import app_util, api_util
email = app_util.get_oauth_id()
user_info = app_util.lookup_user_info(email)
client_id = user_info.get('clientId')
if email == api_util.DEV_MAIL and client_id is None:
            client_id = 'example'  # account for temp configs that don't create the key
return client_id
def get_record_from_attr(self, *, attr, value):
with self.session() as session:
record = session.query(ParticipantSummary)\
.filter(ParticipantSummary.withdrawalStatus == WithdrawalStatus.NOT_WITHDRAWN,
getattr(ParticipantSummary, attr) == value,
getattr(ParticipantSummary, attr).isnot(None))
return record.all()
def get_hpro_consent_paths(self, result):
consents_map = {
ConsentType.PRIMARY: 'consentForStudyEnrollment',
ConsentType.CABOR: 'consentForCABoR',
ConsentType.EHR: 'consentForElectronicHealthRecords',
ConsentType.GROR: 'consentForGenomicsROR'
}
participant_id = result['participantId']
records = list(filter(lambda obj: obj.participant_id == participant_id, self.hpro_consents))
for consent_type, consent_name in consents_map.items():
value_path_key = f'{consent_name}FilePath'
has_consent_path = [obj for obj in records if consent_type == obj.consent_type]
if has_consent_path:
result[value_path_key] = has_consent_path[0].file_path
return result
def get_participant_incentives(self, result):
participant_id = result['participantId']
records = list(filter(lambda obj: obj.participantId == participant_id, self.participant_incentives))
records = [self.incentive_dao.convert_json_obj(obj) for obj in records]
return records
def to_client_json(self, model: ParticipantSummary):
result = model.asdict()
if self.hpro_consents:
result = self.get_hpro_consent_paths(result)
if self.participant_incentives:
result['participantIncentives'] = self.get_participant_incentives(result)
is_the_basics_complete = model.questionnaireOnTheBasics == QuestionnaireStatus.SUBMITTED
# Participants that withdrew more than 48 hours ago should have fields other than
# WITHDRAWN_PARTICIPANT_FIELDS cleared.
should_clear_fields_for_withdrawal = model.withdrawalStatus == WithdrawalStatus.NO_USE and (
model.withdrawalTime is None
or model.withdrawalTime < clock.CLOCK.now() - WITHDRAWN_PARTICIPANT_VISIBILITY_TIME
)
if should_clear_fields_for_withdrawal:
result = {k: result.get(k) for k in WITHDRAWN_PARTICIPANT_FIELDS}
result["participantId"] = to_client_participant_id(model.participantId)
biobank_id = result.get("biobankId")
if biobank_id:
result["biobankId"] = to_client_biobank_id(biobank_id)
date_of_birth = result.get("dateOfBirth")
if date_of_birth:
result["ageRange"] = get_bucketed_age(date_of_birth, clock.CLOCK.now())
else:
result["ageRange"] = UNSET
if not result.get("primaryLanguage"):
result["primaryLanguage"] = UNSET
if "organizationId" in result:
result["organization"] = result["organizationId"]
del result["organizationId"]
format_json_org(result, self.organization_dao, "organization")
if result.get("genderIdentityId"):
del result["genderIdentityId"] # deprecated in favor of genderIdentity
# Map demographic Enums if TheBasics was submitted and Skip wasn't in use
if is_the_basics_complete and not should_clear_fields_for_withdrawal:
if model.genderIdentity is None or model.genderIdentity == GenderIdentity.UNSET:
result['genderIdentity'] = GenderIdentity.PMI_Skip
if model.race is None or model.race == Race.UNSET:
result['race'] = Race.PMI_Skip
result["patientStatus"] = model.patientStatus
format_json_hpo(result, self.hpo_dao, "hpoId")
result["awardee"] = result["hpoId"]
_initialize_field_type_sets()
for new_field_name, existing_field_name in self.get_aliased_field_map().items():
result[new_field_name] = getattr(model, existing_field_name)
# register new field as date if field is date
if type(result[new_field_name]) is datetime.datetime:
_DATE_FIELDS.add(new_field_name)
for fieldname in _DATE_FIELDS:
format_json_date(result, fieldname)
for fieldname in _CODE_FIELDS:
is_demographic_field = fieldname in ['educationId', 'incomeId', 'sexualOrientationId', 'sexId']
should_map_unset_to_skip = (
is_the_basics_complete and is_demographic_field and not should_clear_fields_for_withdrawal
)
format_json_code(
result, self.code_dao, fieldname,
unset_value=PMI_SKIP_CODE if should_map_unset_to_skip else UNSET
)
for fieldname in _ENUM_FIELDS:
format_json_enum(result, fieldname)
for fieldname in _SITE_FIELDS:
format_json_site(result, self.site_dao, fieldname)
if model.withdrawalStatus == WithdrawalStatus.NO_USE\
or model.suspensionStatus == SuspensionStatus.NO_CONTACT\
or model.deceasedStatus == DeceasedStatus.APPROVED:
result["recontactMethod"] = "NO_CONTACT"
# Strip None values.
result = {k: v for k, v in list(result.items()) if v is not None}
return result
@staticmethod
def get_aliased_field_map():
return {
'firstEhrReceiptTime': 'ehrReceiptTime',
'latestEhrReceiptTime': 'ehrUpdateTime'
}
def _decode_token(self, query_def, fields):
""" If token exists in participant_summary api, decode and use lastModified to add a buffer
of 60 seconds. This ensures when a _sync link is used no one is missed. This will return
at a minimum, the last participant and any more that have been modified in the previous 60
seconds. Duplicate participants returned should be handled on the client side."""
decoded_vals = super(ParticipantSummaryDao, self)._decode_token(query_def, fields)
if query_def.order_by and (
query_def.order_by.field_name == "lastModified"
and query_def.always_return_token is True
and query_def.backfill_sync is True
):
decoded_vals[0] = decoded_vals[0] - datetime.timedelta(seconds=config.LAST_MODIFIED_BUFFER_SECONDS)
return decoded_vals
@staticmethod
def update_ehr_status(summary, update_time):
summary.ehrStatus = EhrStatus.PRESENT
if not summary.ehrReceiptTime:
summary.ehrReceiptTime = update_time
summary.ehrUpdateTime = update_time
return summary
def get_participant_ids_with_ehr_data_available(self):
with self.session() as session:
result = session.query(ParticipantSummary.participantId).filter(
ParticipantSummary.isEhrDataAvailable == expression.true()
).all()
return {row.participantId for row in result}
def prepare_for_ehr_status_update(self):
with self.session() as session:
query = (
sqlalchemy.update(ParticipantSummary).values({
ParticipantSummary.isEhrDataAvailable: False
})
)
return session.execute(query)
@staticmethod
def bulk_update_ehr_status_with_session(session, parameter_sets):
query = (
sqlalchemy.update(ParticipantSummary)
.where(ParticipantSummary.participantId == sqlalchemy.bindparam("pid"))
.values(
{
ParticipantSummary.ehrStatus.name: EhrStatus.PRESENT,
ParticipantSummary.isEhrDataAvailable: True,
ParticipantSummary.ehrUpdateTime: sqlalchemy.bindparam("receipt_time"),
ParticipantSummary.ehrReceiptTime: sqlalchemy.case(
[(ParticipantSummary.ehrReceiptTime.is_(None), sqlalchemy.bindparam("receipt_time"))],
else_=ParticipantSummary.ehrReceiptTime,
),
}
)
)
return session.execute(query, parameter_sets)
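    # Hedged usage sketch (not part of the original module): parameter_sets is expected to be
    # a sequence of dicts keyed by the bindparam names used above, e.g.
    #   [{"pid": 123456789, "receipt_time": datetime.datetime(2022, 1, 1)}]
    # where the participant id and timestamp are hypothetical example values.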
def bulk_update_retention_eligible_flags(self, upload_date):
with self.session() as session:
query = (
sqlalchemy.update(
ParticipantSummary
).where(and_(
ParticipantSummary.participantId == RetentionEligibleMetrics.participantId,
RetentionEligibleMetrics.fileUploadDate == sqlalchemy.bindparam("file_upload_date")
))
).values(
{
ParticipantSummary.retentionEligibleStatus: RetentionEligibleMetrics.retentionEligibleStatus,
ParticipantSummary.retentionEligibleTime: RetentionEligibleMetrics.retentionEligibleTime,
ParticipantSummary.retentionType: RetentionEligibleMetrics.retentionType,
ParticipantSummary.lastActiveRetentionActivityTime:
RetentionEligibleMetrics.lastActiveRetentionActivityTime
}
)
session.execute(query, {'file_upload_date': upload_date})
def _initialize_field_type_sets():
"""Using reflection, populate _DATE_FIELDS, _ENUM_FIELDS, and _CODE_FIELDS, which are
used when formatting JSON from participant summaries.
We call this lazily to avoid having issues with the code getting executed while SQLAlchemy
    is still initializing itself. Locking ensures we only run through the code once.
"""
with _fields_lock:
# Return if this is already initialized.
if _DATE_FIELDS:
return
for prop_name in dir(ParticipantSummary):
if prop_name.startswith("_"):
continue
if prop_name == "genderIdentityId": # deprecated
continue
prop = getattr(ParticipantSummary, prop_name)
if callable(prop):
continue
property_type = get_property_type(prop)
if property_type:
if property_type == PropertyType.DATE or property_type == PropertyType.DATETIME:
_DATE_FIELDS.add(prop_name)
elif property_type == PropertyType.ENUM:
_ENUM_FIELDS.add(prop_name)
elif property_type == PropertyType.INTEGER:
fks = prop.property.columns[0].foreign_keys
if fks:
for fk in fks:
if fk._get_colspec() == "code.code_id":
_CODE_FIELDS.add(prop_name)
break
class PatientStatusFieldFilter(FieldFilter):
"""
FieldFilter class for patientStatus relationship field
"""
def __init__(self, field_name, operator, value, organization, status):
super(PatientStatusFieldFilter, self).__init__(field_name, operator, value)
self.organization = organization
self.status = status
def add_to_sqlalchemy_query(self, query, field):
if self.operator == Operator.EQUALS:
if self.status == PatientStatusFlag.UNSET:
criterion = sqlalchemy.not_(
field.any(PatientStatus.organizationId == self.organization.organizationId)
)
else:
criterion = field.any(
sqlalchemy.and_(
PatientStatus.organizationId == self.organization.organizationId,
PatientStatus.patientStatus == self.status,
)
)
return query.filter(criterion)
else:
raise ValueError(f"Invalid operator: {self.operator}.")
class ParticipantGenderAnswersDao(UpdatableDao):
def __init__(self):
super(ParticipantGenderAnswersDao, self).__init__(ParticipantGenderAnswers, order_by_ending=["id"])
def update_gender_answers_with_session(self, session, participant_id, gender_code_ids):
# remove old answers
self.delete_answers_with_session(session, participant_id)
# insert new answers
now = clock.CLOCK.now()
records = [
ParticipantGenderAnswers(**dict(participantId=participant_id, created=now, modified=now, codeId=code_id))
for code_id in gender_code_ids
]
for record in records:
session.merge(record)
def delete_answers_with_session(self, session, participant_id):
session.query(ParticipantGenderAnswers).filter(
ParticipantGenderAnswers.participantId == participant_id
).delete()
class ParticipantRaceAnswersDao(UpdatableDao):
def __init__(self):
super(ParticipantRaceAnswersDao, self).__init__(ParticipantRaceAnswers, order_by_ending=["id"])
def update_race_answers_with_session(self, session, participant_id, race_code_ids):
# remove old answers
self.delete_answers_with_session(session, participant_id)
# insert new answers
now = clock.CLOCK.now()
records = [
ParticipantRaceAnswers(**dict(participantId=participant_id, created=now, modified=now, codeId=code_id))
for code_id in race_code_ids
]
for record in records:
session.merge(record)
def delete_answers_with_session(self, session, participant_id):
session.query(ParticipantRaceAnswers).filter(ParticipantRaceAnswers.participantId == participant_id).delete()
|
all-of-us/raw-data-repository
|
rdr_service/dao/participant_summary_dao.py
|
Python
|
bsd-3-clause
| 57,595
|
[
"VisIt"
] |
e537a77da8089c045073a0a521272cfdd3c0f229bb768eea1e93a645d285b3cf
|
# -*- coding: utf-8 -*-
# Copyright (C) Brian Moe (2013-2014), Duncan Macleod (2014-)
#
# This file is part of LIGO CIS Core.
#
# LIGO CIS Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LIGO CIS Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LIGO CIS Core. If not, see <http://www.gnu.org/licenses/>.
import re
from functools import reduce  # used in Channel.user_query; required on Python 3
from django.db.models import (Model, CharField, ForeignKey, FloatField,
IntegerField, TextField, DateTimeField,
BooleanField, Q)
from django.conf import settings
from django.contrib.auth.models import User
from reversion import revisions as reversion
from rest_framework.reverse import reverse
class CisModel(Model):
class Meta:
abstract = True
@classmethod
def get_or_new(cls, **kwargs):
"""Find the class instance based on the `kwargs`, or create a new one
"""
try:
return cls.objects.get(**kwargs), False
except cls.DoesNotExist:
return cls(**kwargs), True
class Ifo(CisModel):
"""Model of a Laser Interferometer
"""
name = CharField(max_length=10, null=False, unique=True)
label = CharField(max_length=5, null=False, unique=False)
description = TextField(null=False)
def __unicode__(self):
return self.name
class Channel(CisModel):
"""Model for a data Channel
A `Channel` is a single, recorded data stream read out from an `Ifo`.
"""
re_name = re.compile(
r'((?:(?P<ifo>[A-Z]\d))?|[\w-]+):' # match IFO prefix
'(?:(?P<subsystem>[a-zA-Z0-9]+))?' # match subsystem
'(?:[-_](?P<signal>[a-zA-Z0-9_-]+))?' # match signal
'(?:\.(?P<trend>[a-z]+))?' # match trend type
'(?:,(?P<type>([a-z]-)?[a-z]+))?' # match channel type
)
DATATYPE = {
0: "Undefined",
1: "16-bit Integer",
2: "32-bit Integer",
3: "64-bit Integer",
4: "32-bit Float",
5: "64-bit Double",
6: "32-bit Complex",
}
# key attributes
ifo = ForeignKey(Ifo)
subsystem = CharField(max_length=10, null=False)
name = CharField(max_length=70, null=False, unique=True, db_index=True)
# DAQ parameters
gain = FloatField()
slope = FloatField()
offset = IntegerField()
datatype = IntegerField()
ifoid = IntegerField()
acquire = IntegerField()
units = CharField(max_length=10)
dcuid = IntegerField()
datarate = IntegerField()
chnnum = IntegerField(null=True)
# versioning
created = DateTimeField(auto_now=True, null=False)
createdby = CharField(max_length=30, null=False)
source = CharField(max_length=30, db_index=True)
# is it currently recorded?
is_current = BooleanField(default=False, null=False)
# is this a test-point (unrecorded channel)
is_testpoint = BooleanField(default=False, null=False)
def get_datatype_display(self):
"""String display of data type
"""
return self.DATATYPE.get(self.datatype, self.datatype)
def update_vals(self, vals):
"""Update the attributes for this `Channel`
Parameters
----------
vals : `list` of `tuples <tuple>`
list of `(name, value)` attribute pairs
Returns
-------
changed : `bool`
whether object has changed
"""
changed = False
for attr, val in vals:
old = getattr(self, attr)
try:
new = type(old)(val)
except (ValueError, TypeError):
# problem with casting
# Go ahead and change -- let the model sort it out.
# OR
# oldval is None -- one of val or old val is not None
                if val is not None or old is not None:
changed = True
setattr(self, attr, val)
else:
if old != new:
changed = True
if changed:
setattr(self, attr, val)
return changed
def subsystem_description(self):
"""Get the description for the `Subsytem` of this `Channel`
Returns
-------
description : `str`
the string description, or `'?'` if no description is found
"""
try:
return Subsystem.objects.get(label=self.subsystem).description
except ValueError:
return "?"
def description(self):
"""The description of this `Channel`
"""
name = self.name.split(':')[1]
return ChannelDescription.get_or_new(name=name)[0]
def sub_names(self, include_self=False):
"""Find the component names of this `Channel`
Parameters
----------
include_self : `bool`, optional, default: `False`
include the full name of this channel (excluding the IFO prefix),
along with the sub-parts
Returns
-------
names : `list` of `str`
a list of component sub-strings for this `Channel`
"""
match = self.re_name.match(self.name)
if match:
names = [match.group(3)] + match.group(4).split('_')
else:
names = []
if include_self:
names.append(self.name.split(':')[1])
return names
def defined_descriptions(self):
"""Find all `ChannelDescription` entries for this `Channel`
"""
return ChannelDescription.objects.filter(
Q(name__in=self.sub_names(include_self=True)))
def descriptions(self):
"""Find all `ChannelDescription` entries for this `Channel`
"""
names = self.sub_names()
        return [ChannelDescription.get_or_new(name=name)[0] for
                name in names]
@classmethod
def user_query(cls, query=""):
# Typed user query -> Q
query = query.strip()
if query:
# Query language like LigoDV-Web
# Spaces indicate AND, '|' indicates OR, AND has precedence over OR
# terms are case insensitive.
# eg. H1: pem | H2: PSL
            # is (*H1:* & *pem*) | (*H2:* & *PSL*)
ors = []
for term in query.split('|'):
ands = [Q(name__icontains=aterm.strip()) for
aterm in term.split()]
ors.append(reduce(Q.__and__, ands, Q()))
q = reduce(Q.__or__, ors, Q())
else:
q = Q()
return q
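    # Hedged usage sketch (not in the original file): the returned Q object is intended to be
    # applied to a Channel queryset, e.g.
    #   Channel.objects.filter(Channel.user_query("H1: pem | H2: PSL"))
    # which matches names containing both "H1:" and "pem", or both "H2:" and "PSL".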
def simulink_model_link(self):
"""Return the URL of the webview for this channels Simulink model
"""
# Links only guaranteed for current acquired channels.
if not (self.acquire and self.is_current):
return None
# LHO
if self.name[0] in ['h', 'H']:
return ('https://lhocds.ligo-wa.caltech.edu/simulink/'
'%s_slwebview.html' % self.source.lower())
# LLO
elif self.name[0] in ['l', 'L']:
return ('https://llocds.ligo-la.caltech.edu/daq/simulink/'
'%s_slwebview.html' % self.source.lower())
else:
return None
def get_absolute_url(self, request=None):
return reverse("channel", args=[self.id], request=request)
def revisions(self):
return [v.field_dict for v in reversion.get_unique_for_object(self)]
def __unicode__(self):
return self.name
reversion.register(Channel)
class Subsystem(CisModel):
"""Instrumental sub-system for a `Channel` or set of `Channels`
"""
name = CharField(max_length=10, null=False, unique=True)
label = CharField(max_length=5, null=False, unique=False)
description = TextField(null=False)
class Meta(CisModel.Meta):
ordering = ["name"]
def __unicode__(self):
return self.name
reversion.register(Subsystem)
class ChannelDescription(CisModel):
"""Description of a channel or sub-section of a channel name.
This is a simple mapping of 'name' to text.
"""
name = CharField(max_length=60, db_index=True, unique=True)
desc = CharField(max_length=100)
text = TextField(null=True, blank=True)
created = DateTimeField(auto_now_add=True, null=False)
modified = DateTimeField(auto_now=True, null=False)
editor = ForeignKey(User, blank=True, null=True)
def get_absolute_url(self, request=None):
return reverse("api-description", args=[self.id], request=request)
def revisions(self):
return [v.field_dict for v in reversion.get_unique_for_object(self)]
def __unicode__(self):
return self.name
reversion.register(ChannelDescription)
class TreeNode(CisModel):
# name -- sub-string of full channel name.
# Unless this is a leaf node, then it is the full channel name.
# eg CHANNEL from L1:PSL-ODC_CHANNEL_OUT_DQ
name = CharField(max_length=70, db_index=True)
parent = ForeignKey("TreeNode", null=True)
channel = ForeignKey("Channel", null=True) # not NULL iff a leaf node
# namepath: comma-sep list of ancestors' names
# (technically redundant, used for optimization)
# Is NULL (XXX or indeterminate?) for leaf nodes
# Last name in list is self.name
# eg PSL,ODC,CHANNEL
namepath = CharField(max_length=60, db_index=True, null=True)
@classmethod
def add_channel(cls, channel):
"""Add a copy of a channel to the database
"""
if cls.objects.filter(channel=channel).count():
return
names = channel.sub_names()
if not names:
# This is some weirdo channel that doesn't have
# recognizable sub-names. Like a vacuum channel.
# Ignore it.
return
node = cls(name=channel.name, channel=channel)
node.parent = cls.node_with_path(names, create=True)
node.save()
@classmethod
def node_with_path(cls, path, create=False):
namepath = ",".join(path)
try:
return cls.objects.get(namepath=namepath)
except cls.DoesNotExist:
if not create:
raise
if not path:
return None
node = cls(name=path[-1], namepath=namepath)
node.parent = cls.node_with_path(path[0:-1], create)
node.save()
return node
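    # Illustrative sketch (not part of the original module): how add_channel /
    # node_with_path decompose a channel into a namepath tree. Values below are
    # hypothetical.
    #
    #   >>> # a channel whose sub_names() returns ["PSL", "ODC", "CHANNEL"]
    #   >>> TreeNode.add_channel(channel)
    #   # creates (or reuses) nodes with namepaths
    #   #   "PSL"  ->  "PSL,ODC"  ->  "PSL,ODC,CHANNEL"
    #   # plus a leaf node named with the full channel name, whose `parent`
    #   # points at the deepest namepath node.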
# Deprecated. TreeNode is a more descriptive name. Descriptions no longer used.
class Description(CisModel):
name = CharField(max_length=60, db_index=True)
fullname = CharField(max_length=60, null=True, db_index=True)
shortdesc = CharField(max_length=60, null=True, db_index=True)
text = TextField(null=True)
parent = ForeignKey("Description", null=True)
@classmethod
def _add_standard(cls, name):
if not name:
return None
lookedup = cls.objects.filter(fullname=name)
if lookedup.count():
return lookedup[0]
new = cls(fullname=name)
parentname, myname = new._split_standard_name()
new.name = myname
parent = new._add_standard(parentname)
new.parent = parent
new.save()
return new
@classmethod
def add(cls, name):
lookedup = cls.objects.filter(name=name)
if lookedup.count():
return lookedup[0]
# what kind of channel is this?
if re.match(r'^[A-Z0-9]+:[A-Z]+-[A-Z0-9_]+$', name):
return cls._add_standard(name)
elif re.match(r'^.VE-[A-Z0-9]+:[A-Z0-9]+$', name):
# vacuum channel
pass
else:
# who knows.
pass
return None
def _split_standard_name(self):
# (parent_fullname, my_shortname) for a standard channel.
name = self.fullname
if name.find(':') >= 0:
# Actual, physical channel name. Peel off IFO.
# Need to deal with LVE-XX:XXX_XXX_XXX kinds of things.
#
# For physical channel names, fullname == name.
return name[name.find(':')+1:], name
locateUnderscore = name.rfind('_')
if locateUnderscore >= 0:
return name[:locateUnderscore], name[locateUnderscore+1:]
locateDash = name.rfind('-')
if locateDash >= 0:
return name[:locateDash], name[locateDash+1:]
return "", name
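    # Illustrative sketch (not part of the original module): expected splits
    # from _split_standard_name for a few hypothetical fullnames.
    #
    #   fullname                     -> (parent_fullname, my_shortname)
    #   "L1:PSL-ODC_CHANNEL_OUT_DQ"  -> ("PSL-ODC_CHANNEL_OUT_DQ", fullname)
    #   "PSL-ODC_CHANNEL_OUT_DQ"     -> ("PSL-ODC_CHANNEL_OUT", "DQ")
    #   "PSL-ODC"                    -> ("PSL", "ODC")
    #   "PSL"                        -> ("", "PSL")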
def _parentName(self):
# name of parent for std channel.
name = self.name
if name.find(':') >= 0:
# Actual, physical channel name. Peel off IFO.
# Need to deal with LVE-XX:XXX_XXX_XXX kinds of things.
return name[name.find(':')+1:]
if name.rfind('_') >= 0:
return name[:name.rfind('_')]
if name.rfind('-') >= 0:
return name[:name.rfind('-')]
return ""
def __unicode__(self):
return self.name
reversion.register(Description)
# PEM Sensor web page has nice diagrams of where sensors are.
#
# Given a sensor name, we can generate a URL for one of these diagrams.
# We need a way to map CHANNEL_NAME => SENSOR_NAME
#
# A sensor name is a prefix of a channel name. Given a list of all sensors,
# and a channel name, we find a sensor name that is a prefix of the
# channel name.
class PemSensor(CisModel):
"""A model of a Physical Environment Monitoring sensor
"""
name = CharField(max_length=70, null=False, unique=True)
@property
def link(self):
"""The web URL for this `PemSensor`
:type: `str`
"""
return settings.PEM_SENSOR_DIAGRAM_URL_PATTERN.format(name=self.name)
@classmethod
def sensor_for_channel(cls, channel):
"""Find the `PemSensor` associated with the given channel name
Parameters
----------
channel : `str`, `Channel`
            name of the channel whose sensor you want
Returns
-------
sensor : `PemSensor`
the first `PemSensor` found to associate with the given `Channel`
"""
prefix_len = settings.PEM_SENSOR_MIN_LENGTH
prefix = str(channel)[:prefix_len]
candidates = cls.objects.filter(name__startswith=prefix)
for candidate in candidates:
if str(channel).startswith(candidate.name):
return candidate
return None
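    # Illustrative sketch (not part of the original module): the prefix match
    # performed by sensor_for_channel. Names below are hypothetical.
    #
    #   channel name : "H1:PEM-CS_SEIS_LVEA_VERTEX_X_DQ"
    #   sensor names : ["H1:PEM-CS_SEIS_LVEA_VERTEX", "H1:PEM-EY_MAG_EBAY"]
    #
    #   Candidates are pre-filtered on the first PEM_SENSOR_MIN_LENGTH
    #   characters, then the first sensor whose full name is a prefix of the
    #   channel name is returned ("H1:PEM-CS_SEIS_LVEA_VERTEX" here).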
|
lscsoft/cis.server
|
cisserver/models.py
|
Python
|
gpl-3.0
| 14,782
|
[
"Brian",
"MOE"
] |
77b5321d970ffb678c010ff708f6d1381be953b0fb64318459fb9b58bf94ee86
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 29/03/17 at 11:40 AM
@author: neil
Program description here
Version 0.0.0
"""
import numpy as np
from astropy.io import fits
from astropy.table import Table
import matplotlib.pyplot as plt
import sys
import os
import mpmath
from tqdm import tqdm as wrap
import periodogram_functions2 as pf2
# =============================================================================
# Define variables
# =============================================================================
# type of run
TYPE = "Normal"
# TYPE = "DATABASE"
# TYPE = "Elodie"
TEST = False
# -----------------------------------------------------------------------------
# Deal with choosing a target and data paths
WORKSPACE = "/Astro/Projects/RayPaul_Work/SuperWASP/"
# individual location values (for types of run)
if TYPE == "Elodie":
SID = 'GJ1289'
# SID = 'GJ793'
TIMECOL = "time"
DATACOL = "flux"
EDATACOL = "eflux"
# for GJ1289
if SID == 'GJ1289':
DPATH = WORKSPACE + "Data/test_for_ernst/bl_gj1289.fits"
elif SID == 'GJ793':
DPATH = WORKSPACE + "Data/test_for_ernst/bl_gj793.fits"
else:
DPATH = None
elif TYPE == "DATABASE":
SID = "ABD_108A"
TIMECOL = 'HJD'
DATACOL = 'TAMMAG2'
EDATACOL = 'TAMMAG2_ERR'
else:
# set file paths
DPATH = WORKSPACE + 'Data/test_for_ernst/'
DPATH += '1SWASP J192338.19-460631.5.fits'
PLOTPATH = WORKSPACE + '/Plots/Messina_like_plots_from_exoarchive/'
# Column info
TIMECOL = 'HJD'
DATACOL = 'TAMMAG2'
EDATACOL = 'TAMMAG2_ERR'
SID = "1SWASP J192338.19-460631.5"
# -----------------------------------------------------------------------------
# set database settings
HOSTNAME = 'localhost'
USERNAME = 'root'
PASSWORD = '1234'
DATABASE = 'swasp'
TABLE = 'swasp_sep16_tab'
# -----------------------------------------------------------------------------
# whether to show the graph
SHOW = False
# size in inches of the plot
FIGSIZE = (20, 16)
# decide whether to plot nan periods (saves time)
PLOT_NAN_PERIOD = True
# Name the object manually
NAME = SID
# whether to log progress to standard output (print)
LOG = True
# -----------------------------------------------------------------------------
# minimum time period to be sensitive to (5 hours)
TMIN = 5/24.0
# maximum time period to be sensitive to (100 days)
TMAX = 100
# number of samples per peak
SPP = 5
# -----------------------------------------------------------------------------
# random seed for bootstrapping
RANDOM_SEED = 9
# number of bootstraps to perform
N_BS = 500
# Phase offset
OFFSET = (-0.1, 0.1)
# -----------------------------------------------------------------------------
# number of peaks to find
NPEAKS = 5
# number of pixels around a peak to class as same peak
BOXSIZE = 5
# percentage around noise peak to rule out true peak
THRESHOLD = 5.0
# percentile (FAP) to cut peaks at (i.e. any below are not used)
CUTPERCENTILE = pf2.sigma2percentile(1.0)*100
# -----------------------------------------------------------------------------
# minimum number of data points to define a sub region
MINPOINTS = 50 # points
# maximum gap between data points to define a sub region
MAXGAP = 20 # days
# extension for subgroup
EXT = ''
# how to normalise all powers
NORMALISATION = None
# -----------------------------------------------------------------------------
LIMIT_RANGE = False
RANGE_LOW = 3700
RANGE_HIGH = 4000
# -----------------------------------------------------------------------------
TEST_PERIOD = 8.1732
# number of days to observe
TEST_TMAX = 800
# number of observations across TMAX to select
TEST_N = 100
# edata uncertainty (amplitude of signal is set to 1)
TEST_SNR = 0.5
# =============================================================================
# Define functions
# =============================================================================
def get_params():
cfmts = [tuple, list, np.ndarray]
# -------------------------------------------------------------------------
# get constants from code defined above (only select those in uppercase)
default_param_names = list(globals().keys())
params, fmts = dict(), dict()
for name in default_param_names:
if name.isupper():
# get name from global
params[name] = globals()[name]
# deal with typing
kind = type(globals()[name])
# if a list/array need a type for each element
if kind in cfmts:
fmts[name] = [kind, type(globals()[name][0])]
# else just need a type for the object
else:
fmts[name] = [kind, kind]
# -------------------------------------------------------------------------
# update these values from the command line
name, value = "", ""
args = sys.argv
try:
# loop around parameters
for name in list(params.keys()):
# loop around commandline arguments
for arg in args:
# if no equals then not a valid argument
if "=" not in arg:
continue
# get the argument and its string value
argument, value = arg.split('=')
value = value.replace('"', '')
                # if we recognise the argument, use it over the default value
if name == argument:
# deal with having lists
# i.e. need to cast the type for each element
if fmts[name][0] in cfmts:
params[name] = array_from_string(value, name,
fmts[name][0],
fmts[name][1])
# need to deal with booleans
elif fmts[name][0] == bool:
if value == 'False':
params[name] = False
else:
params[name] = True
# all Nones must be string format (as we have no way to tell
# what they should be)
elif isinstance(None, fmts[name][0]):
params[name] = value
# else cast the string into type defined from defaults
else:
params[name] = fmts[name][0](value)
except ValueError:
e = ["Error: Parameter ", name, value, fmts[name][0]]
raise ValueError("{0} {1}={2} must be of type {3}".format(*e))
# return parameters
return params
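# Illustrative sketch (not part of the original script): how get_params()
# consumes command-line overrides. The exact values below are hypothetical.
#
#   $ python LCA_test_ernst.py SID="GJ1289" N_BS=200 TMAX=50 SHOW=False
#
# Each KEY=value pair that matches an upper-case module constant is cast back
# to that constant's type (booleans via the 'True'/'False' strings, lists and
# tuples via array_from_string below), so e.g. OFFSET="(-0.2, 0.2)" yields a
# tuple of floats.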
def array_from_string(string, name, fmtarray, fmtelement):
string = string.split('[')[-1].split('(')[-1]
string = string.split(']')[0].split(')')[0]
string = string.replace(',', '')
rawstringarray = string.split()
try:
array = [fmtelement(rsa) for rsa in rawstringarray]
except ValueError:
e = ["Error: Parameter ", name, string, fmtarray, 'with elements: ',
fmtelement]
raise Exception("{0} {1}={2} must be a {3} {4} {5}".format(*e))
return fmtarray(array)
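# Illustrative sketch (not part of the original script): array_from_string
# accepts list/tuple-like strings from the command line, e.g.
#
#   >>> array_from_string("[1, 2, 3]", "SOME_LIST", list, int)
#   [1, 2, 3]
#   >>> array_from_string("(-0.2, 0.2)", "OFFSET", tuple, float)
#   (-0.2, 0.2)
#
# (the name argument is only used to build the error message; "SOME_LIST" is
# made up for this example).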
def load_data(params):
# loading data
if TEST:
params['NAME'] = "TEST P={0}".format(TEST_PERIOD)
time, data, edata = test_data(show=False)
# zero time data to nearest thousand (start at 0 in steps of days)
day0 = np.floor(time.min() / 100) * 100
time -= day0
params['DAY0'] = day0
elif params['TYPE'] == "DATABASE":
print('\n Loading data...')
sid = params['SID']
sql_kwargs = dict(host=params['HOSTNAME'], db=params['DATABASE'],
table=params['TABLE'], user=params['USERNAME'],
passwd=params['PASSWORD'])
pdata = pf2.get_lightcurve_data(conn=None, sid=sid, sortcol='HJD',
replace_infs=True, **sql_kwargs)
time = np.array(pdata[params['TIMECOL']], dtype=float)
data = np.array(pdata[params['DATACOL']], dtype=float)
edata = np.array(pdata[params['EDATACOL']], dtype=float)
params['NAME'] = params['SID']
else:
print('\n Loading data...')
lightcurve = fits.getdata(params['DPATH'], ext=1)
if params['NAME'] is None:
params['NAME'] = DPATH.split('/')[-1].split('.')[0]
# ---------------------------------------------------------------------
# get columns
time = np.array(lightcurve[params['TIMECOL']], dtype=float)
data = np.array(lightcurve[params['DATACOL']], dtype=float)
edata = np.array(lightcurve[params['EDATACOL']], dtype=float)
        # zero time data to nearest hundred (start at 0 in steps of days)
day0 = np.floor(time.min() / 100) * 100
time -= day0
params['DAY0'] = day0
# zero data (by median value)
data = data - np.median(data)
# -------------------------------------------------------------------------
if LIMIT_RANGE:
mask = np.arange(RANGE_LOW, RANGE_HIGH, 1)
time, data, edata = time[mask], data[mask], edata[mask]
day0 = np.floor(time.min() / 100) * 100
time -= day0
params['DAY0'] = day0
# -------------------------------------------------------------------------
return time, data, edata, params
def get_sub_regions(time, params):
groupmasks = pf2.subregion_mask(time, params['MINPOINTS'], params['MAXGAP'])
region_names = ['Full']
for g_it in range(len(groupmasks)):
region_names.append('R_{0}'.format(g_it + 1))
return [np.repeat([True], len(time))] + groupmasks, region_names
def update_progress(params):
if params['LOG']:
# name of object
name = '{0}_{1}'.format(params['NAME'], params['EXT'])
pargs = ['='*50, 'Run for {0}'.format(name)]
print('{0}\n\t{1}\n{0}'.format(*pargs))
def calculation(inputs, params, mask=None):
# format time (days from first time)
time, data, edata, day0 = format_time_days_from_first(*inputs, mask=mask)
params['day0'] = day0
# calculate frequency
freq = pf2.make_frequency_grid(time, fmin=1.0/params['TMAX'],
fmax=1.0/params['TMIN'],
samples_per_peak=params['SPP'])
# large frequency grid will take a long time or cause a segmentation fault
nfreq = len(freq)
if nfreq > 100000:
raise ValueError("Error: frequency grid too large ({0})".format(nfreq))
# results
results = dict()
# make combinations of nf, ssp and df
if params['LOG']:
print('\n Calculating lombscargle...')
# kwargs = dict(fit_mean=True, fmin=1/TMAX, fmax=1/TMIN, samples_per_peak=SPP)
kwargs = dict(fit_mean=True, freq=freq)
lsfreq, lspower, ls = pf2.lombscargle(time, data, edata, **kwargs)
lspower = pf2.normalise(lspower, NORMALISATION)
results['lsfreq'] = lsfreq
results['lspower'] = lspower
# -------------------------------------------------------------------------
# compute window function
if params['LOG']:
print('\n Calculating window function...')
# kwargs = dict(fmin=1 / TMAX, fmax=1 / TMIN, samples_per_peak=SPP)
kwargs = dict(freq=freq)
wffreq, wfpower = pf2.compute_window_function(time, **kwargs)
wfpower = pf2.normalise(wfpower, NORMALISATION)
results['wffreq'] = wffreq
results['wfpower'] = wfpower
# -------------------------------------------------------------------------
# compute noise periodogram
kwargs = dict(n_iterations=params['N_BS'],
random_seed=params['RANDOM_SEED'], norm='standard',
fit_mean=True, log=params['LOG'])
msfreq, mspower, _, _ = pf2.ls_noiseperiodogram(time, data, edata, lsfreq,
**kwargs)
mspower = pf2.normalise(mspower, NORMALISATION)
results['msfreq'] = msfreq
results['mspower'] = mspower
# -------------------------------------------------------------------------
# try to calculate true period
if params['LOG']:
print('\n Attempting to locate real peaks...')
lsargs = dict(freq=lsfreq, power=lspower, number=params['NPEAKS'],
boxsize=params['BOXSIZE'])
# bsargs = dict(ppeaks=bsppeak, percentile=params['CUTPERCENTILE'])
bsargs = None
msargs = dict(freq=msfreq, power=mspower, number=params['NPEAKS'],
boxsize=params['BOXSIZE'], threshold=params['THRESHOLD'])
presults = pf2.find_period(lsargs, bsargs, msargs)
results['periods'] = presults[0]
results['power_periods'] = presults[1]
results['nperiods'] = presults[2]
results['noise_power_periods'] = presults[3]
# -------------------------------------------------------------------------
    # calculate phase data
if params['LOG']:
print('\n Computing phase curve...')
phase, phasefit, powerfit = pf2.phase_data(ls, time, results['periods'])
results['phase'] = phase
results['phasefit'] = phasefit
results['powerfit'] = powerfit
# -------------------------------------------------------------------------
inputs = [time, data, edata]
# -------------------------------------------------------------------------
return inputs, results, params
def format_time_days_from_first(time, data, edata, mask=None):
if mask is None:
mask = np.repeat([True], len(time))
        # zero time data to nearest hundred (start at 0 in steps of days)
day0 = np.floor(time[mask].min() / 100) * 100
day1 = np.ceil(time[mask].max() / 100) * 100
        # there should be no genuinely long time series here, but some data has
        # weird times (e.g. 32 days and 2454340 days); hence if the time series
        # appears to span more than 1e5 days, cut it around the median
if (day1 - day0) > 1e5:
day0 = np.median(time[mask]) - 5000
day1 = np.median(time[mask]) + 5000
# make sure we have no days beyond maximum day
limitmask = (time > day0) & (time < day1)
time = time[limitmask & mask]
data = data[limitmask & mask]
if edata is not None:
edata = edata[limitmask & mask]
else:
time = time[mask]
data = data[mask]
edata = edata[mask]
    # zero time data to nearest hundred (start at 0 in steps of days)
day0 = np.floor(time.min() / 100) * 100
# time should be time since first observation
time -= day0
# return time
return time, data, edata, day0
def plot_graph(inputs, results, params):
# get inputs
time, data, edata = inputs
# extract variables from results
period = results['periods']
    # sort out the name (add extension for sub regions)
name = '{0}_{1}'.format(params['NAME'], params['EXT'])
# -------------------------------------------------------------------------
# do not bother plotting if we get a zero period
if np.isnan(period[0]) and not params['PLOT_NAN_PERIOD']:
return 0
# -------------------------------------------------------------------------
# set up plot
if params['LOG']:
print('\n Plotting graph...')
plt.close()
plt.style.use('seaborn-whitegrid')
fig, frames = plt.subplots(2, 2, figsize=(params['FIGSIZE']))
# -------------------------------------------------------------------------
# plot raw data
kwargs = dict(xlabel='Time / days',
ylabel='$\Delta$ TAM Magnitude',
title='Raw data for {0}'.format(name))
frames[0][0] = pf2.plot_rawdata(frames[0][0], time, data, edata, **kwargs)
frames[0][0].set_ylim(*frames[0][0].get_ylim()[::-1])
# -------------------------------------------------------------------------
# plot window function
if 'wffreq' in results and 'wfpower' in results:
wffreq, wfpower = results['wffreq'], results['wfpower']
kwargs = dict(title='Window function',
ylabel='Lomb-Scargle Power $P_N$')
frames[0][1] = pf2.plot_periodogram(frames[0][1], 1.0/wffreq, wfpower,
**kwargs)
frames[0][1].set_xscale('log')
# -------------------------------------------------------------------------
# plot periodogram
if 'lsfreq' in results and 'lspower' in results:
lsfreq, lspower = results['lsfreq'], results['lspower']
symbol = r'$P_N / \sum(P_N)$'
kwargs = dict(title='Lomb-Scargle Periodogram',
ylabel='Lomb-Scargle Power ' + symbol,
xlabel='Time / days',
zorder=1)
frames[1][0] = pf2.plot_periodogram(frames[1][0], 1.0/lsfreq, lspower,
**kwargs)
# -------------------------------------------------------------------------
# add arrow to periodogram
if 'lsfreq' in results and 'lspower' in results and 'periods' in results:
lsfreq, lspower = results['lsfreq'], results['lspower']
period = results['periods']
kwargs = dict(firstcolor='r', normalcolor='b', zorder=4)
frames[1][0] = pf2.add_arrows(frames[1][0], period, lspower, **kwargs)
# -------------------------------------------------------------------------
# plot MCMC periodogram (noise periodogram)
if ('lsfreq' in results and 'lspower' in results and
'msfreq' in results and 'mspower' in results):
msfreq, mspower = results['msfreq'], results['mspower']
# mspower = np.max(lspower) * mspower / np.max(mspower)
kwargs = dict(color='r', xlabel=None, ylabel=None, xlim=None, ylim=None,
zorder=2)
frames[1][0] = pf2.plot_periodogram(frames[1][0], 1.0/msfreq, mspower,
**kwargs)
frames[1][0].set_xscale('log')
# -------------------------------------------------------------------------
# Plot FAP lines
    lsfreq = results['lsfreq']
pargs1 = dict(color='b', linestyle='--')
if 'theoryFAPpower' in results:
tfap_power = results['theoryFAPpower']
sigmas = list(tfap_power.keys())
faps = list(tfap_power.values())
# pf2.add_fap_lines_to_periodogram(frames[1][0], sigmas, faps, **pargs1)
pargs1 = dict(color='g', linestyle='--')
if 'bsFAPpower' in results:
bfap_power = results['bsFAPpower']
bs_power = results['bs_power']
sigmas = list(bfap_power.keys())
faps = list(bfap_power.values())
pf2.add_fap_lines_to_periodogram(frames[1][0], sigmas, faps, **pargs1)
# kwargs = dict(color='g', xlabel=None, ylabel=None, xlim=None, ylim=None,
# zorder=2)
# frames[1][0] = pf2.plot_periodogram(frames[1][0], 1.0/lsfreq, bs_power,
# **kwargs)
pargs1 = dict(color='c', linestyle='--')
if 'mcFAPpower' in results:
mfap_power = results['mcFAPpower']
ms_power = results['ms_power']
sigmas = list(mfap_power.keys())
faps = list(mfap_power.values())
pf2.add_fap_lines_to_periodogram(frames[1][0], sigmas, faps, **pargs1)
# kwargs = dict(color='c', xlabel=None, ylabel=None, xlim=None, ylim=None,
# zorder=2)
# frames[1][0] = pf2.plot_periodogram(frames[1][0], 1.0 / lsfreq, ms_power,
# **kwargs)
# -------------------------------------------------------------------------
# plot phased periodogram
if 'phase' in results and 'phasefit' in results and 'powerfit' in results:
phase = results['phase']
phasefit, powerfit = results['phasefit'], results['powerfit']
args = [frames[1][1], phase, data, edata, phasefit, powerfit,
params['OFFSET']]
kwargs = dict(title='Phase Curve, period={0:.3f} days'.format(period[0]),
ylabel='$\Delta$ TAM Magnitude')
frames[1][1] = pf2.plot_phased_curve(*args, **kwargs)
frames[1][1].set_ylim(*frames[1][1].get_ylim()[::-1])
# -------------------------------------------------------------------------
# save show close
plt.subplots_adjust(hspace=0.3)
plt.show()
plt.close()
def plot_freq_grid_power(xx, freq, dd):
plt.close()
    dd = np.array(dd)
# left right bottom top
extent = [np.min(1/freq), np.max(1/freq), 0, len(xx)]
im = plt.imshow(xx[::-1], extent=extent, aspect='auto', vmin=0,
vmax=np.max(xx), cmap='plasma')
plt.scatter(1/dd, range(len(dd)), marker='x', color='g', s=2)
    # x-axis is 1/frequency, i.e. period in days
    plt.xlabel('Time / days')
plt.xscale('log')
plt.ylabel('Monte carlo iteration')
plt.grid(False)
cb = plt.colorbar(im)
cb.set_label('Power')
plt.show()
plt.close()
"""
def comparison_test(inp, res):
results = res
time, data, edata = inp
ff, xx, _ = pf2.lombscargle(time, data, edata, fit_mean=True,
freq=freq, norm = 'standard')
plt.plot(1 / ff, xx, color='k', label='True LS')
plt.plot(1 / ff, x_arr[0], color='r', label='MC LS')
plt.legend(loc=0)
plt.xscale('log')
plt.yscale('log')
plt.xlabel('Time / days')
plt.ylabel('Power $P_N$')
plt.title('Lomb-Scargle True vs MCMC test')
frame = plt.gca()
# -------------------------------------------------------------------------
# Plot FAP lines
pargs1 = dict(color='b', linestyle='--')
if 'theoryFAPpower' in results:
tfap_power = res['theoryFAPpower']
sigmas = list(tfap_power.keys())
faps = list(tfap_power.values())
pf2.add_fap_lines_to_periodogram(frame, sigmas, faps, **pargs1)
pargs1 = dict(color='g', linestyle='--')
if 'bsFAPpower' in results:
bfap_power = res['bsFAPpower']
sigmas = list(bfap_power.keys())
faps = list(bfap_power.values())
pf2.add_fap_lines_to_periodogram(frame, sigmas, faps, **pargs1)
pargs1 = dict(color='c', linestyle='--')
if 'mcFAPpower' in results:
mfap_power = res['mcFAPpower']
sigmas = list(mfap_power.keys())
faps = list(mfap_power.values())
pf2.add_fap_lines_to_periodogram(frame, sigmas, faps, **pargs1)
plt.show()
plt.close()
"""
# =============================================================================
# Define FAP functions
# =============================================================================
def power_from_prob_theory(inputs, faps=None, percentiles=None):
time, data, edata = inputs
if faps is None and percentiles is None:
raise ValueError("Need to define either faps or percentiles")
if faps is None:
faps = 1 - np.array(percentiles)/100.0
N = len(time)
faps = np.array(faps)
Meff = -6.363 + 1.193*N + 0.00098*N**2
prob = 1 - (1 - faps)**mpmath.mpf(1/Meff)
power = 1 - (prob)**mpmath.mpf(2/(N-3))
power[power < (sys.float_info.min * 10)] = 0
return np.array(power, dtype=float)
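# Illustrative sketch (not part of the original script): power_from_prob_theory
# inverts the analytic false-alarm relation
#
#   FAP = 1 - (1 - Prob(P > p))**Meff,   Prob(P > p) = (1 - p)**((N - 3) / 2)
#
# with Meff taken from the empirical fit above, so the returned values are the
# power thresholds corresponding to the requested sigma levels, e.g.
#
#   >>> power_from_prob_theory((time, data, edata),
#   ...                        percentiles=pf2.sigma2percentile([1, 2, 3]) * 100)
#   array([...])   # one power threshold per sigma level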
def lombscargle_bootstrap(time, data, edata, frequency_grid, n_bootstraps=100,
random_seed=None, full=False, norm='standard',
fit_mean=True, log=False):
"""
Perform a bootstrap analysis that resamples the data/edata keeping the
temporal (time vector) co-ordinates constant
modified from:
https://github.com/jakevdp/PracticalLombScargle/blob/master
/figures/Uncertainty.ipynb
:param time: numpy array, the time vector
:param data: numpy array, the data vector
:param edata: numpy array, the uncertainty vector associated with the data
vector
:param frequency_grid: numpy array, the frequency grid to use on each
iteration
:param n_bootstraps: int, number of bootstraps to perform
    :param random_seed: int, random seed to use
:param full: boolean, if True return freq at maximum power and maximum
powers, else return powers
:param norm: Lomb-Scargle normalisation
(see astropy.stats.LombScargle)
:param fit_mean: boolean, if True uses a floating mean periodogram
(generalised Lomb-Scargle periodogram) else uses
standard Lomb-Scargle periodogram
:param log: boolean, if true displays progress to standard output (console)
:return:
"""
rng = np.random.RandomState(random_seed)
kwargs = dict(fit_mean=fit_mean, freq=frequency_grid, norm=norm)
def bootstrapped_power():
# sample with replacement
resample = rng.randint(0, len(data), len(data))
# define the Lomb Scargle with resampled data and using frequency_grid
ff, xx, _ = pf2.lombscargle(time, data[resample], edata[resample],
**kwargs)
        # return the frequency grid and the corresponding powers
return ff, xx
# run bootstrap
f_arr, d_arr, x_arr = [], [], []
for _ in wrap(range(n_bootstraps)):
f, x = bootstrapped_power()
x_arr.append(x)
# sort
x_arr = np.array(x_arr)
argmax = np.argmax(x_arr, axis=1)
    # frequency of the highest peak and the peak power of each bootstrap
    f_arr, d_arr = frequency_grid[argmax], np.max(x_arr, axis=1)
# return
if full:
median = np.percentile(x_arr, 50, axis=0)
return frequency_grid, x_arr, f_arr, d_arr
else:
return d_arr
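# Illustrative sketch (not part of the original script): typical use of the
# bootstrap above to obtain a false-alarm power level (values hypothetical).
#
#   >>> freq, powers, peak_freqs, peak_powers = lombscargle_bootstrap(
#   ...     time, data, edata, freq_grid, n_bootstraps=500, full=True)
#   >>> p_fap_1pc = np.percentile(peak_powers, 99)   # 1% FAP power threshold
#
# i.e. the distribution of the highest peak over resampled light curves gives
# the power that random rearrangements of the data exceed only 1% of the time.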
def get_gaussian_data(means, stds, samples, rng=None):
if rng is None:
rng = np.random.RandomState(None)
# get n_samples number of gaussians for each time element
g = []
for i in range(len(means)):
g.append(rng.normal(means[i], stds[i], size=samples))
# transpose so we have 1000 light curves with len(time) length
return np.array(g).T
def lombscargle_mcmc(time, data, edata, frequency_grid, n_bootstraps=100,
random_seed=None, full=False, norm='standard',
fit_mean=True, log=False):
"""
    Perform a Monte Carlo analysis that replaces the data with zero-mean
    Gaussian noise realisations (std = edata), keeping the temporal
    (time vector) co-ordinates constant
modified from:
https://github.com/jakevdp/PracticalLombScargle/blob/master
/figures/Uncertainty.ipynb
:param time: numpy array, the time vector
:param data: numpy array, the data vector
:param edata: numpy array, the uncertainty vector associated with the data
vector
:param frequency_grid: numpy array, the frequency grid to use on each
iteration
:param n_bootstraps: int, number of bootstraps to perform
    :param random_seed: int, random seed to use
:param full: boolean, if True return freq at maximum power and maximum
powers, else return powers
:param norm: Lomb-Scargle normalisation
(see astropy.stats.LombScargle)
:param fit_mean: boolean, if True uses a floating mean periodogram
(generalised Lomb-Scargle periodogram) else uses
standard Lomb-Scargle periodogram
:param log: boolean, if true displays progress to standard output (console)
:return:
"""
rng = np.random.RandomState(random_seed)
kwargs = dict(fit_mean=fit_mean, freq=frequency_grid, norm=norm)
# generate gaussian arrays
# Want to sample the white noise i.e. take each data point from a
# gaussian distribution with mean = 0, std = edata
zerodata = np.zeros_like(data)
g = get_gaussian_data(zerodata, abs(edata), n_bootstraps, rng)
def bootstrapped_power(it):
        # compute the Lomb-Scargle of the it-th Gaussian noise realisation
        # on frequency_grid
        ff, xx, _ = pf2.lombscargle(time, g[it], edata, **kwargs)
        # return the frequency grid and the corresponding powers
        return ff, xx
# run bootstrap
f_arr, d_arr, x_arr = [], [], []
for it in wrap(range(n_bootstraps)):
f, x = bootstrapped_power(it)
x_arr.append(x)
# sort
x_arr = np.array(x_arr)
argmax = np.argmax(x_arr, axis=1)
    # frequency of the highest peak and the peak power of each realisation
    f_arr, d_arr = frequency_grid[argmax], np.max(x_arr, axis=1)
# return
if full:
median = np.percentile(x_arr, 50, axis=0)
return frequency_grid, x_arr, f_arr, d_arr
else:
return d_arr
def power_from_prob_bootstrap(inputs, res, faps=None, percentiles=None):
time, data, edata = inputs
freq = res['lsfreq']
if faps is None and percentiles is None:
raise ValueError("Need to define either faps or percentiles")
if faps is None:
faps = 1 - np.array(percentiles)/100.0
res1 = lombscargle_bootstrap(time, data, edata, freq, n_bootstraps=N_BS,
full=True, norm='standard', fit_mean=True,
log=True)
freq, x_arr, f_arr, lres = res1
# plot_freq_grid_power(x_arr, freq, f_arr)
return np.percentile(lres, 100 * (1 - faps)), np.max(x_arr, axis=0)
# return np.percentile(lres, 100*(1 - faps)), np.median(x_arr, axis=0)
def power_from_prob_mcmc(inputs, results, faps=None, percentiles=None):
time, data, edata = inputs
freq = results['lsfreq']
if faps is None and percentiles is None:
raise ValueError("Need to define either faps or percentiles")
if faps is None:
faps = 1 - np.array(percentiles)/100.0
res1 = lombscargle_mcmc(time, data, edata, freq, n_bootstraps=N_BS,
full=True, norm='standard', fit_mean=True,
log=True)
freq, x_arr, f_arr, lres = res1
# plot_freq_grid_power(x_arr, freq, f_arr)
return np.percentile(lres, 100 * (1 - faps)), np.max(x_arr, axis=0)
# return np.percentile(lres, 100*(1 - faps)), np.median(x_arr, axis=0)
def test_data(show=True):
period = TEST_PERIOD
tmax = TEST_TMAX
npoints = TEST_N
noise = TEST_SNR
time, data, edata = pf2.create_data(npoints, timeamp=tmax,
signal_to_noise=noise, period=period,
random_state=9)
if show:
plt.close()
plt.scatter(time, data)
plt.show()
plt.close()
return time, data, edata
# =============================================================================
# Start of code
# =============================================================================
# Main code here
# noinspection PyUnboundLocalVariable
if __name__ == "__main__":
# -------------------------------------------------------------------------
pp = get_params()
# -------------------------------------------------------------------------
# Load data
time_arr, data_arr, edata_arr, pp = load_data(pp)
# -------------------------------------------------------------------------
# define mask and name from
m, pp['EXT'] = None, "full"
# -------------------------------------------------------------------------
# print progress if logging on
update_progress(pp)
# -------------------------------------------------------------------------
# LS/noise/phase Calculation
inp = time_arr, data_arr, edata_arr
inp, res, pp = calculation(inp, pp, m)
# -------------------------------------------------------------------------
# FAP Calculation
print('\n Verbose False Alarm Probability calculations...')
sigmas = [1.0, 2.0, 3.0]
# get percentiles for sigmas
percentiles = np.array(pf2.sigma2percentile(sigmas) * 100)
# get false alarm probability power FROM THEORY
theory_fap_power = power_from_prob_theory(inp, percentiles=percentiles)
# get false alarm probability power FROM BOOTSTRAP
print('\n\t Bootstrap FAP (randomising magnitudes)')
bs_fap_power, bs_power = power_from_prob_bootstrap(inp, res,
percentiles=percentiles)
# get false alarm probability power FROM MONTE CARLO
    print('\n\t MCMC FAP (Gaussian dist: mean=0 std=uncertainties)')
mc_fap_power, ms_power = power_from_prob_mcmc(inp, res,
percentiles=percentiles)
res['bs_power'] = bs_power
res['ms_power'] = ms_power
# loop around sigmas
res['theoryFAPpower'] = dict()
res['bsFAPpower'] = dict()
res['mcFAPpower'] = dict()
print('\nNumber of elements in time vector: {0}'.format(len(inp[0])))
print('\nNumber of elements in freq grid: {0}'.format(len(res['lsfreq'])))
for s, sigma in enumerate(sigmas):
# print statements to compare values
print('Sigma = {0} Percentile = {1:4f}'.format(sigma, percentiles[s]))
print('BLUE: FAP theory power = {0:4f}'.format(theory_fap_power[s]))
print('GREEN: FAP bootstrap power = {0:4f}'.format(bs_fap_power[s]))
print('CYAN: FAP monte carlo power = {0:4f}'.format(mc_fap_power[s]))
print('\n')
# save results to results dict
res['theoryFAPpower'][sigma] = theory_fap_power[s]
res['bsFAPpower'][sigma] = bs_fap_power[s]
res['mcFAPpower'][sigma] = mc_fap_power[s]
# -------------------------------------------------------------------------
# plotting
plot_graph(inp, res, pp)
# =============================================================================
# End of code
# =============================================================================
|
njcuk9999/neil_superwasp_periodogram
|
LCA_test_ernst.py
|
Python
|
mit
| 33,394
|
[
"Gaussian"
] |
7dfb8bdcfdc48c694c3fdc230a63e891d860cb447d88f12d45fbb325e67f28c2
|
__author__ = "waroquiers"
import numpy as np
from pymatgen.analysis.chemenv.connectivity.environment_nodes import EnvironmentNode
from pymatgen.analysis.chemenv.utils.graph_utils import (
MultiGraphCycle,
SimpleGraphCycle,
get_delta,
)
from pymatgen.util.testing import PymatgenTest
class FakeNode:
def __init__(self, isite):
self.isite = isite
class FakeNodeWithEqMethod:
def __init__(self, isite):
self.isite = isite
def __eq__(self, other):
return self.isite == other.isite
def __hash__(self):
return 0
class FakeNodeWithEqLtMethods:
def __init__(self, isite):
self.isite = isite
def __eq__(self, other):
return self.isite == other.isite
def __lt__(self, other):
return self.isite < other.isite
def __str__(self):
return f"FakeNode_{self.isite:d}"
def __hash__(self):
return 0
class FakeNodeWithEqLtMethodsBis(FakeNodeWithEqLtMethods):
pass
class FakeNodeWithEqMethodWrongSortable:
def __init__(self, isite):
self.isite = isite
def __eq__(self, other):
return self.isite == other.isite
def __hash__(self):
return 0
def __lt__(self, other):
return self.isite % 2 < other.isite % 2
class GraphUtilsTest(PymatgenTest):
def test_get_delta(self):
n1 = FakeNode(3)
n2 = FakeNode(7)
edge_data = {"start": 3, "end": 7, "delta": [2, 6, 4]}
self.assertTrue(np.allclose(get_delta(n1, n2, edge_data), [2, 6, 4]))
edge_data = {"start": 7, "end": 3, "delta": [2, 6, 4]}
self.assertTrue(np.allclose(get_delta(n1, n2, edge_data), [-2, -6, -4]))
with self.assertRaisesRegex(
ValueError,
"Trying to find a delta between two nodes with an edge that seems not to link these nodes.",
):
edge_data = {"start": 6, "end": 3, "delta": [2, 6, 4]}
get_delta(n1, n2, edge_data)
with self.assertRaisesRegex(
ValueError,
"Trying to find a delta between two nodes with an edge that seems not to link these nodes.",
):
edge_data = {"start": 7, "end": 2, "delta": [2, 6, 4]}
get_delta(n1, n2, edge_data)
def test_simple_graph_cycle(self):
sg_cycle1 = SimpleGraphCycle([0, 1, 2, 3])
# Test equality
sg_cycle2 = SimpleGraphCycle([1, 2, 3, 0])
self.assertEqual(sg_cycle1, sg_cycle2)
sg_cycle2 = SimpleGraphCycle([2, 3, 0, 1])
self.assertEqual(sg_cycle1, sg_cycle2)
sg_cycle2 = SimpleGraphCycle([3, 0, 1, 2])
self.assertEqual(sg_cycle1, sg_cycle2)
# Test reversed cycles
sg_cycle2 = SimpleGraphCycle([0, 3, 2, 1])
self.assertEqual(sg_cycle1, sg_cycle2)
sg_cycle2 = SimpleGraphCycle([3, 2, 1, 0])
self.assertEqual(sg_cycle1, sg_cycle2)
sg_cycle2 = SimpleGraphCycle([2, 1, 0, 3])
self.assertEqual(sg_cycle1, sg_cycle2)
sg_cycle2 = SimpleGraphCycle([1, 0, 3, 2])
self.assertEqual(sg_cycle1, sg_cycle2)
# Test different cycle lengths inequality
sg_cycle2 = SimpleGraphCycle([0, 1, 2, 3, 4])
self.assertNotEqual(sg_cycle1, sg_cycle2)
# Test equality of self-loops
self.assertEqual(SimpleGraphCycle([0]), SimpleGraphCycle([0]))
self.assertNotEqual(SimpleGraphCycle([0]), SimpleGraphCycle([4]))
# Test inequality inversing two nodes
sg_cycle2 = SimpleGraphCycle([0, 1, 3, 2])
self.assertNotEqual(sg_cycle1, sg_cycle2)
# Test inequality with different nodes
sg_cycle2 = SimpleGraphCycle([4, 1, 2, 3])
self.assertNotEqual(sg_cycle1, sg_cycle2)
# Test hashing function
self.assertEqual(hash(sg_cycle1), 4)
self.assertEqual(hash(SimpleGraphCycle([0])), 1)
self.assertEqual(hash(SimpleGraphCycle([0, 1, 3, 6])), 4)
self.assertEqual(hash(SimpleGraphCycle([0, 1, 2])), 3)
# Test from_edges function
# 3-nodes cycle
edges = [(0, 2), (4, 2), (0, 4)]
sg_cycle = SimpleGraphCycle.from_edges(edges=edges, edges_are_ordered=False)
self.assertEqual(sg_cycle, SimpleGraphCycle([4, 0, 2]))
# Self-loop cycle
edges = [(2, 2)]
sg_cycle = SimpleGraphCycle.from_edges(edges=edges)
self.assertEqual(sg_cycle, SimpleGraphCycle([2]))
# 5-nodes cycle
edges = [(0, 2), (4, 7), (2, 7), (4, 5), (5, 0)]
sg_cycle = SimpleGraphCycle.from_edges(edges=edges, edges_are_ordered=False)
self.assertEqual(sg_cycle, SimpleGraphCycle([2, 7, 4, 5, 0]))
# two identical 3-nodes cycles
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : Duplicate nodes.",
):
edges = [(0, 2), (4, 2), (0, 4), (0, 2), (4, 2), (0, 4)]
SimpleGraphCycle.from_edges(edges=edges, edges_are_ordered=False)
# two cycles in from_edges
with self.assertRaisesRegex(ValueError, expected_regex="Could not construct a cycle from edges."):
edges = [(0, 2), (4, 2), (0, 4), (1, 3), (6, 7), (3, 6), (1, 7)]
SimpleGraphCycle.from_edges(edges=edges, edges_are_ordered=False)
with self.assertRaisesRegex(ValueError, expected_regex="Could not construct a cycle from edges."):
edges = [(0, 2), (4, 6), (2, 7), (4, 5), (5, 0)]
SimpleGraphCycle.from_edges(edges=edges, edges_are_ordered=False)
with self.assertRaisesRegex(ValueError, expected_regex="Could not construct a cycle from edges."):
edges = [(0, 2), (4, 7), (2, 7), (4, 10), (5, 0)]
SimpleGraphCycle.from_edges(edges=edges, edges_are_ordered=False)
# Test as_dict from_dict and len method
sg_cycle = SimpleGraphCycle([0, 1, 2, 3])
self.assertEqual(sg_cycle, SimpleGraphCycle.from_dict(sg_cycle.as_dict()))
self.assertEqual(len(sg_cycle), 4)
sg_cycle = SimpleGraphCycle([4])
self.assertEqual(sg_cycle, SimpleGraphCycle.from_dict(sg_cycle.as_dict()))
self.assertEqual(len(sg_cycle), 1)
sg_cycle = SimpleGraphCycle([4, 2, 6, 7, 9, 3, 15])
self.assertEqual(sg_cycle, SimpleGraphCycle.from_dict(sg_cycle.as_dict()))
self.assertEqual(len(sg_cycle), 7)
# Check validation at instance creation time
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : Duplicate nodes.",
):
SimpleGraphCycle([0, 2, 4, 6, 2])
# Check the validate method
# Nodes not sortable
sgc = SimpleGraphCycle(
[FakeNodeWithEqMethod(1), FakeNodeWithEqMethod(0), FakeNodeWithEqMethod(2)],
validate=False,
ordered=False,
)
self.assertFalse(sgc.ordered)
self.assertEqual(
sgc.nodes,
(FakeNodeWithEqMethod(1), FakeNodeWithEqMethod(0), FakeNodeWithEqMethod(2)),
)
sgc.validate(check_strict_ordering=False)
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : The nodes are not sortable.",
):
sgc.validate(check_strict_ordering=True)
# Empty cycle not valid
sgc = SimpleGraphCycle([], validate=False, ordered=False)
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : Empty cycle is not valid.",
):
sgc.validate()
# Simple graph cycle with 2 nodes not valid
sgc = SimpleGraphCycle([1, 2], validate=False, ordered=False)
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : Simple graph cycle with 2 nodes is not valid.",
):
sgc.validate()
# Simple graph cycle with nodes that cannot be strictly ordered
sgc = SimpleGraphCycle(
[
FakeNodeWithEqMethodWrongSortable(0),
FakeNodeWithEqMethodWrongSortable(1),
FakeNodeWithEqMethodWrongSortable(2),
FakeNodeWithEqMethodWrongSortable(3),
],
validate=False,
ordered=False,
)
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : "
"The list of nodes in the cycle cannot be strictly ordered.",
):
sgc.validate(check_strict_ordering=True)
# Check the order method
sgc = SimpleGraphCycle(
[
FakeNodeWithEqMethodWrongSortable(0),
FakeNodeWithEqMethodWrongSortable(1),
FakeNodeWithEqMethodWrongSortable(2),
FakeNodeWithEqMethodWrongSortable(3),
],
validate=False,
ordered=False,
)
sgc.order(raise_on_fail=False)
self.assertFalse(sgc.ordered)
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : "
"The list of nodes in the cycle cannot be strictly ordered.",
):
sgc.order(raise_on_fail=True)
sgc = SimpleGraphCycle(
[
FakeNodeWithEqMethod(8),
FakeNodeWithEqMethod(0),
FakeNodeWithEqMethod(3),
FakeNodeWithEqMethod(6),
],
validate=False,
ordered=False,
)
sgc.order(raise_on_fail=False)
self.assertFalse(sgc.ordered)
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : The nodes are not sortable.",
):
sgc.order(raise_on_fail=True)
sgc = SimpleGraphCycle(
[
FakeNodeWithEqLtMethods(8),
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
],
validate=False,
ordered=False,
)
sgc.order(raise_on_fail=True)
self.assertTrue(sgc.ordered)
self.assertEqual(
sgc.nodes,
(
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
FakeNodeWithEqLtMethods(8),
),
)
sgc = SimpleGraphCycle(
[
FakeNodeWithEqLtMethodsBis(8),
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
],
validate=False,
ordered=False,
)
sgc.order(raise_on_fail=False)
self.assertFalse(sgc.ordered)
self.assertEqual(
sgc.nodes,
(
FakeNodeWithEqLtMethodsBis(8),
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
),
)
with self.assertRaisesRegex(
ValueError,
expected_regex="Could not order simple graph cycle as the nodes are of different classes.",
):
sgc.order(raise_on_fail=True)
sgc = SimpleGraphCycle([FakeNodeWithEqLtMethods(85)], validate=False, ordered=False)
self.assertFalse(sgc.ordered)
sgc.order()
self.assertTrue(sgc.ordered)
self.assertEqual(sgc.nodes, tuple([FakeNodeWithEqLtMethods(85)]))
sgc = SimpleGraphCycle(
[
FakeNodeWithEqLtMethods(8),
FakeNodeWithEqLtMethods(2),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
FakeNodeWithEqLtMethods(4),
FakeNodeWithEqLtMethods(1),
FakeNodeWithEqLtMethods(64),
FakeNodeWithEqLtMethods(32),
],
validate=False,
ordered=False,
)
self.assertFalse(sgc.ordered)
sgc.order()
self.assertTrue(sgc.ordered)
self.assertEqual(
sgc.nodes,
tuple(
[
FakeNodeWithEqLtMethods(1),
FakeNodeWithEqLtMethods(4),
FakeNodeWithEqLtMethods(3),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(2),
FakeNodeWithEqLtMethods(8),
FakeNodeWithEqLtMethods(32),
FakeNodeWithEqLtMethods(64),
]
),
)
# Test str method
self.assertEqual(
str(sgc),
"Simple cycle with nodes :\n"
"FakeNode_1\n"
"FakeNode_4\n"
"FakeNode_3\n"
"FakeNode_6\n"
"FakeNode_2\n"
"FakeNode_8\n"
"FakeNode_32\n"
"FakeNode_64",
)
def test_multigraph_cycle(self):
mg_cycle1 = MultiGraphCycle([2, 4, 3, 5], [1, 0, 2, 0])
# Check is_valid method
is_valid, msg = MultiGraphCycle._is_valid(mg_cycle1)
self.assertTrue(is_valid)
self.assertEqual(msg, "")
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : "
"Number of nodes different from number of "
"edge indices.",
):
MultiGraphCycle([0, 2, 4], [0, 0]) # number of nodes is different from number of edge_indices
with self.assertRaisesRegex(ValueError, expected_regex="MultiGraphCycle is not valid : Duplicate nodes."):
MultiGraphCycle([0, 2, 4, 3, 2], [0, 0, 0, 0, 0]) # duplicated nodes
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : "
"Cycles with two nodes cannot use the same "
"edge for the cycle.",
):
MultiGraphCycle([3, 5], [1, 1]) # number of nodes is different from number of edge_indices
# Testing equality
# Test different cycle lengths inequality
mg_cycle2 = MultiGraphCycle([2, 3, 4, 5, 6], [1, 0, 2, 0, 0])
self.assertFalse(mg_cycle1 == mg_cycle2)
# Test equality
mg_cycle2 = MultiGraphCycle([2, 4, 3, 5], [1, 0, 2, 0])
self.assertTrue(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([4, 3, 5, 2], [0, 2, 0, 1])
self.assertTrue(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([3, 5, 2, 4], [2, 0, 1, 0])
self.assertTrue(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([5, 2, 4, 3], [0, 1, 0, 2])
self.assertTrue(mg_cycle1 == mg_cycle2)
# Test equality (reversed)
mg_cycle2 = MultiGraphCycle([2, 5, 3, 4], [0, 2, 0, 1])
self.assertTrue(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([5, 3, 4, 2], [2, 0, 1, 0])
self.assertTrue(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([3, 4, 2, 5], [0, 1, 0, 2])
self.assertTrue(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([4, 2, 5, 3], [1, 0, 2, 0])
self.assertTrue(mg_cycle1 == mg_cycle2)
# Test inequality
mg_cycle2 = MultiGraphCycle([2, 5, 3, 4], [0, 1, 0, 1])
self.assertFalse(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([2, 5, 3, 4], [1, 0, 2, 0])
self.assertFalse(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([3, 5, 2, 4], [1, 0, 2, 0])
self.assertFalse(mg_cycle1 == mg_cycle2)
# Test Self-loop case
self.assertTrue(MultiGraphCycle([2], [1]) == MultiGraphCycle([2], [1]))
self.assertFalse(MultiGraphCycle([1], [1]) == MultiGraphCycle([2], [1]))
self.assertFalse(MultiGraphCycle([2], [1]) == MultiGraphCycle([2], [0]))
self.assertFalse(MultiGraphCycle([2], [1]) == MultiGraphCycle([1], [1]))
self.assertFalse(MultiGraphCycle([2], [0]) == MultiGraphCycle([2], [1]))
# Test special case with two nodes
self.assertTrue(MultiGraphCycle([2, 4], [1, 3]) == MultiGraphCycle([2, 4], [1, 3]))
self.assertTrue(MultiGraphCycle([2, 4], [1, 3]) == MultiGraphCycle([2, 4], [3, 1]))
self.assertTrue(MultiGraphCycle([2, 4], [1, 3]) == MultiGraphCycle([4, 2], [3, 1]))
self.assertTrue(MultiGraphCycle([2, 4], [1, 3]) == MultiGraphCycle([4, 2], [1, 3]))
self.assertFalse(MultiGraphCycle([2, 4], [1, 3]) == MultiGraphCycle([4, 2], [1, 2]))
self.assertFalse(MultiGraphCycle([2, 4], [1, 3]) == MultiGraphCycle([4, 0], [1, 3]))
# Test hashing function
self.assertEqual(hash(mg_cycle1), 4)
self.assertEqual(hash(MultiGraphCycle([0], [0])), 1)
self.assertEqual(hash(MultiGraphCycle([0, 3], [0, 1])), 2)
self.assertEqual(hash(MultiGraphCycle([0, 3, 5, 7, 8], [0, 1, 0, 0, 0])), 5)
# Test as_dict from_dict and len method
mg_cycle = MultiGraphCycle([0, 1, 2, 3], [1, 2, 0, 3])
self.assertTrue(mg_cycle == MultiGraphCycle.from_dict(mg_cycle.as_dict()))
self.assertEqual(len(mg_cycle), 4)
mg_cycle = MultiGraphCycle([2, 5, 3, 4, 1, 0], [0, 0, 2, 0, 1, 0])
self.assertTrue(mg_cycle == MultiGraphCycle.from_dict(mg_cycle.as_dict()))
self.assertEqual(len(mg_cycle), 6)
mg_cycle = MultiGraphCycle([8], [1])
self.assertTrue(mg_cycle == MultiGraphCycle.from_dict(mg_cycle.as_dict()))
self.assertEqual(len(mg_cycle), 1)
# Check the validate method
# Number of nodes and edges do not match
mgc = MultiGraphCycle(
nodes=[
FakeNodeWithEqMethod(1),
FakeNodeWithEqMethod(0),
FakeNodeWithEqMethod(2),
],
edge_indices=[0, 0],
validate=False,
ordered=False,
)
self.assertFalse(mgc.ordered)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : "
"Number of nodes different from "
"number of edge indices.",
):
mgc.validate(check_strict_ordering=False)
# Empty cycle not valid
mgc = MultiGraphCycle([], edge_indices=[], validate=False, ordered=False)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : Empty cycle is not valid.",
):
mgc.validate()
# Multi graph cycle with duplicate nodes not valid
mgc = MultiGraphCycle(
[FakeNodeWithEqMethod(1), FakeNodeWithEqMethod(0), FakeNodeWithEqMethod(1)],
edge_indices=[0, 1, 0],
validate=False,
ordered=False,
)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : Duplicate nodes.",
):
mgc.validate()
# Multi graph cycle with two nodes cannot use the same edge
mgc = MultiGraphCycle(
[FakeNodeWithEqMethod(1), FakeNodeWithEqMethod(0)],
edge_indices=[1, 1],
validate=False,
ordered=False,
)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : "
"Cycles with two nodes cannot use the same edge for the cycle.",
):
mgc.validate()
# Nodes not sortable
mgc = MultiGraphCycle(
[FakeNodeWithEqMethod(1), FakeNodeWithEqMethod(0), FakeNodeWithEqMethod(2)],
edge_indices=[0, 0, 0],
validate=False,
ordered=False,
)
self.assertFalse(mgc.ordered)
self.assertEqual(
mgc.nodes,
(FakeNodeWithEqMethod(1), FakeNodeWithEqMethod(0), FakeNodeWithEqMethod(2)),
)
mgc.validate(check_strict_ordering=False)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : The nodes are not sortable.",
):
mgc.validate(check_strict_ordering=True)
# Multi graph cycle with nodes that cannot be strictly ordered
mgc = MultiGraphCycle(
[
FakeNodeWithEqMethodWrongSortable(0),
FakeNodeWithEqMethodWrongSortable(1),
FakeNodeWithEqMethodWrongSortable(2),
FakeNodeWithEqMethodWrongSortable(3),
],
edge_indices=[0, 0, 0, 0],
validate=False,
ordered=False,
)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : "
"The list of nodes in the cycle cannot be strictly ordered.",
):
mgc.validate(check_strict_ordering=True)
# Check the order method
mgc = MultiGraphCycle(
[
FakeNodeWithEqMethodWrongSortable(0),
FakeNodeWithEqMethodWrongSortable(1),
FakeNodeWithEqMethodWrongSortable(2),
FakeNodeWithEqMethodWrongSortable(3),
],
edge_indices=[0, 0, 0, 0],
validate=False,
ordered=False,
)
mgc.order(raise_on_fail=False)
self.assertFalse(mgc.ordered)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : "
"The list of nodes in the cycle cannot be strictly ordered.",
):
mgc.order(raise_on_fail=True)
mgc = MultiGraphCycle(
[
FakeNodeWithEqMethod(8),
FakeNodeWithEqMethod(0),
FakeNodeWithEqMethod(3),
FakeNodeWithEqMethod(6),
],
edge_indices=[0, 0, 0, 0],
validate=False,
ordered=False,
)
mgc.order(raise_on_fail=False)
self.assertFalse(mgc.ordered)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : The nodes are not sortable.",
):
mgc.order(raise_on_fail=True)
mgc = MultiGraphCycle(
[
FakeNodeWithEqLtMethods(8),
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
],
edge_indices=[2, 5, 3, 7],
validate=False,
ordered=False,
)
mgc.order(raise_on_fail=True)
self.assertTrue(mgc.ordered)
self.assertEqual(
mgc.nodes,
(
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
FakeNodeWithEqLtMethods(8),
),
)
self.assertEqual(mgc.edge_indices, (5, 3, 7, 2))
mgc = MultiGraphCycle(
[
FakeNodeWithEqLtMethodsBis(8),
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
],
edge_indices=[2, 5, 3, 7],
validate=False,
ordered=False,
)
mgc.order(raise_on_fail=False)
self.assertFalse(mgc.ordered)
self.assertEqual(
mgc.nodes,
(
FakeNodeWithEqLtMethodsBis(8),
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
),
)
self.assertEqual(mgc.edge_indices, (2, 5, 3, 7))
with self.assertRaisesRegex(
ValueError,
expected_regex="Could not order simple graph cycle as the nodes are of different classes.",
):
mgc.order(raise_on_fail=True)
mgc = MultiGraphCycle(
[FakeNodeWithEqLtMethods(85)],
edge_indices=[7],
validate=False,
ordered=False,
)
self.assertFalse(mgc.ordered)
mgc.order()
self.assertTrue(mgc.ordered)
self.assertEqual(mgc.nodes, tuple([FakeNodeWithEqLtMethods(85)]))
self.assertEqual(mgc.edge_indices, tuple([7]))
mgc = MultiGraphCycle(
[
FakeNodeWithEqLtMethods(8),
FakeNodeWithEqLtMethods(2),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
FakeNodeWithEqLtMethods(4),
FakeNodeWithEqLtMethods(1),
FakeNodeWithEqLtMethods(64),
FakeNodeWithEqLtMethods(32),
],
edge_indices=[2, 0, 4, 1, 0, 3, 5, 2],
validate=False,
ordered=False,
)
self.assertFalse(mgc.ordered)
mgc.order()
self.assertTrue(mgc.ordered)
self.assertEqual(
mgc.nodes,
tuple(
[
FakeNodeWithEqLtMethods(1),
FakeNodeWithEqLtMethods(4),
FakeNodeWithEqLtMethods(3),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(2),
FakeNodeWithEqLtMethods(8),
FakeNodeWithEqLtMethods(32),
FakeNodeWithEqLtMethods(64),
]
),
)
self.assertEqual(mgc.edge_indices, tuple([0, 1, 4, 0, 2, 2, 5, 3]))
# Testing all cases for a length-4 cycle
nodes_ref = tuple(FakeNodeWithEqLtMethods(inode) for inode in [0, 1, 2, 3])
edges_ref = (3, 6, 9, 12)
for inodes, iedges in [
((0, 1, 2, 3), (3, 6, 9, 12)),
((1, 2, 3, 0), (6, 9, 12, 3)),
((2, 3, 0, 1), (9, 12, 3, 6)),
((3, 0, 1, 2), (12, 3, 6, 9)),
((3, 2, 1, 0), (9, 6, 3, 12)),
((2, 1, 0, 3), (6, 3, 12, 9)),
((1, 0, 3, 2), (3, 12, 9, 6)),
((0, 3, 2, 1), (12, 9, 6, 3)),
]:
mgc = MultiGraphCycle(
[FakeNodeWithEqLtMethods(inode) for inode in inodes],
edge_indices=[iedge for iedge in iedges],
)
strnodes = ", ".join([str(i) for i in inodes])
self.assertEqual(
mgc.nodes,
nodes_ref,
msg="Nodes not equal for inodes = ({})".format(", ".join([str(i) for i in inodes])),
)
self.assertEqual(
mgc.edge_indices,
edges_ref,
msg=f"Edges not equal for inodes = ({strnodes})",
)
class EnvironmentNodesGraphUtilsTest(PymatgenTest):
def test_cycle(self):
e1 = EnvironmentNode(central_site="Si", i_central_site=0, ce_symbol="T:4")
e2 = EnvironmentNode(central_site="Si", i_central_site=3, ce_symbol="T:4")
e3 = EnvironmentNode(central_site="Si", i_central_site=2, ce_symbol="T:4")
e4 = EnvironmentNode(central_site="Si", i_central_site=5, ce_symbol="T:4")
e5 = EnvironmentNode(central_site="Si", i_central_site=1, ce_symbol="T:4")
# Tests of SimpleGraphCycle with EnvironmentNodes
c1 = SimpleGraphCycle([e2])
c2 = SimpleGraphCycle([e2])
self.assertEqual(c1, c2)
c1 = SimpleGraphCycle([e1])
c2 = SimpleGraphCycle([e2])
self.assertNotEqual(c1, c2)
c1 = SimpleGraphCycle([e1, e2, e3])
c2 = SimpleGraphCycle([e2, e1, e3])
self.assertEqual(c1, c2)
c2 = SimpleGraphCycle([e2, e3, e1])
self.assertEqual(c1, c2)
c1 = SimpleGraphCycle([e3, e2, e4, e1, e5])
c2 = SimpleGraphCycle([e1, e4, e2, e3, e5])
self.assertEqual(c1, c2)
c2 = SimpleGraphCycle([e2, e3, e5, e1, e4])
self.assertEqual(c1, c2)
c1 = SimpleGraphCycle([e2, e3, e4, e1, e5])
c2 = SimpleGraphCycle([e2, e3, e5, e1, e4])
self.assertNotEqual(c1, c2)
# Tests of MultiGraphCycle with EnvironmentNodes
c1 = MultiGraphCycle([e1], [2])
c2 = MultiGraphCycle([e1], [2])
self.assertEqual(c1, c2)
c2 = MultiGraphCycle([e1], [1])
self.assertNotEqual(c1, c2)
c2 = MultiGraphCycle([e2], [2])
self.assertNotEqual(c1, c2)
c1 = MultiGraphCycle([e1, e2], [0, 1])
c2 = MultiGraphCycle([e1, e2], [1, 0])
self.assertEqual(c1, c2)
c2 = MultiGraphCycle([e2, e1], [1, 0])
self.assertEqual(c1, c2)
c2 = MultiGraphCycle([e2, e1], [0, 1])
self.assertEqual(c1, c2)
c2 = MultiGraphCycle([e2, e1], [2, 1])
self.assertNotEqual(c1, c2)
c1 = MultiGraphCycle([e1, e2, e3], [0, 1, 2])
c2 = MultiGraphCycle([e2, e1, e3], [0, 2, 1])
self.assertEqual(c1, c2)
c2 = MultiGraphCycle([e2, e3, e1], [1, 2, 0])
self.assertEqual(c1, c2)
if __name__ == "__main__":
import unittest
unittest.main()
|
vorwerkc/pymatgen
|
pymatgen/analysis/chemenv/utils/tests/test_graph_utils.py
|
Python
|
mit
| 29,166
|
[
"pymatgen"
] |
8b6e90c30a22bf7864e4e2e4501a2bf129d0142ceba591599e93f63ff99fa5fb
|
from gpaw.utilities import unpack
import numpy as np
from gpaw.mpi import world, rank
from gpaw.utilities.blas import gemm
from gpaw.utilities.timing import Timer
from gpaw.utilities.lapack import inverse_general
from gpaw.transport.tools import get_matrix_index, collect_lead_mat, dot
import copy
import _gpaw
class Banded_Sparse_HSD:
#for lead's hamiltonian, overlap, and density matrix
def __init__(self, dtype, ns, npk, index=None):
self.band_index = index
self.dtype = dtype
self.H = []
self.S = []
self.D = []
self.ns = ns
self.npk = npk
self.s = 0
self.pk = 0
for s in range(ns):
self.H.append([])
self.D.append([])
for k in range(npk):
self.H[s].append([])
self.D[s].append([])
for k in range(npk):
self.S.append([])
def reset(self, s, pk, mat, flag='S', init=False):
assert mat.dtype == self.dtype
if flag == 'S':
spar = self.S
elif flag == 'H':
spar = self.H[s]
elif flag == 'D':
spar = self.D[s]
if not init:
spar[pk].reset(mat)
elif self.band_index != None:
spar[pk] = Banded_Sparse_Matrix(self.dtype, mat, self.band_index)
else:
spar[pk] = Banded_Sparse_Matrix(self.dtype, mat)
self.band_index = spar[pk].band_index
class Banded_Sparse_Matrix:
def __init__(self, dtype, mat=None, band_index=None, tol=1e-9):
self.tol = tol
self.dtype = dtype
self.band_index = band_index
if mat != None:
if band_index == None:
self.initialize(mat)
else:
self.reset(mat)
def initialize(self, mat):
        # the indexing way needs mat[0][-1] = 0, otherwise it will recover an
        # unsymmetric full matrix
assert self.dtype == mat.dtype
#assert mat[0][-1] < self.tol
dim = mat.shape[-1]
ku = -1
kl = -1
mat_sum = np.sum(abs(mat))
spar_sum = 0
while abs(mat_sum - spar_sum) > self.tol * 10:
#ku += 1
#kl += 1
#ud_sum = 1
#dd_sum = 1
#while(ud_sum > self.tol):
# ku += 1
# ud_sum = np.sum(np.diag(abs(mat), ku))
#while(dd_sum > self.tol):
# kl += 1
# dd_sum = np.sum(np.diag(abs(mat), -kl))
#ku -= 1
#kl -= 1
ku = dim
kl = dim
            # storage in the transpose, because of the column-major order used by the zgbsv_ function
length = (kl + ku + 1) * dim - kl * (kl + 1) / 2. - \
ku * (ku + 1) / 2.
self.spar = np.zeros([length], self.dtype)
index1 = []
index2 = []
index0 = np.zeros((dim, 2 * kl + ku + 1), int)
n = 0
for i in range(kl, -1, -1):
for j in range(dim - i):
index1.append(i + j)
index2.append(j)
index0[i + j, 2 * kl - i] = n
n += 1
for i in range(1, ku + 1):
for j in range(dim - i):
index1.append(j)
index2.append(j + i)
index0[j, 2 * kl + i] = n
n += 1
index1 = np.array(index1)
index2 = np.array(index2)
self.band_index = (kl, ku, index0, index1, index2)
self.spar = mat[index1, index2]
spar_sum = np.sum(abs(self.recover()))
def test1(self, n1, n2):
index1 ,index2 = self.band_index[-2:]
for i in range(len(index1)):
if index1[i] == n1 and index2[i] == n2:
print i
def recover(self):
index0, index1, index2 = self.band_index[-3:]
dim = index0.shape[0]
mat = np.zeros([dim, dim], self.dtype)
mat[index1, index2] = self.spar
return mat
def reset(self, mat):
index1, index2 = self.band_index[-2:]
assert self.dtype == mat.dtype
self.spar = mat[index1, index2]
def reset_from_others(self, bds_mm1, bds_mm2, c1, c2):
assert self.dtype == complex
self.spar = c1 * bds_mm1.spar + c2 * bds_mm2.spar
def reset_minus(self, mat, full=False):
assert self.dtype == complex
index1, index2 = self.band_index[-2:]
if full:
self.spar -= mat[index1, index2]
else:
self.spar -= mat.recover()[index1, index2]
def reset_plus(self, mat, full=False):
assert self.dtype == complex
index1, index2 = self.band_index[-2:]
if full:
self.spar += mat[index1, index2]
else:
self.spar += mat.recover()[index1, index2]
def test_inv_speed(self):
full_mat = self.recover()
timer = Timer()
timer.start('full_numpy')
tmp0 = np.linalg.inv(full_mat)
timer.stop('full_numpy')
timer.start('full_lapack')
inverse_general(full_mat)
timer.stop('full_lapack')
timer.start('sparse_lapack')
self.inv()
timer.stop('sparse_lapack')
times = []
methods = ['full_numpy', 'full_lapack', 'sparse_lapack']
for name in methods:
time = timer.timers[name,]
print name, time
times.append(time)
mintime = np.min(times)
self.inv_method = methods[np.argmin(times)]
print 'mintime', mintime
def inv(self):
#kl, ku, index0 = self.band_index[:3]
#dim = index0.shape[0]
#inv_mat = np.eye(dim, dtype=complex)
#ldab = 2*kl + ku + 1
#source_mat = self.spar[index0]
#assert source_mat.flags.contiguous
#info = _gpaw.linear_solve_band(source_mat, inv_mat, kl, ku)
#return inv_mat
return np.linalg.inv(self.recover()).copy()
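# Illustration added for clarity (not part of the original module): a minimal
# sketch of the intended round-trip through Banded_Sparse_Matrix. The helper
# name below is hypothetical and exists only for demonstration.
def _banded_sparse_matrix_example():
    # a small symmetric tridiagonal matrix (note mat[0][-1] == 0, as required)
    dense = (np.diag(np.ones(4)) +
             np.diag(0.5 * np.ones(3), 1) +
             np.diag(0.5 * np.ones(3), -1))
    spar = Banded_Sparse_Matrix(float, dense)
    # the compact band storage recovers the dense matrix
    assert np.allclose(spar.recover(), dense)
    # reset() refills the same sparsity pattern from a new dense matrix
    spar.reset(2.0 * dense)
    assert np.allclose(spar.recover(), 2.0 * dense)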
class Tp_Sparse_HSD:
def __init__(self, dtype, ns, npk, ll_index, ex=True):
self.dtype = dtype
self.ll_index = ll_index
self.extended = ex
self.H = []
self.S = []
self.D = []
self.G = []
self.ns = ns
self.npk = npk
self.s = 0
self.pk = 0
self.band_indices = None
for s in range(ns):
self.H.append([])
self.D.append([])
for k in range(npk):
self.H[s].append([])
self.D[s].append([])
for k in range(npk):
self.S.append([])
self.G = Tp_Sparse_Matrix(complex, self.ll_index,
None, None, self.extended)
def reset(self, s, pk, mat, flag='S', init=False):
if flag == 'S':
spar = self.S
elif flag == 'H':
spar = self.H[s]
elif flag == 'D':
spar = self.D[s]
if not init:
spar[pk].reset(mat)
elif self.band_indices == None:
spar[pk] = Tp_Sparse_Matrix(self.dtype, self.ll_index, mat,
None, self.extended)
self.band_indices = spar[pk].band_indices
else:
spar[pk] = Tp_Sparse_Matrix(self.dtype, self.ll_index, mat,
self.band_indices, self.extended)
def append_lead_as_buffer(self, lead_hsd, lead_couple_hsd, ex_index):
assert self.extended == True
clm = collect_lead_mat
for pk in range(self.npk):
diag_h, upc_h, dwnc_h = clm(lead_hsd, lead_couple_hsd, 0, pk)
self.S[pk].append_ex_mat(diag_h, upc_h, dwnc_h, ex_index)
for s in range(self.ns):
diag_h, upc_h, dwnc_h = clm(lead_hsd,
lead_couple_hsd, s, pk, 'H')
self.H[s][pk].append_ex_mat(diag_h, upc_h, dwnc_h, ex_index)
diag_h, upc_h, dwnc_h = clm(lead_hsd,
lead_couple_hsd, s, pk, 'D')
self.D[s][pk].append_ex_mat(diag_h, upc_h, dwnc_h, ex_index)
def calculate_eq_green_function(self, zp, sigma, ex=True, full=False):
s, pk = self.s, self.pk
self.G.reset_from_others(self.S[pk], self.H[s][pk], zp, -1, init=True)
self.G.substract_sigma(sigma)
if full:
return np.linalg.inv(self.G.recover())
else:
#self.G.test_inv_eq()
self.G.inv_eq()
return self.G.recover(ex)
def calculate_ne_green_function(self, zp, sigma, ffocc, ffvir, ex=True):
s, pk = self.s, self.pk
self.G.reset_from_others(self.S[pk], self.H[s][pk], zp, -1)
self.G.substract_sigma(sigma)
gammaocc = []
gammavir = []
for ff0, ff1, tgt in zip(ffocc, ffvir, sigma):
full_tgt = tgt.recover()
gammaocc.append(ff0 * 1.j * (full_tgt - full_tgt.T.conj()))
gammavir.append(ff1 * 1.j * (full_tgt - full_tgt.T.conj()))
glesser, ggreater = self.G.calculate_non_equilibrium_green(gammaocc,
gammavir, ex)
return glesser, ggreater
def abstract_sub_green_matrix(self, zp, sigma, l1, l2, inv_mat=None):
if inv_mat == None:
s, pk = self.s, self.pk
self.G.reset_from_others(self.S[pk], self.H[s][pk], zp, -1)
self.G.substract_sigma(sigma)
inv_mat = self.G.inv_ne()
gr_sub = inv_mat[l2][l1][-1]
return gr_sub, inv_mat
else:
gr_sub = inv_mat[l2][l1][-1]
return gr_sub
class Tp_Sparse_Matrix:
def __init__(self, dtype, ll_index, mat=None, band_indices=None, ex=True):
# ll_index : lead_layer_index
# the matrix stored here will be replaced by its inverse in place
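# Added note: ll_index[i][0] holds the central (molecule) orbital indices,
# identical for every lead, while ll_index[i][j] for j >= 1 holds the
# principal-layer orbital indices running outward into lead i.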
self.lead_num = len(ll_index)
self.ll_index = ll_index
self.ex_ll_index = copy.deepcopy(ll_index[:])
self.extended = ex
self.dtype = dtype
self.initialize()
self.band_indices = band_indices
if self.band_indices == None:
self.initialize_band_indices()
if mat != None:
self.reset(mat, True)
def initialize_band_indices(self):
self.band_indices = [None]
for i in range(self.lead_num):
self.band_indices.append([])
for j in range(self.ex_lead_nlayer[i] - 1):
self.band_indices[i + 1].append(None)
def initialize(self):
# diag_h : diagonal lead_hamiltonian
# upc_h : superdiagonal lead hamiltonian
# dwnc_h : subdiagonal lead hamiltonian
self.diag_h = []
self.upc_h = []
self.dwnc_h = []
self.lead_nlayer = []
self.ex_lead_nlayer = []
self.mol_index = self.ll_index[0][0]
self.nl = 1
self.nb = len(self.mol_index)
self.length = self.nb * self.nb
self.mol_h = []
for i in range(self.lead_num):
self.diag_h.append([])
self.upc_h.append([])
self.dwnc_h.append([])
self.lead_nlayer.append(len(self.ll_index[i]))
if self.extended:
self.ex_lead_nlayer.append(len(self.ll_index[i]) + 1)
else:
self.ex_lead_nlayer.append(len(self.ll_index[i]))
assert (self.ll_index[i][0] == self.mol_index).all()
self.nl += self.lead_nlayer[i] - 1
for j in range(self.lead_nlayer[i] - 1):
self.diag_h[i].append([])
self.upc_h[i].append([])
self.dwnc_h[i].append([])
len1 = len(self.ll_index[i][j])
len2 = len(self.ll_index[i][j + 1])
self.length += 2 * len1 * len2 + len2 * len2
self.nb += len2
if self.extended:
self.diag_h[i].append([])
self.upc_h[i].append([])
self.dwnc_h[i].append([])
self.ex_nb = self.nb
def append_ex_mat(self, diag_h, upc_h, dwnc_h, ex_index):
assert self.extended
for i in range(self.lead_num):
self.diag_h[i][-1] = diag_h[i]
self.upc_h[i][-1] = upc_h[i]
self.dwnc_h[i][-1] = dwnc_h[i]
self.ex_ll_index[i].append(ex_index[i])
self.ex_nb += len(ex_index[i])
def abstract_layer_info(self):
self.basis_to_layer = np.empty([self.nb], int)
self.neighbour_layers = np.zeros([self.nl, self.lead_num], int) - 1
for i in self.mol_index:
self.basis_to_layer[i] = 0
nl = 1
for i in range(self.lead_num):
for j in range(self.lead_nlayer[i] - 1):
for k in self.ll_index[i][j]:
self.basis_to_layer[k] = nl
nl += 1
nl = 1
for i in range(self.lead_num):
self.neighbour_layers[0][i] = nl
first = nl
for j in range(self.lead_nlayer[i] - 1):
if nl == first:
self.neighbour_layers[nl][0] = 0
if j != self.lead_nlayer[i] - 2:
self.neighbour_layers[nl][1] = nl + 1
else:
self.neighbour_layers[nl][0] = nl - 1
if j != self.lead_nlayer[i] - 2:
self.neighbour_layers[nl][1] = nl + 1
nl += 1
def reset(self, mat, init=False):
assert mat.dtype == self.dtype
ind = get_matrix_index(self.mol_index)
if init:
self.mol_h = Banded_Sparse_Matrix(self.dtype, mat[ind.T, ind],
self.band_indices[0])
if self.band_indices[0] == None:
self.band_indices[0] = self.mol_h.band_index
else:
self.mol_h.reset(mat[ind.T, ind])
for i in range(self.lead_num):
for j in range(self.lead_nlayer[i] - 1):
ind = get_matrix_index(self.ll_index[i][j])
ind1 = get_matrix_index(self.ll_index[i][j + 1])
indr1, indc1 = get_matrix_index(self.ll_index[i][j],
self.ll_index[i][j + 1])
indr2, indc2 = get_matrix_index(self.ll_index[i][j + 1],
self.ll_index[i][j])
if init:
self.diag_h[i][j] = Banded_Sparse_Matrix(self.dtype,
mat[ind1.T, ind1],
self.band_indices[i + 1][j])
if self.band_indices[i + 1][j] == None:
self.band_indices[i + 1][j] = \
self.diag_h[i][j].band_index
else:
self.diag_h[i][j].reset(mat[ind1.T, ind1])
self.upc_h[i][j] = mat[indr1, indc1]
self.dwnc_h[i][j] = mat[indr2, indc2]
def reset_from_others(self, tps_mm1, tps_mm2, c1, c2, init=False):
#self.mol_h = c1 * tps_mm1.mol_h + c2 * tps_mm2.mol_h
if init:
self.mol_h = Banded_Sparse_Matrix(complex)
self.mol_h.spar = c1 * tps_mm1.mol_h.spar + c2 * tps_mm2.mol_h.spar
self.mol_h.band_index = tps_mm1.mol_h.band_index
self.ex_lead_nlayer = tps_mm1.ex_lead_nlayer
self.ex_ll_index = tps_mm1.ex_ll_index
self.ex_nb = tps_mm1.ex_nb
for i in range(self.lead_num):
for j in range(self.ex_lead_nlayer[i] - 1):
assert (tps_mm1.ex_ll_index[i][j] == tps_mm2.ex_ll_index[i][j]).all()
if init:
self.diag_h[i][j] = Banded_Sparse_Matrix(complex)
self.diag_h[i][j].band_index = \
tps_mm1.diag_h[i][j].band_index
self.diag_h[i][j].spar = c1 * tps_mm1.diag_h[i][j].spar + \
c2 * tps_mm2.diag_h[i][j].spar
self.upc_h[i][j] = c1 * tps_mm1.upc_h[i][j] + \
c2 * tps_mm2.upc_h[i][j]
self.dwnc_h[i][j] = c1 * tps_mm1.dwnc_h[i][j] + \
c2 * tps_mm2.dwnc_h[i][j]
def substract_sigma(self, sigma):
if self.extended:
n = -2
else:
n = -1
for i in range(self.lead_num):
self.diag_h[i][n].reset_minus(sigma[i])
def recover(self, ex=False):
if ex:
nb = self.ex_nb
lead_nlayer = self.ex_lead_nlayer
ll_index = self.ex_ll_index
else:
nb = self.nb
lead_nlayer = self.lead_nlayer
ll_index = self.ll_index
mat = np.zeros([nb, nb], self.dtype)
ind = get_matrix_index(ll_index[0][0])
mat[ind.T, ind] = self.mol_h.recover()
gmi = get_matrix_index
for i in range(self.lead_num):
for j in range(lead_nlayer[i] - 1):
ind = gmi(ll_index[i][j])
ind1 = gmi(ll_index[i][j + 1])
indr1, indc1 = gmi(ll_index[i][j], ll_index[i][j + 1])
indr2, indc2 = gmi(ll_index[i][j + 1], ll_index[i][j])
mat[ind1.T, ind1] = self.diag_h[i][j].recover()
mat[indr1, indc1] = self.upc_h[i][j]
mat[indr2, indc2] = self.dwnc_h[i][j]
return mat
def test_inv_eq(self, tol=1e-9):
tp_mat = copy.deepcopy(self)
tp_mat.inv_eq()
mol_h = dot(tp_mat.mol_h.recover(), self.mol_h.recover())
for i in range(self.lead_num):
mol_h += dot(tp_mat.upc_h[i][0], self.dwnc_h[i][0])
diff = np.max(abs(mol_h - np.eye(mol_h.shape[0])))
if diff > tol:
print 'warning, mol_diff', diff
for i in range(self.lead_num):
for j in range(self.lead_nlayer[i] - 2):
diag_h = dot(tp_mat.diag_h[i][j].recover(),
self.diag_h[i][j].recover())
diag_h += dot(tp_mat.dwnc_h[i][j], self.upc_h[i][j])
diag_h += dot(tp_mat.upc_h[i][j + 1], self.dwnc_h[i][j + 1])
diff = np.max(abs(diag_h - np.eye(diag_h.shape[0])))
if diff > tol:
print 'warning, diag_diff', i, j, diff
j = self.lead_nlayer[i] - 2
diag_h = dot(tp_mat.diag_h[i][j].recover(),
self.diag_h[i][j].recover())
diag_h += dot(tp_mat.dwnc_h[i][j], self.upc_h[i][j])
diff = np.max(abs(diag_h - np.eye(diag_h.shape[0])))
if diff > tol:
print 'warning, diag_diff', i, j, diff
def inv_eq(self):
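# Sketch of the scheme, as read from the code below (note added for
# clarity): a block-tridiagonal inversion in two sweeps. An inward sweep
# along each lead arm folds the outermost layer into its neighbour,
# q_j = (D_j - U_{j+1} q_{j+1} L_{j+1})^{-1} with D = diag_h, U = upc_h,
# L = dwnc_h; the central molecule block is then inverted, and an outward
# sweep rebuilds the diagonal, super- and sub-diagonal blocks of the
# inverse in place.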
q_mat = []
for i in range(self.lead_num):
q_mat.append([])
nll = self.lead_nlayer[i]
for j in range(nll - 1):
q_mat[i].append([])
end = nll - 2
q_mat[i][end] = self.diag_h[i][end].inv()
for j in range(end - 1, -1, -1):
self.diag_h[i][j].reset_minus(self.dotdot(
self.upc_h[i][j + 1],
q_mat[i][j + 1],
self.dwnc_h[i][j + 1]), full=True)
q_mat[i][j] = self.diag_h[i][j].inv()
h_mm = self.mol_h
for i in range(self.lead_num):
h_mm.reset_minus(self.dotdot(self.upc_h[i][0], q_mat[i][0],
self.dwnc_h[i][0]), full=True)
inv_h_mm = h_mm.inv()
h_mm.reset(inv_h_mm)
for i in range(self.lead_num):
tmp_dc = self.dwnc_h[i][0].copy()
self.dwnc_h[i][0] = -self.dotdot(q_mat[i][0], tmp_dc, inv_h_mm)
self.upc_h[i][0] = -self.dotdot(inv_h_mm, self.upc_h[i][0],
q_mat[i][0])
dim = len(self.ll_index[i][1])
self.diag_h[i][0].reset(dot(q_mat[i][0], np.eye(dim) -
dot(tmp_dc, self.upc_h[i][0])))
for j in range(1, self.lead_nlayer[i] - 1):
tmp_dc = self.dwnc_h[i][j].copy()
self.dwnc_h[i][j] = -self.dotdot(q_mat[i][j], tmp_dc,
self.diag_h[i][j - 1].recover())
self.upc_h[i][j] = -self.dotdot(self.diag_h[i][j - 1].recover(),
self.upc_h[i][j],
q_mat[i][j])
dim = len(self.ll_index[i][j + 1])
self.diag_h[i][j].reset(dot(q_mat[i][j], np.eye(dim) -
dot(tmp_dc, self.upc_h[i][j])))
def inv_ne(self):
q_mat = []
qi_mat = []
inv_mat = []
# structure of inv_mat: [inv_cols_1, inv_cols_2, ..., inv_cols_n] (n = lead_num)
# structure of inv_cols_i: [inv_cols_l1, inv_cols_l2, ..., inv_cols_ln, inv_cols_mm] (mm: molecule block)
# structure of inv_cols_li: [inv_cols_ll1, inv_cols_ll2, ..., inv_cols_llm] (one block per layer of lead i)
for i in range(self.lead_num):
q_mat.append([])
qi_mat.append([])
inv_mat.append([])
nll = self.lead_nlayer[i]
for j in range(nll - 1):
q_mat[i].append([])
qi_mat[i].append([])
for j in range(self.lead_num):
inv_mat[i].append([])
nll_j = self.lead_nlayer[j]
for k in range(nll_j - 1):
inv_mat[i][j].append([])
inv_mat[i].append([])
end = nll - 2
q_mat[i][end] = self.diag_h[i][end].inv()
for j in range(end - 1, -1, -1):
tmp_diag_h = copy.deepcopy(self.diag_h[i][j])
tmp_diag_h.reset_minus(self.dotdot(self.upc_h[i][j + 1],
q_mat[i][j + 1],
self.dwnc_h[i][j + 1]),
full=True)
q_mat[i][j] = tmp_diag_h.inv()
# the above obtains all the q matrices; when solving the columns
# corresponding to lead i, q_mat[i] itself is not used
#q_mm = self.mol_h.recover()
q_mm = copy.deepcopy(self.mol_h)
for i in range(self.lead_num):
#q_mm -= dot(dot(self.upc_h[i][0], q_mat[i][0]),
# self.dwnc_h[i][0])
q_mm.reset_minus(self.dotdot(self.upc_h[i][0],
q_mat[i][0], self.dwnc_h[i][0]), full=True)
for i in range(self.lead_num):
# solve the corresponding cols to the lead i
nll = self.lead_nlayer[i]
#qi_mat[i][0] = q_mm + self.dotdot(self.upc_h[i][0],q_mat[i][0],
# self.dwnc_h[i][0])
q_mm_tmp = copy.deepcopy(q_mm)
q_mm_tmp.reset_plus(self.dotdot(self.upc_h[i][0],q_mat[i][0],
self.dwnc_h[i][0]), full=True)
#inv(qi_mat[i][0])
qi_mat[i][0] = q_mm_tmp.inv()
for j in range(1, nll - 1):
tmp_diag_h = copy.deepcopy(self.diag_h[i][j - 1])
tmp_diag_h.reset_minus(self.dotdot(self.dwnc_h[i][j -1],
qi_mat[i][j - 1],
self.upc_h[i][j - 1]),
full=True)
qi_mat[i][j] = tmp_diag_h.inv()
tmp_diag_h = copy.deepcopy(self.diag_h[i][nll - 2])
tmp_diag_h.reset_minus(self.dotdot(self.dwnc_h[i][nll - 2],
qi_mat[i][nll -2],
self.upc_h[i][nll -2]),
full=True)
inv_mat[i][i][nll - 2] = tmp_diag_h.inv()
for j in range(nll - 3, -1, -1):
inv_mat[i][i][j] = -self.dotdot(qi_mat[i][j + 1],
self.upc_h[i][j + 1],
inv_mat[i][i][j + 1])
inv_mat[i][self.lead_num] = -self.dotdot(qi_mat[i][0],
self.upc_h[i][0],
inv_mat[i][i][0])
for j in range(self.lead_num):
if j != i:
nlj = self.lead_nlayer[j]
inv_mat[i][j][0] = -self.dotdot(q_mat[j][0], self.dwnc_h[j][0],
inv_mat[i][self.lead_num])
for k in range(1, nlj - 1):
inv_mat[i][j][k] = -self.dotdot(q_mat[j][k], self.dwnc_h[j][k],
inv_mat[i][j][k - 1])
return inv_mat
def combine_inv_mat(self, inv_mat):
nb = self.nb
mat = np.zeros([nb, nb], complex)
for i in range(self.lead_num):
indr, indc = get_matrix_index(self.ll_index[i][0],
self.ll_index[i][-1])
mat[indr, indc] = inv_mat[i][self.lead_num]
for j in range(self.lead_num):
for k in range(1, self.lead_nlayer[j]):
indr, indc = get_matrix_index(self.ll_index[j][k],
self.ll_index[i][-1])
mat[indr, indc] = inv_mat[i][j][k - 1]
return mat
def dotdot(self, mat1, mat2, mat3):
return dot(mat1, dot(mat2, mat3))
def calculate_non_equilibrium_green(self, se_less, se_great, ex=True):
inv_mat = self.inv_ne()
glesser = self.calculate_keldysh_green(inv_mat, se_less, ex)
ggreater = self.calculate_keldysh_green(inv_mat, se_great, ex)
return glesser, ggreater
def calculate_keldysh_green(self, inv_mat, keldysh_se, ex=True):
# keldysh_se: lesser (or greater) self-energies, structured as [se_1, se_2, ..., se_n]
# the lead ordering of keldysh_se must match self.ll_index
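# Note added for clarity: the assembly below follows the Keldysh relation
# G^{<,>} = G^r Sigma^{<,>} G^a, where the columns of G^r come from inv_mat
# and G^a = (G^r)^dagger, so every contribution has the form
# inv_mat[...] . keldysh_se[i] . inv_mat[...].T.conj().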
self.mol_h.spar.fill(0.0)
for i in range(self.lead_num):
nll = self.lead_nlayer[i]
for j in range(nll - 1):
self.diag_h[i][j].spar.fill(0.0)
self.upc_h[i][j].fill(0.0)
self.dwnc_h[i][j].fill(0.0)
for i in range(self.lead_num):
# less selfenergy loop
self.mol_h.reset_plus(self.dotdot(inv_mat[i][self.lead_num],
keldysh_se[i],
inv_mat[i][self.lead_num].T.conj()),
full=True)
for j in range(self.lead_num):
# matrix operation loop
nlj = self.lead_nlayer[j]
self.diag_h[j][0].reset_plus(self.dotdot(inv_mat[i][j][0],
keldysh_se[i],
inv_mat[i][j][0].T.conj()),
full=True)
self.dwnc_h[j][0] += self.dotdot(inv_mat[i][j][0], keldysh_se[i],
inv_mat[i][self.lead_num].T.conj())
self.upc_h[j][0] += self.dotdot(inv_mat[i][self.lead_num],
keldysh_se[i],
inv_mat[i][j][0].T.conj())
for k in range(1, nlj -1):
self.diag_h[j][k].reset_plus(self.dotdot(inv_mat[i][j][k],
keldysh_se[i],
inv_mat[i][j][k].T.conj()),
full=True)
self.dwnc_h[j][k] += self.dotdot(inv_mat[i][j][k],
keldysh_se[i],
inv_mat[i][j][k - 1].T.conj())
self.upc_h[j][k] += self.dotdot(inv_mat[i][j][k - 1],
keldysh_se[i],
inv_mat[i][j][k].T.conj())
return self.recover(ex)
def test_inv_speed(self):
full_mat = self.recover()
timer = Timer()
timer.start('full_numpy')
tmp0 = np.linalg.inv(full_mat)
timer.stop('full_numpy')
timer.start('full_lapack')
inverse_general(full_mat)
timer.stop('full_lapack')
timer.start('sparse_lapack')
self.inv_eq()
timer.stop('sparse_lapack')
timer.start('sparse_lapack_ne')
self.inv_ne()
timer.stop('sparse_lapack_ne')
times = []
methods = ['full_numpy', 'full_lapack', 'sparse_lapack']
for name in methods:
time = timer.timers[name,]
print name, time
times.append(time)
mintime = np.min(times)
self.inv_method = methods[np.argmin(times)]
print 'mintime', mintime
print 'sparse_lapack_ne', timer.timers['sparse_lapack_ne',]
class CP_Sparse_HSD:
def __init__(self, dtype, ns, npk, index=None):
self.index = index
self.dtype = dtype
self.H = []
self.S = []
self.D = []
self.ns = ns
self.npk = npk
self.s = 0
self.pk = 0
for s in range(ns):
self.H.append([])
self.D.append([])
for k in range(npk):
self.H[s].append([])
self.D[s].append([])
for k in range(npk):
self.S.append([])
def reset(self, s, pk, mat, flag='S', init=False):
assert mat.dtype == self.dtype
if flag == 'S':
spar = self.S
elif flag == 'H':
spar = self.H[s]
elif flag == 'D':
spar = self.D[s]
if not init:
spar[pk].reset(mat)
elif self.index != None:
spar[pk] = CP_Sparse_Matrix(self.dtype, mat, self.index)
else:
spar[pk] = CP_Sparse_Matrix(self.dtype, mat)
self.index = spar[pk].index
class CP_Sparse_Matrix:
def __init__(self, dtype, mat=None, index=None, flag=None, tol=1e-9):
self.tol = tol
self.index = index
self.dtype = dtype
self.flag = flag
if mat != None:
if self.index == None:
self.initialize(mat)
else:
self.reset(mat)
def initialize(self, mat):
assert self.dtype == mat.dtype
dim = mat.shape[-1]
ud_array = np.empty([dim])
dd_array = np.empty([dim])
for i in range(dim):
ud_array[i] = np.sum(abs(np.diag(mat, i)))
dd_array[i] = np.sum(abs(np.diag(mat, -i)))
spar_sum = 0
mat_sum = np.sum(abs(mat))
if np.sum(abs(ud_array)) > np.sum(abs(dd_array)):
self.flag = 'U'
i = -1
while abs(mat_sum - spar_sum) > self.tol * 10:
i += 1
while ud_array[i] < self.tol and i < dim - 1:
i += 1
self.index = (i, dim)
ldab = dim - i
self.spar = mat[:ldab, i:].copy()
spar_sum = np.sum(abs(self.spar))
else:
self.flag = 'L'
i = -1
while abs(mat_sum - spar_sum) > self.tol * 10:
i += 1
while dd_array[i] < self.tol and i < dim - 1:
i += 1
self.index = (-i, dim)
ldab = dim - i
self.spar = mat[i:, :ldab].copy()
spar_sum = np.sum(abs(self.spar))
def reset(self, mat):
assert mat.dtype == self.dtype and mat.shape[-1] == self.index[1]
dim = mat.shape[-1]
if self.index[0] > 0:
ldab = dim - self.index[0]
self.spar = mat[:ldab, self.index[0]:].copy()
else:
ldab = dim + self.index[0]
self.spar = mat[-self.index[0]:, :ldab].copy()
def recover(self, trans='n'):
nb = self.index[1]
mat = np.zeros([nb, nb], self.dtype)
if self.index[0] > 0:
ldab = nb - self.index[0]
mat[:ldab, self.index[0]:] = self.spar
else:
ldab = nb + self.index[0]
mat[-self.index[0]:, :ldab] = self.spar
if trans == 'c':
if self.dtype == float:
mat = mat.T.copy()
else:
mat = mat.T.conj()
return mat
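# Illustration added for clarity (not part of the original module): a
# hypothetical check of CP_Sparse_Matrix, which stores a strictly upper (or
# lower) coupling block as a compact slab plus an offset.
def _cp_sparse_matrix_example():
    dense = np.zeros([4, 4], complex)
    dense[:2, 2:] = 1.0j          # only upper off-diagonal entries
    cp = CP_Sparse_Matrix(complex, dense)
    assert cp.flag == 'U'
    assert np.allclose(cp.recover(), dense)
    # 'c' returns the conjugate transpose of the stored block
    assert np.allclose(cp.recover('c'), dense.T.conj())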
class Se_Sparse_Matrix:
def __init__(self, mat, tri_type, nn=None, tol=1e-9):
# coupling sparse matrix A_ij!=0 if i>dim-nn and j>dim-nn (for right selfenergy)
# or A_ij!=0 if i<nn and j<nn (for left selfenergy, dim is the shape of A)
self.tri_type = tri_type
self.tol = tol
self.nb = mat.shape[-1]
self.spar = []
if nn == None:
self.initialize(mat)
else:
self.reset(mat, nn)
def initialize(self, mat):
self.nn = 0
nb = self.nb
tol = self.tol
if self.tri_type == 'L':
while self.nn < nb and np.sum(abs(mat[self.nn])) > tol:
self.nn += 1
self.spar = mat[:self.nn, :self.nn].copy()
else:
while self.nn < nb and np.sum(abs(mat[nb - self.nn - 1])) > tol:
self.nn += 1
self.spar = mat[-self.nn:, -self.nn:].copy()
diff = abs(np.sum(abs(mat)) - np.sum(abs(self.spar)))
if diff > tol * 10:
print 'Warning! Sparse Matrix Diff', diff
def reset(self, mat, nn=None):
if nn != None:
self.nn = nn
if self.tri_type == 'L':
self.spar = mat[:self.nn, :self.nn].copy()
else:
self.spar = mat[-self.nn:, -self.nn:].copy()
def restore(self):
mat = np.zeros([self.nb, self.nb], complex)
if self.tri_type == 'L':
mat[:self.nn, :self.nn] = self.spar
else:
mat[-self.nn:, -self.nn:] = self.spar
return mat
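# Illustration added for clarity (not part of the original module): a
# hypothetical check of Se_Sparse_Matrix, which keeps only the non-zero
# corner of a lead self-energy.
def _se_sparse_matrix_example():
    full = np.zeros([6, 6], complex)
    full[:2, :2] = 1.0 + 2.0j     # left self-energy: top-left corner only
    se = Se_Sparse_Matrix(full, 'L')
    assert se.nn == 2
    assert np.allclose(se.restore(), full)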
|
ajylee/gpaw-rtxs
|
gpaw/transport/sparse_matrix.py
|
Python
|
gpl-3.0
| 35,877
|
[
"GPAW"
] |
f6c036bc67f1bd933f785e72a7254bfe9b1cdb741e6d69b688b5e840f0852872
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 - 2013 Daniel Reis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Project Issue related Tasks',
'summary': 'Use Tasks to support Issue resolution reports',
'version': '1.1',
'category': 'Project Management',
'description': """\
Support for the use case where solving an Issue means a Task should be done,
such as an on site visit, and a report must be made to document the work done.
This is a common scenario in technical field services.
The Issue form already has a "Task" field, allowing a related Task to be
created for an Issue.
This module adds some usability improvements:
* "Create Task" button on the Issue form
* Automatically Close the Issue when the Task is Closed
* Automatically Cancel the Task when the Issue is Cancelled
* Make the Task also visible to all followers of the related Issue
""",
'author': "Daniel Reis,Odoo Community Association (OCA)",
'license': 'AGPL-3',
'depends': [
'project_issue',
],
'data': [
'project_issue_view.xml',
'project_task_cause_view.xml',
'project_task_view.xml',
'security/ir.model.access.csv',
'security/project_security.xml',
],
'installable': True,
}
|
raycarnes/project
|
project_issue_task/__openerp__.py
|
Python
|
agpl-3.0
| 2,070
|
[
"VisIt"
] |
0b4060307639e9589767809cdee46244058a3c9d0cff50feec4f24235d202fa3
|
compliments = [
"You have very smooth hair.",
"You deserve a promotion.",
"Good effort!",
"What a fine sweater!",
"I appreciate all of your opinions.",
"I like your style.",
"Your T-shirt smells fresh.",
"I love what you've done with the place.",
"You are like a spring flower; beautiful and vivacious.",
"I am utterly disarmed by your wit.",
"I really enjoy the way you pronounce the word 'ruby'.",
"You complete me.",
"Well done!",
"I like your Facebook status.",
"That looks nice on you.",
"I like those shoes more than mine.",
"Nice motor control!",
"You have a good taste in websites.",
"Your mouse told me that you have very soft hands.",
"You are full of youth.",
"I like your jacket.",
"I like the way you move.",
"You have a good web-surfing stance.",
"You should be a poster child for poster children.",
"Nice manners!",
"I appreciate you more than Santa appreciates chimney grease.",
"I wish I was your mirror.",
"I find you to be a fountain of inspiration.",
"You have perfect bone structure.",
"I disagree with anyone who disagrees with you.",
"Way to go!",
"Have you been working out?",
"With your creative wit, I'm sure you could come up with better compliments than me.",
"I like your socks.",
"You are so charming.",
"Your cooking reminds me of my mother's.",
"You're tremendous!",
"You deserve a compliment!",
"Hello, good looking.",
"Your smile is breath taking.",
"How do you get your hair to look that great?",
"You are quite strapping.",
"I am grateful to be blessed by your presence.",
"Say, aren't you that famous model from TV?",
"Take a break; you've earned it.",
"Your life is so interesting!",
"The sound of your voice sends tingles of joy down my back.",
"I enjoy spending time with you.",
"I would share my dessert with you.",
"You can have the last bite.",
"May I have this dance?",
"I would love to visit you, but I live on the Internet.",
"I love the way you click.",
"You're invited to my birthday party.",
"All of your ideas are brilliant!",
"If I freeze, it's not a computer virus. I was just stunned by your beauty.",
"You're spontaneous, and I love it!",
"You should try out for everything.",
"You make my data circuits skip a beat.",
"You are the gravy to my mashed potatoes.",
"You get an A+!",
"I'm jealous of the other websites you visit, because I enjoy seeing you so much!",
"I would enjoy a roadtrip with you.",
"If I had to choose between you or Mr. Rogers, it would be you.",
"I like you more than the smell of Grandma's home-made apple pies.",
"You would look good in glasses OR contacts.",
"Let's do this again sometime.",
"You could go longer without a shower than most people.",
"I feel the need to impress you.",
"I would trust you to pick out a pet fish for me.",
"I'm glad we met.",
"Do that again!",
"Will you sign my yearbook?",
"You're so smart!",
"We should start a band.",
"You're cooler than ice-skating Fonzi.",
"I made this website for you.",
"I heard you make really good French Toast.",
"You're cooler than Pirates and Ninjas combined.",
"Oh, I can keep going.",
"I like your pants.",
"You're pretty groovy, dude.",
"When I grow up, I want to be just like you.",
"I told all my friends about how cool you are.",
"You can play any prank, and get away with it.",
"You have ten of the best fingers I have ever seen!",
"I can tell that we are gonna be friends.",
"I just want to gobble you up!",
"You're sweeter than than a bucket of bon-bons!",
"Treat yourself to another compliment!",
"You're pretty high on my list of people with whom I would want to be stranded on an island.",
"You're #1 in my book!",
"Well played.",
"You are well groomed.",
"You could probably lead a rebellion.",
"Is it hot in here or is it just you?",
"<3",
"You are more fun than a Japanese steakhouse.",
"Your voice is more soothing than Morgan Freeman's.",
"I like your sleeves. They're real big.",
"You could be drinking whole milk if you wanted to.",
"You're so beautiful, you make me walk into things when I look at you.",
"I support all of your decisions.",
"You are as fun as a hot tub full of chocolate pudding.",
"I usually don't say this on a first date, but will you marry me?",
"I don't speak much English, but with you all I really need to say is beautiful.",
"Being awesome is hard, but you'll manage.",
"Your skin is radiant.",
"You will still be beautiful when you get older.",
"You could survive a zombie apocalypse.",
"You make me :)",
"I wish I could move your furniture.",
"I think about you while I'm on the toilet.",
"You're so rad.",
"You're more fun than a barrel of monkeys.",
"You're nicer than a day on the beach.",
"Your glass is the fullest.",
"I find you very relevant.",
"You look so perfect.",
"The only difference between exceptional and amazing is you.",
"Last night I had the hiccups, and the only thing that comforted me to sleep was repeating your name over and over.",
"I like your pearly whites!",
"Your eyebrows really make your pretty eyes stand out.",
"Shall I compare thee to a summer's day? Thou art more lovely and more temperate.",
"I love you more than bacon!",
"You intrigue me.",
"You make me think of beautiful things, like strawberries.",
"I would share my fruit Gushers with you.",
"You're more aesthetically pleasant to look at than that one green color on this website.",
"Even though this goes against everything I know, I think I'm in love with you.",
"You're more fun than bubble wrap.",
"Your smile could illuminate the depths of the ocean.",
"You make babies smile.",
"You make the gloomy days a little less gloomy.",
"You are warmer than a Snuggie.",
"You make me feel like I am on top of the world.",
"Playing video games with you would be fun.",
"Let's never stop hanging out.",
"You're more cuddly than the Downy Bear.",
"I would do your taxes any day.",
"You are a bucket of awesome.",
"You are the star of my daydreams.",
"If you really wanted to, you could probably get a bird to land on your shoulder and hang out with you.",
"My mom always asks me why I can't be more like you.",
"You look great in this or any other light.",
"You listen to the coolest music.",
"You and Chuck Norris are on equal levels.",
"Your body fat percentage is perfectly suited for your height.",
"I am having trouble coming up with a compliment worthy enough for you.",
"If we were playing kickball, I'd pick you first.",
"You're cooler than ice on the rocks.",
"You're the bee's knees.",
"I wish I could choose your handwriting as a font.",
"You definitely know the difference between your and you're.",
"You have good taste.",
"I named all my appliances after you.",
"Your mind is a maze of amazing!",
"Don't worry about procrastinating on your studies, I know you'll do great!",
"I like your style!",
"Hi, I'd like to know why you're so beautiful.",
"If I could count the seconds I think about you, I will die in the process!",
"If you were in a chemistry class with me, it would be 10x less boring.",
"If you broke your arm, I would carry your books for you.",
"I love the way your eyes crinkle at the corners when you smile.",
"You make me want to be the person I am capable of being.",
"You're a skilled driver.",
"You are the rare catalyst to my volatile compound.",
"You're a tall glass of water!",
"I'd like to kiss you. Often.",
"You are the wind beneath my wings.",
"Looking at you makes my foot cramps go away instantaneously.",
"I like your face.",
"You are a champ!",
"You are infatuating.",
"Even my cat likes you.",
"There isn't a thing about you that I don't like.",
"You're so cool, that on a scale of from 1-10, you're elevendyseven.",
"OH, you OWN that ponytail.",
"Your shoes are untied. But for you, it's cool.",
"You have the best laugh ever.",
"We would enjoy a cookout with you!",
"Your name is fun to say.",
"I love you more than a drunk college student loves tacos.",
"My camera isn't worthy to take your picture.",
"You are the sugar on my rice krispies.",
"Nice belt!",
"I could hang out with you for a solid year and never get tired of you.",
"You're real happening in a far out way.",
"I bet you could take a punch from Mike Tyson.",
"Your feet are perfect size!",
"You have very nice teeth.",
"Can you teach me how to be as awesome as you?",
"Our awkward silences aren't even awkward.",
"Don't worry. You'll do great.",
"I enjoy you more than a good sneeze. A GOOD one.",
"You could invent words and people would use them.",
"You have powerful sweaters.",
"If you were around, I would enjoy doing my taxes.",
"You look like you like to rock.",
"You are better than unicorns and sparkles combined!",
"You are the watermelon in my fruit salad. Yum!",
"I dig you.",
"You look better whether the lights are on or off.",
"I am enchanted to meet you.",
"I bet even your farts smell good.",
"I would trust my children with you.",
"You make me forget what I was going to...",
"Your smile makes me smile.",
"I'd wake up for an 8 a.m. class just so I could sit next to you.",
"You have the moves like Jagger.",
"You're so hot that you denature my proteins.",
"All I want for Christmas is you!",
"You are the world's greatest hugger.",
"You have a perfectly symmetrical face.",
"If you were in a movie you wouldn't get killed off.",
"Your red ruby lips and wiggly hips make me do flips!",
"I definitely wouldn't kick you out of bed.",
"They should name an ice cream flavor after you.",
"You're the salsa to my tortilla chips. You spice up my life!",
"You smell nice.",
"You don't need make-up, make-up needs you.",
"Me without you is like a nerd without braces, a shoe with out laces, asentencewithoutspaces.",
"Just knowing someone as cool as you will read this makes me smile.",
"I would volunteer to take your place in the Hunger Games.",
"If I had a nickel for everytime you did something stupid, I'd be broke!",
"I'd let you steal the white part of my Oreo.",
"I'd trust you to perform open heart surgery on me... blindfolded!",
"Nice butt! - According to your toilet seat",
"Perfume strives to smell like you.",
"I've had the time of my life, and I owe it all to you!",
"The Force is strong with you.",
"I like the way your nostrils are placed on your nose.",
"I would hold the elevator doors open for you if they were closing.",
"Your every thought and motion contributes to the beauty of the universe.",
"You make me want to frolic in a field.",
]
|
nerdzeu/NERDZCrush
|
mediacrush/mcmanage/compliments.py
|
Python
|
mit
| 11,200
|
[
"VisIt"
] |
50800828cac3f7a26b9860241715cc87a70c75d789bd5637b537eb20541659cd
|
"""Utilities to lazily create and visit candidates found.
Creating and visiting a candidate is a *very* costly operation. It involves
fetching, extracting, potentially building modules from source, and verifying
distribution metadata. It is therefore crucial for performance to keep
everything here lazy all the way down, so we only touch candidates that we
absolutely need, and not "download the world" when we only need one version of
something.
"""
import itertools
from pip._vendor.six.moves import collections_abc # type: ignore
from pip._internal.utils.compat import lru_cache
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Callable, Iterator, Optional
from .base import Candidate
def _insert_installed(installed, others):
# type: (Candidate, Iterator[Candidate]) -> Iterator[Candidate]
"""Iterator for ``FoundCandidates``.
This iterator is used when the resolver prefers to upgrade an
already-installed package. Candidates from index are returned in their
normal ordering, except replaced when the version is already installed.
The implementation iterates through and yields other candidates, inserting
the installed candidate exactly once before we start yielding older or
equivalent candidates, or after all other candidates if they are all newer.
"""
installed_yielded = False
for candidate in others:
# If the installed candidate is better, yield it first.
if not installed_yielded and installed.version >= candidate.version:
yield installed
installed_yielded = True
yield candidate
# If the installed candidate is older than all other candidates.
if not installed_yielded:
yield installed
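# Illustration added for clarity (not part of pip): a rough sketch of the
# merge behaviour above, using a stand-in candidate type that only carries a
# ``version`` attribute (the real Candidate type is much richer).
def _insert_installed_example():
    # type: () -> None
    import collections
    Fake = collections.namedtuple("Fake", "version")
    merged = list(_insert_installed(Fake(2), iter([Fake(3), Fake(2), Fake(1)])))
    # the installed Fake(2) is yielded exactly once, just before the
    # equal-or-older index candidates
    assert [c.version for c in merged] == [3, 2, 2, 1]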
class FoundCandidates(collections_abc.Sequence):
"""A lazy sequence to provide candidates to the resolver.
The intended usage is to return this from `find_matches()` so the resolver
can iterate through the sequence multiple times, but only access the index
page when remote packages are actually needed. This improves performance
when suitable candidates are already installed on disk.
"""
def __init__(
self,
get_others, # type: Callable[[], Iterator[Candidate]]
installed, # type: Optional[Candidate]
prefers_installed, # type: bool
):
self._get_others = get_others
self._installed = installed
self._prefers_installed = prefers_installed
def __getitem__(self, index):
# type: (int) -> Candidate
# Implemented to satisfy the ABC check. This is not needed by the
# resolver, and should not be used by the provider either (for
# performance reasons).
raise NotImplementedError("don't do this")
def __iter__(self):
# type: () -> Iterator[Candidate]
if not self._installed:
return self._get_others()
others = (
candidate
for candidate in self._get_others()
if candidate.version != self._installed.version
)
if self._prefers_installed:
return itertools.chain([self._installed], others)
return _insert_installed(self._installed, others)
def __len__(self):
# type: () -> int
# Implemented to satisfy the ABC check. This is not needed by the
# resolver, and should not be used by the provider either (for
# performance reasons).
raise NotImplementedError("don't do this")
@lru_cache(maxsize=1)
def __bool__(self):
# type: () -> bool
if self._prefers_installed and self._installed:
return True
return any(self)
__nonzero__ = __bool__ # XXX: Python 2.
|
pantsbuild/pex
|
pex/vendor/_vendored/pip/pip/_internal/resolution/resolvelib/found_candidates.py
|
Python
|
apache-2.0
| 3,773
|
[
"VisIt"
] |
fe8f650a291e70cffad64825e7108a001caffe169110596b735a91a32541f6db
|
#!/usr/bin/env python
import sys
import os
import pdb
# Locate MOOSE directory
MOOSE_DIR = os.getenv('MOOSE_DIR', os.path.join(os.getcwd(), '..', 'moose'))
if not os.path.exists(MOOSE_DIR):
MOOSE_DIR = os.path.join(os.getenv('HOME'), 'projects', 'moose')
if not os.path.exists(MOOSE_DIR):
raise Exception('Failed to locate MOOSE, specify the MOOSE_DIR environment variable.')
# Append MOOSE python directory
MOOSE_PYTHON_DIR = os.path.join(MOOSE_DIR, 'python')
if MOOSE_PYTHON_DIR not in sys.path:
sys.path.append(MOOSE_PYTHON_DIR)
import MooseDocs
MooseDocs.MOOSE_DIR = MOOSE_DIR
if __name__ == '__main__':
sys.exit(MooseDocs.moosedocs())
|
paulthulstrup/moose
|
docs/moosedocs.py
|
Python
|
lgpl-2.1
| 651
|
[
"MOOSE"
] |
cc85b1743eeca04120c844f1f75b3d71de47dafa8787c4506de7248b2ab6a33f
|
'''==============================
Long non-coding RNA pipeline
==============================
Overview
========
The pipeline_rnaseqLncRNA.py pipeline aims to predict lncRNAs from an ab initio
assembly of transcripts.
It requires that raw reads have been mapped to a reference
transcriptome and assembled into transcript models using cufflinks and
therefore makes the assumption that the transcript building has gone
to plan.
Details
========
Prediction of lncRNAs is not based on a reference annotation to begin
with, although downstream comparisons are made to a reference
non-coding gene set. The main features of the pipeline are as
follows:
* Build a coding gene set based on an ab initio assembly.
The ab initio assembly is filtered for protein coding genes. In
this step only genes that are compatible with an annotated protein
coding transcript are kept - this will reduce noise that is
associated with a large number of incomplete transfrags. The
filtering is based on output from cuffcompare (class code "=").
* build a non-coding gene set.
A reference non-coding set of transcripts is built by filtering a
provided ensembl reference set (usually a set that is built from
the transcript building pipeline) for transcripts that do not
belong to one of the following biotypes
protein_coding\n
ambiguous_orf\n
retained_intron\n
sense_intronic\n
antisense\n
sense_overlapping\n
This set of non-coding transcripts is required in the filtering of
the ab initio geneset for LncRNA prediction. This is because many
putative lncRNA have multiple associated biotypes. For example
MALAT1 is described as both a lncRNA and a processed transcript. To
avoid removing known ncRNA we therefore check for existence of
putative transcripts in this set.
* Build a putative lncRNA gene set.
The ab initio set of lncRNA are
filtered to remove overlapping protein coding exons. This filtering
is performed on the level of the transcript - although there may be
multiple isoform predictions per lncRNA, at this point the
sensitivity of lncRNA prediction is increased. Antisense
transcripts overlapping protein coding transcripts are retained.
* Filter putative lncRNA gene set
Due to many fragments being produced
from RNA-seq data, putative single exon lncRNA are flagged in
the lncRNA gtf file so it is easy to filter for the more reliable
multi-exonic lncRNA. Although many single exon lncRNA are likely
to be artifacts, we assess the overlap of putative single exon
lncRNA with sets of lncRNA that have been previously identified. If
an overlap is found with a transcript in the reference set then the
reference is added to the lncRNA gene set. This means that true
single exon lncRNA are still picked up - as long as there is
previous evidence to support their existence.
* Build final lncRNA gene set
The putative set of lncRNA are assessed for coding potential using
the coding potential calculator (CPC). Any lncRNA that are
annotated as 'coding' in this analysis are removed from downstream
analysis.
* Combine coding and non-coding gene sets
In order to assess expression levels between genes within samples
i.e. protein coding vs. lncRNA, it is required that the FPKM
estimation be made on a complete geneset. Therefore the lncRNA
geneset is concatenated to the protein coding gene set for use in
downstream analysis.
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general information
how to use CGAT pipelines.
Configuration
-------------
The pipeline requires a configured :file:`pipeline.ini` file.
The sphinxreport report requires a :file:`conf.py` and
:file:`sphinxreport.ini` file (see :ref:`PipelineReporting`)
input
-----
The pipeline generally assumes that transcripts have been assembled
using the pipeline_rnaseqtranscripts.py pipeline using cufflinks.
Files are supplied in the working directory.
They are specified in the configuration file and refer to:
* A coding geneset that is the output from a cufflinks transcript assembly.
abinitio_coding = :file:`<name>.gtf.gz`
* An abinitio geneset that is the output from a cufflinks transcript assembly.
This is to be used for lncRNA prediction.
(note that this may be different to the abinitio_coding geneset).
abinitio_lncrna = :file:`<name>.gtf.gz`
* A reference geneset containing known protein coding transcripts.
This is used for comparisons in the report.
refcoding = :file:`<name>.gtf.gz`
* A reference geneset from ensembl with all known expressed transcripts
reference = :file:`<name>.gtf.gz`
* An optional geneset containing previously identified lncRNA.
If this is not supplied then the pipeline uses a reference non-coding
set from the ensembl reference.
previous = :file:`<name>.gtf.gz`
Pipeline output
----------------
The pipeline produces three main files of interest:
+---------------------------------+------------------------------------------+
| Filename | Description |
+---------------------------------+------------------------------------------+
| |Ab initio set of lncRNA transcripts |
|:file:`lncrna_final.class.gtf.gz`|filtered for single exon status |
| |(excl.previously observed) and classified |
| |relative to protein coding transcripts |
+---------------------------------+------------------------------------------+
| |Ab initio assembled protein coding |
|:file:`<name>_coding.gtf.gz` |transcripts - for a comparable set to |
| |lncRNA transcripts |
+---------------------------------+------------------------------------------+
| |Combined set from the two sets above. |
|:file:`transcripts.gtf.gz` |to be used for downstream FPKM estimation |
| |and differential expression analysis |
+---------------------------------+------------------------------------------+
code
=====
'''
##########################################################
##########################################################
##########################################################
# load modules
from ruffus import *
import sqlite3
import sys
import os
import re
from rpy2.robjects import r as R
import CGAT.Experiment as E
import CGATPipelines.Pipeline as P
import CGAT.GTF as GTF
import CGAT.IOTools as IOTools
import CGATPipelines.PipelineLncRNA as PipelineLncRNA
###################################################
# Pipeline configuration
###################################################
P.getParameters(
["%s/pipeline.ini" % os.path.splitext(__file__)[0],
"../pipeline.ini",
"pipeline.ini"],
defaults={"annotations_dir": "",
"genesets_abinitio_coding": "pruned.gtf.gz",
"genesets_abinitio_lncrna": "pruned.gtf.gz",
"genesets_reference": "reference.gtf.gz",
"genesets_refcoding": "refcoding.gtf.gz",
"genesets_previous": ""})
PARAMS = P.PARAMS
PARAMS.update(P.peekParameters(
PARAMS["annotations_annotations_dir"],
"pipeline_annotations.py",
prefix="annotations_",
update_interface=True))
PREVIOUS = P.asList(PARAMS["genesets_previous"])
def connect():
'''connect to database.
This method also attaches to helper databases.
'''
dbh = sqlite3.connect(PARAMS["database_name"])
statement = '''ATTACH DATABASE '%s' as annotations''' % (
PARAMS["annotations_database"])
cc = dbh.cursor()
cc.execute(statement)
cc.close()
return dbh
#########################################################################
#########################################################################
#########################################################################
def Rconnect():
'''
connect to a database through R
'''
R('''library("RSQLite")''')
R('''library("sciplot")''')
R('''drv <- dbDriver("SQLite")''')
R('''con <- dbConnect(drv, dbname = "%s") ''' % PARAMS["database_name"])
return R('''con''')
#########################################################################
#########################################################################
#########################################################################
def updateFile(filename):
'''
create empty file for updating purposes
'''
outf = open(filename, "w")
outf.write("file created for ruffus update")
outf.close()
#########################################################################
#########################################################################
#########################################################################
def tabSplit(line):
'''
generic split by newline and tab for reading tsv files
'''
return line[:-1].split("\t")
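# Added illustration (not in the original): the helper drops the trailing
# newline and splits on tabs, e.g.
# tabSplit("gene_id\ttranscript_id\t12\n") == ["gene_id", "transcript_id", "12"]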
#########################################################################
#########################################################################
#########################################################################
@follows(mkdir("gtfs"))
@merge([PARAMS["genesets_abinitio_coding"],
PARAMS["genesets_reference"]],
os.path.join(
"gtfs",
P.snip(PARAMS["genesets_abinitio_coding"],
".gtf.gz") + "_coding.gtf.gz"))
def buildCodingGeneSet(infiles, outfile):
'''
takes the output from cuffcompare of a transcript
assembly and filters for annotated protein coding
genes.
NB "pruned" refers to nomenclature in the transcript
building pipeline - transcripts that appear in at least
two samples.
Because an abinitio assembly will often contain
fragments of known transcripts and describe them as
novel, the default behaviour is to produce a set that
is composed of 'complete' or 'contained' transcripts
i.e. nothing novel. This may underestimate the number
of transcripts that are actually expressed
'''
PipelineLncRNA.buildCodingGeneSet(infiles[0], infiles[1], outfile)
##########################################################
##########################################################
##########################################################
@follows(buildCodingGeneSet)
@transform(PARAMS["genesets_refcoding"],
regex(r"(\S+).gtf.gz"),
add_inputs(buildCodingGeneSet),
r"gtfs/\1.gtf.gz")
def buildRefcodingGeneSet(infiles, outfile):
'''
builds a refcoding geneset based on the genes that are present in
the abinitio assembly
'''
PipelineLncRNA.buildRefcodingGeneSet(infiles[1], infiles[0], outfile)
##########################################################
##########################################################
##########################################################
@follows(mkdir("gtfs"))
@files(PARAMS["genesets_reference"], "gtfs/refnoncoding.gtf.gz")
def buildRefnoncodingGeneSet(infile, outfile):
'''
filter the refnoncoding geneset for things that are described in ensembl
as being:
Ambiguous_orf
Retained_intron
Sense_intronic
antisense
Sense_overlapping
Processed transcript
'''
PipelineLncRNA.buildRefnoncodingGeneSet(infile, outfile)
##########################################################
##########################################################
##########################################################
@follows(mkdir("gtfs"))
@merge((PARAMS["genesets_abinitio_lncrna"],
PARAMS["genesets_reference"],
buildRefnoncodingGeneSet,
os.path.join(PARAMS["annotations_dir"],
PARAMS["annotations_interface_pseudogenes_gtf"]),
os.path.join(PARAMS["annotations_dir"],
PARAMS["annotations_interface_numts_gtf"]),
), "gtfs/lncrna.gtf.gz")
def buildLncRNAGeneSet(infiles, outfile):
'''
build lncRNA gene set.
This is a set of transcripts in the abinitio set that
do not overlap at any protein coding or pseudogene transcripts
or additional biotypes from ensembl that are unwanted
(exons) in a reference gene set.
Transcripts need to have a length of at least 200 bp.
'''
PipelineLncRNA.buildLncRNAGeneSet(infiles[0],
infiles[1],
infiles[2],
infiles[3],
infiles[4],
outfile,
PARAMS["lncrna_min_length"])
##########################################################################
##########################################################################
##########################################################################
@transform(buildLncRNAGeneSet, suffix(".gtf.gz"), "_flag.gtf.gz")
def flagExonStatus(infile, outfile):
'''
Adds two attributes to the gtf entry:
exon_status_locus - specifies whether the gene model is multi- or
single-exon
exon_status - specifies whether the transcript is multi- or single exon
'''
PipelineLncRNA.flagExonStatus(infile, outfile)
##########################################################################
##########################################################################
##########################################################################
@follows(flagExonStatus)
@transform(PREVIOUS, regex(r"(\S+)/(\S+).gtf.gz"), r"\2.gtf.gz")
def renameTranscriptsInPreviousSets(infile, outfile):
'''
transcripts need to be renamed because they may use the same
cufflinks identifiers as we use in the analysis - this is not done if they
have an ensembl id - sort by transcript
'''
inf = IOTools.openFile(infile)
for gtf in GTF.iterator(inf):
if gtf.gene_id.find("ENSG") != -1:
statement = '''zcat %(infile)s | grep -v "#"
| cgat gtf2gtf
--method=sort --sort-order=gene
--log=%(outfile)s.log
| gzip > %(outfile)s'''
else:
gene_pattern = "GEN" + P.snip(outfile, ".gtf.gz")
transcript_pattern = gene_pattern.replace("GEN",
"TRAN")
statement = '''
zcat %(infile)s | cgat gtf2gtf
--method=renumber-genes
--pattern-identifier=%(gene_pattern)s%%i
| cgat gtf2gtf
--method=renumber-transcripts
--pattern-identifier=%(transcript_pattern)s%%i
| cgat gtf2gtf
--method=sort --sort-order=gene
--log=%(outfile)s.log
| gzip > %(outfile)s'''
P.run()
##########################################################################
##########################################################################
##########################################################################
if PARAMS["genesets_previous"]:
@transform(flagExonStatus,
regex(r"(\S+)_flag.gtf.gz"),
add_inputs(renameTranscriptsInPreviousSets),
r"\1_filtered.gtf.gz")
def buildFilteredLncRNAGeneSet(infiles, outfile):
'''
Creates a filtered lncRNA geneset that contains previously identified
gene models supplied in the configuration file.
'''
assert PARAMS["filtering_remove_single_exon"] in ["loci",
"transcripts",
None]
PipelineLncRNA.buildFilteredLncRNAGeneSet(
infiles[0],
outfile,
infiles[1:len(infiles)],
filter_se=PARAMS["filtering_remove_single_exon"])
else:
# Writing the following to log will cause all subsequent log messages
# to be empty.
# L.info("no previous lncRNA set provided: Using refnoncoding set")
@transform(flagExonStatus,
regex(r"(\S+)_flag.gtf.gz"),
r"\1_filtered.gtf.gz")
def buildFilteredLncRNAGeneSet(infile, outfile):
'''
Depending on filtering_remove_single_exon, this will:
i) remove all single exon transcripts from all lncrna models
(transcripts)
ii) remove lncrna loci that only contain single exon transcripts
(loci)
iii) leave all single-exon and multi-exon loci in outfile
(None)
'''
if not PARAMS["filtering_remove_single_exon"]:
E.info("Both multi-exon and single-exon lncRNA are retained!")
statement = ("cp %(infile)s %(outfile)s")
elif PARAMS["filtering_remove_single_exon"] == "loci":
E.info("Warning: removing lncRNA loci that only contain"
" single-exon transcripts")
statement = ("zcat %(infile)s |"
" grep -v 'exon_status_locus \"s\"' |"
" gzip > %(outfile)s")
elif PARAMS["filtering_remove_single_exon"] == "transcripts":
E.info("Warning: removing all single-exon transcripts"
" from the lncRNA set")
statement = ("zcat %(infile)s |"
" grep -v 'exon_status \"s\"' |"
" gzip > %(outfile)s")
else:
raise ValueError("Unrecognised parameter %s"
% PARAMS["filtering_remove_single_exon"])
P.run()
##########################################################################
##########################################################################
##########################################################################
@transform(buildFilteredLncRNAGeneSet,
suffix(".gtf.gz"),
add_inputs(PARAMS["genesets_refcoding"]),
".class.gtf.gz")
def classifyFilteredLncRNA(infiles, outfile):
'''
classifies all lincRNA before cpc filtering to define any classes that
are represented in the coding set that are filtered
NOTE: This task is not included when running the full pipeline
'''
PipelineLncRNA.classifyLncRNAGenes(
infiles[0], infiles[1], outfile, dist=PARAMS["lncrna_dist"])
##########################################################################
##########################################################################
##########################################################################
@follows(mkdir("fasta"))
@transform(buildFilteredLncRNAGeneSet,
regex(r"gtfs/(\S+).gtf.gz"),
r"fasta/\1.fasta")
def buildLncRNAFasta(infile, outfile):
'''
create fasta file from lncRNA geneset for testing coding
potential of transcripts
'''
genome = os.path.join(
PARAMS["general_genomedir"], PARAMS["genome"] + ".fasta")
statement = ("zcat %(infile)s |"
" cgat gff2fasta"
" --genome-file=%(genome)s"
" --log=%(outfile)s.log"
" --is-gtf"
" > %(outfile)s")
P.run()
##########################################################################
##########################################################################
##########################################################################
@transform(buildLncRNAFasta, regex(r"fasta/(\S+).fasta"), r"cpc/\1_cpc.result")
def runCPC(infile, outfile):
'''
run coding potential calculations on lncRNA geneset
'''
# farm.py is called from within cpc.sh
assert IOTools.which("farm.py"), \
"farm.py needs to be in $PATH for cpc to run"
# Default cpc parameters don't work with later versions of blast
E.info("Running cpc with blast version:%s" % IOTools.which("blastx"))
result_evidence = P.snip(outfile, ".result") + ".evidence"
working_dir = "cpc"
statement = ("%(pipeline_scriptsdir)s/cpc.sh"
" %(infile)s"
" %(outfile)s"
" %(working_dir)s"
" %(result_evidence)s")
P.run()
@follows(runCPC)
@transform(runCPC, regex("cpc/(\S+).result"), r"\1.load")
def loadCPCResults(infile, outfile):
'''
load the results of the cpc analysis
'''
P.load(infile,
outfile,
options="--header-names=transcript_id,feature,C_NC,CP_score "
"--add-index=transcript_id")
@follows(loadCPCResults)
@transform(buildFilteredLncRNAGeneSet,
regex(r"(\S+)_filtered.gtf.gz"),
r"\1_final.gtf.gz")
def buildFinalLncRNAGeneSet(infile, outfile):
'''
the final lncRNA gene set consists of transcripts that pass
the initial filtering stage, i.e. are
multi-exonic (or previously observed single-exon transcripts) and
display low evidence for coding potential
'''
# filter based on coding potential and rename
PipelineLncRNA.buildFinalLncRNAGeneSet(infile,
"lncrna_filtered_cpc_result",
outfile,
PARAMS["filtering_cpc"],
PARAMS["filtering_cpc_threshold"],
PARAMS["final_geneset_rename"])
##########################################################################
##########################################################################
##########################################################################
@transform(buildFinalLncRNAGeneSet,
regex(r"(\S+)/(\S+).gtf.gz"),
r"\2.stats")
def buildLncRNAGeneSetStats(infile, outfile):
'''
counts:
no. of transcripts
no. genes
average number of exons per transcript
average number of exons per gene
no. multi-exon transcripts
no. single exon transcripts
no. multi-exon genes
no. single exon genes
in the coding and lncRNA genesets
'''
outf = open(outfile, "w")
outf.write("\t".join(["no_transcripts",
"no_genes",
"no_exons_per_transcript",
"no_exons_per_gene",
"no_single_exon_transcripts",
"no_multi_exon_transcripts",
"no_single_exon_genes",
"no_multi_exon_genes"]) + "\n")
# For pep8 purposes
x = list(map(str, [PipelineLncRNA.CounterTranscripts(infile).count(),
PipelineLncRNA.CounterGenes(infile).count(),
PipelineLncRNA.CounterExonsPerTranscript(
infile).count(),
PipelineLncRNA.CounterExonsPerGene(
infile).count(),
PipelineLncRNA.CounterSingleExonTranscripts(
infile).count(),
PipelineLncRNA.CounterMultiExonTranscripts(
infile).count(),
PipelineLncRNA.CounterSingleExonGenes(
infile).count(),
PipelineLncRNA.CounterMultiExonGenes(
infile).count()]))
outf.write("\t".join(x))
@transform(buildRefcodingGeneSet,
regex(r"(\S+)/(\S+).gtf.gz"),
r"\2.stats")
def buildRefcodingGeneSetStats(infile, outfile):
'''
counts:
no. of transcripts
no. genes
average number of exons per transcript
average number of exons per gene
no. multi-exon transcripts
no. single exon transcripts
no. multi-exon genes
no. single exon genes
in the coding and lncRNA genesets
'''
# calculate exon status for refcoding genes.
tmpf = P.getTempFilename(".") + ".gz"
PipelineLncRNA.flagExonStatus(infile, tmpf)
outf = IOTools.openFile(outfile, "w")
outf.write("\t".join(["no_transcripts",
"no_genes",
"no_exons_per_transcript",
"no_exons_per_gene",
"no_single_exon_transcripts",
"no_multi_exon_transcripts",
"no_single_exon_genes",
"no_multi_exon_genes"]) + "\n")
outf.write("\t".join(map(str, [
PipelineLncRNA.CounterTranscripts(tmpf).count(),
PipelineLncRNA.CounterGenes(tmpf).count(),
PipelineLncRNA.CounterExonsPerTranscript(tmpf).count(),
PipelineLncRNA.CounterExonsPerGene(tmpf).count(),
PipelineLncRNA.CounterSingleExonTranscripts(tmpf).count(),
PipelineLncRNA.CounterMultiExonTranscripts(tmpf).count(),
PipelineLncRNA.CounterSingleExonGenes(tmpf).count(),
PipelineLncRNA.CounterMultiExonGenes(tmpf).count()])))
os.unlink(tmpf)
os.unlink(tmpf + ".log")
os.unlink(P.snip(tmpf, ".gz"))
@transform([buildLncRNAGeneSetStats, buildRefcodingGeneSetStats],
suffix(".stats"),
".load")
def loadGeneSetStats(infile, outfile):
'''
load stats on coding and lncRNA gene sets
'''
P.load(infile, outfile)
##########################################################################
##########################################################################
##########################################################################
@transform(buildFinalLncRNAGeneSet,
regex(r"(\S+).gtf.gz"),
add_inputs(PARAMS["genesets_refcoding"]),
r"\1.class.gtf.gz")
def classifyLncRNA(infiles, outfile):
'''
    Classify lncRNA relative to protein coding loci.
    Classify lincRNA in terms of their relationship to
    protein coding genes - creates indices for intervals on the
    fly - maybe should be creating additional annotations:
antisense
transcript overlapping protein coding exons on opposite strand
antisense_upstream
transcript < 2kb from tss on opposite strand
antisense_downstream
transcript < 2kb from gene end on opposite strand
sense_upstream
transcript < 2kb from tss on same strand
sense_downstream
transcript < 2kb from gene end on same strand
intergenic
transcript >2kb from any protein coding gene
intronic
overlaps protein coding gene intron on same strand
antisense_intronic
overlaps protein coding intron on opposite strand
'''
PipelineLncRNA.classifyLncRNAGenes(
infiles[0], infiles[1], outfile, dist=PARAMS["lncrna_dist"])
@transform(classifyLncRNA, suffix(".gtf.gz"), ".load")
def loadLncRNAClass(infile, outfile):
'''
load the lncRNA classifications
'''
# just load each transcript with its classification
temp = P.getTempFile(".")
inf = IOTools.openFile(infile)
for transcript in GTF.transcript_iterator(GTF.iterator(inf)):
temp.write("%s\t%s\t%s\n" % (
transcript[0].transcript_id,
transcript[0].gene_id,
transcript[0].source))
temp.close()
P.load(temp.name,
outfile,
options="--header-names=transcript_id,gene_id,class "
"--add-index=transcript_id "
"--add-index=gene_id")
os.unlink(temp.name)
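# Illustrative shape of the loaded classification table (one row per
# transcript; the values shown here are made up, not pipeline output):
#
#   transcript_id     gene_id        class
#   TCONS_00000123    XLOC_000045    antisense_upstream
#   TCONS_00000456    XLOC_000046    intergenic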
@merge([buildCodingGeneSet, classifyLncRNA], "gtfs/transcripts.gtf.gz")
def buildFullGeneSet(infiles, outfile):
'''
produces a final gene set that can be used for
differential expression analysis and comparisons
between protein coding and lncRNA transcripts
'''
# change the source to be in keeping with classification
    # of transcripts - i.e. those coming from the cufflinks assembly
infs = " ".join(infiles)
statement = ("zcat %(infs)s |"
" sed 's/Cufflinks/protein_coding/g' |"
" cgat gtf2gtf"
" --method=sort --sort-order=gene"
" --log=%(outfile)s.log |"
" gzip > %(outfile)s")
P.run()
##########################################################################
##########################################################################
##########################################################################
@follows(mkdir("./phyloCSF"))
@transform(buildFinalLncRNAGeneSet,
regex("(.+)/(.+)_final.gtf.gz"),
r"./phyloCSF/\2.bed.gz")
def convertGTFToBed12(infile, outfile):
"""
Transform the lncrna_final.gtf.gz into lncrna_final.bed
"""
PipelineLncRNA.gtfToBed12(infile, outfile, "transcript")
# AH: added default empty location for phyloCSF_location_axt to allow
# import of pipeline script
@follows(mkdir("./phyloCSF"))
@merge(os.path.join(PARAMS.get("phyloCSF_location_axt", ""), "*.axt.gz"),
"./phyloCSF/filtered_alignment.maf.gz")
def createMAFAlignment(infiles, outfile):
"""
Takes all .axt files in the input directory, filters them to remove
files based on supplied regular expressions, converts to a single maf file
using axtToMaf, filters maf alignments under a specified length.
"""
outfile = P.snip(outfile, ".gz")
axt_dir = PARAMS["phyloCSF_location_axt"]
to_ignore = re.compile(PARAMS["phyloCSF_ignore"])
axt_files = []
for axt_file in os.listdir(axt_dir):
if axt_file.endswith("net.axt.gz") and not to_ignore.search(axt_file):
axt_files.append(os.path.join(axt_dir, axt_file))
axt_files = (" ").join(sorted(axt_files))
E.info("axt files from which MAF alignment will be created: %s" %
axt_files)
target_genome = PARAMS["phyloCSF_target_genome"]
target_contigs = os.path.join(PARAMS["annotations_dir"],
PARAMS["annotations_interface_contigs"])
query_genome = PARAMS["phyloCSF_query_genome"]
query_contigs = os.path.join(PARAMS["phyloCSF_query_assembly"],
PARAMS["annotations_interface_contigs"])
tmpf1 = P.getTempFilename("./phyloCSF")
tmpf2 = P.getTempFilename("./phyloCSF")
to_cluster = False
# concatenate axt files, then remove headers
statement = ("zcat %(axt_files)s"
" > %(tmpf1)s;"
" axtToMaf "
" -tPrefix=%(target_genome)s."
" -qPrefix=%(query_genome)s."
" %(tmpf1)s"
" %(target_contigs)s"
" %(query_contigs)s"
" %(tmpf2)s")
P.run()
E.info("Temporary axt file created %s" % os.path.abspath(tmpf1))
E.info("Temporary maf file created %s" % os.path.abspath(tmpf2))
removed = P.snip(outfile, ".maf") + "_removed.maf"
to_cluster = False
filtered = PipelineLncRNA.filterMAF(tmpf2,
outfile,
removed,
PARAMS["phyloCSF_filter_alignments"])
E.info("%s blocks were ignored in MAF alignment"
" because length of target alignment was too short" % filtered[0])
E.info("%s blocks were output to filtered MAF alignment" % filtered[1])
os.unlink(tmpf1)
os.unlink(tmpf2)
to_cluster = False
statement = ("gzip %(outfile)s;"
" gzip %(removed)s")
P.run()
@merge([convertGTFToBed12, createMAFAlignment],
"./phyloCSF/lncrna_transcripts.fasta.gz")
def extractLncRNAFastaAlignments(infiles, outfile):
"""
    Receives a MAF file containing pairwise alignments and a bed12 file
containing intervals. Outputs a single fasta file containing aligned
sequence for each interval.
"""
bed_file, maf_file = infiles
maf_tmp = P.getTempFilename("./phyloCSF")
to_cluster = False
statement = ("gunzip -c %(maf_file)s > %(maf_tmp)s")
P.run()
target_genome = PARAMS["genome"]
query_genome = PARAMS["phyloCSF_query_genome"]
genome_file = os.path.join(PARAMS["genomedir"], PARAMS["genome"])
gene_models = PipelineLncRNA.extractMAFGeneBlocks(bed_file,
maf_tmp,
genome_file,
outfile,
target_genome,
query_genome,
keep_gaps=False)
E.info("%i gene_models extracted" % gene_models)
os.unlink(maf_tmp)
@follows(mkdir("./phyloCSF/lncrna_fasta"))
@split(extractLncRNAFastaAlignments, "./phyloCSF/lncrna_fasta/*.fasta")
def splitLncRNAFasta(infile, outfiles):
out_dir = "./phyloCSF/lncrna_fasta"
name_dict = {}
for mapping in PARAMS["phyloCSF_map_species_names"].split(","):
pair = mapping.split(":")
key = ">" + pair[0]
value = ">" + pair[1]
name_dict[key] = value
E.info("Name mapping: %s" % name_dict)
PipelineLncRNA.splitAlignedFasta(infile, out_dir, name_dict)
@transform(splitLncRNAFasta, suffix(".fasta"), ".phyloCSF")
def runLncRNAPhyloCSF(infile, outfile):
phylogeny = PARAMS["phyloCSF_phylogeny"]
n_frames = int(PARAMS["phyloCSF_n_frames"])
if PARAMS["phyloCSF_options"]:
options = PARAMS["phyloCSF_options"]
else:
options = ""
species = []
for mapping in PARAMS["phyloCSF_map_species_names"].split(","):
species.append(mapping.split(":")[1])
species = ",".join(species)
to_cluster = True
statement = ("PhyloCSF %(phylogeny)s"
" %(infile)s"
" --frames=%(n_frames)s"
" --species=%(species)s"
" %(options)s"
" > %(outfile)s")
P.run()
@merge(runLncRNAPhyloCSF, "lncRNA_phyloCSF.tsv")
def mergeLncRNAPhyloCSF(infiles, outfile):
file_name = " ".join([x for x in infiles])
statement = '''
cgat combine_tables
--no-titles
--cat=CAT
--missing-value=0
--log=%(outfile)s.log
%(file_name)s
> %(outfile)s
'''
P.run()
@transform(mergeLncRNAPhyloCSF, regex("(?:.*)/(.+).tsv"), r"\1.load")
def loadLncRNAPhyloCSF(infile, outfile):
tmpf = P.getTempFilename("/ifs/scratch")
PipelineLncRNA.parsePhyloCSF(infile, tmpf)
P.load(tmpf, outfile, options="--add-index=gene_id")
##########################################################################
##########################################################################
# calculate phyloCSF scores for ENSEMBL lincRNA
##########################################################################
##########################################################################
@active_if(PARAMS.get("control_geneset_data") and
PARAMS["control_geneset_data"] != "ensembl")
@follows(mkdir("lncRNA_control"))
@transform(PARAMS["genesets_reference"],
regex("reference.gtf.gz"),
r"lncRNA_control/lincRNA.gtf.gz")
def extractEnsemblLincRNA(infile, outfile):
tmpf = P.getTempFile("/ifs/scratch")
for gtf in GTF.iterator(IOTools.openFile(infile)):
if gtf.source == "lincRNA":
tmpf.write(str(gtf) + "\n")
else:
continue
tmpf.close()
tmpf = tmpf.name
statement = ("cat %(tmpf)s |"
" cgat gtf2gtf"
" --method=sort --sort-order=gene"
" --log=%(outfile)s.log |"
" gzip > %(outfile)s")
P.run()
os.unlink(tmpf)
@active_if(PARAMS.get("control_geneset_data") and
PARAMS["control_geneset_data"] == "ensembl")
@follows(mkdir("lncRNA_control"))
@files(os.path.join(".", PARAMS["control_geneset_data"]),
os.path.join("lncRNA_control", PARAMS["control_geneset_data"]))
def extractControlLncRNA(infile, outfile):
    assert os.path.exists(infile), "Control file is missing: %s" % infile
assert ".gtf" in os.path.basename(infile), "Control file must be gtf"
os.symlink(infile, outfile)
@transform([extractEnsemblLincRNA, extractControlLncRNA],
regex("(.+).gtf(.*)"),
r"\1.bed.gz")
def convertControlGTFToBed12(infile, outfile):
"""
Convert either ensembl lincRNA, or control gtf to bed12 format
"""
PipelineLncRNA.gtfToBed12(infile, outfile, "transcript")
@collate(convertControlGTFToBed12,
regex("(.+)/(.+).bed.gz"),
add_inputs(createMAFAlignment),
r"\1/\2_transcripts.fasta.gz")
def extractControllLncRNAFastaAlignments(infiles, outfile):
bed_file, maf_file = infiles
maf_tmp = P.getTempFilename("/ifs/scratch")
to_cluster = False
statement = ("gunzip -c %(maf_file)s > %(maf_tmp)s")
P.run()
target_genome = PARAMS["genome"]
query_genome = PARAMS["phyloCSF_query_genome"]
genome_file = os.path.join(PARAMS["genomedir"], PARAMS["genome"])
gene_models = PipelineLncRNA.extractMAFGeneBlocks(bed_file,
maf_tmp,
genome_file,
outfile,
target_genome,
query_genome,
keep_gaps=False)
E.info("%i gene_models extracted" % gene_models)
os.unlink(maf_tmp)
@follows(mkdir("./lncRNA_control/aligned_fasta"))
@split(extractControllLncRNAFastaAlignments,
"./lncRNA_control/aligned_fasta/*.fasta")
def splitControlLncRNAFasta(infile, outfiles):
out_dir = "./lncRNA_control/aligned_fasta"
name_dict = {}
for mapping in PARAMS["phyloCSF_map_species_names"].split(","):
pair = mapping.split(":")
key = ">" + pair[0]
value = ">" + pair[1]
name_dict[key] = value
E.info("Name mapping: %s" % name_dict)
PipelineLncRNA.splitAlignedFasta(infile, out_dir, name_dict)
@transform(splitControlLncRNAFasta, suffix(".fasta"), ".phyloCSF")
def runControlLncRNAPhyloCSF(infile, outfile):
phylogeny = PARAMS["phyloCSF_phylogeny"]
n_frames = int(PARAMS["phyloCSF_n_frames"])
if PARAMS["phyloCSF_options"]:
options = PARAMS["phyloCSF_options"]
else:
options = ""
species = []
for mapping in PARAMS["phyloCSF_map_species_names"].split(","):
species.append(mapping.split(":")[1])
species = ",".join(species)
to_cluster = True
statement = ("PhyloCSF %(phylogeny)s"
" %(infile)s"
" --frames=%(n_frames)s"
" --species=%(species)s"
" %(options)s"
" > %(outfile)s")
P.run()
@merge(runControlLncRNAPhyloCSF, "./lncRNA_control/control_phyloCSF.tsv")
def mergeControlLncRNAPhyloCSF(infiles, outfile):
file_names = " ".join([x for x in infiles])
statement = '''
cgat combine_tables
--no-titles
--cat=CAT
--missing-value=0
--log=%(outfile)s.log
%(file_names)s
> %(outfile)s
'''
P.run()
@transform(mergeControlLncRNAPhyloCSF, regex("(?:.+)/(.+).tsv"), r"\1.load")
def loadControlLncRNAPhyloCSF(infile, outfile):
tmpf = P.getTempFilename("/ifs/scratch")
PipelineLncRNA.parsePhyloCSF(infile, tmpf)
P.load(tmpf, outfile, options="--add-index=gene_id")
##########################################################################
##########################################################################
# calculate CPC scores for ENSEMBL lincRNA
##########################################################################
#########################################################################
# @follows(mkdir("lincRNA_ensembl"))
# @transform( PARAMS["genesets_reference"],
# regex("reference.gtf.gz"),
# r"lincRNA_ensembl/lincRNA.gtf.gz" )
# def extractEnsemblLincRNA(infile, outfile):
@follows(mkdir("lncRNA_control/fasta"))
@transform([extractEnsemblLincRNA, extractControlLncRNA],
regex("(?:.+)/(.+).gtf(.*)"),
r"lncRNA_control/fasta/\1.fasta")
def buildControlFasta(infile, outfile):
genome = os.path.join(PARAMS["general_genomedir"],
PARAMS["genome"] + ".fasta")
statement = ("zcat %(infile)s |"
" cgat gff2fasta"
" --genome-file=%(genome)s"
" --log=%(outfile)s.log"
" --is-gtf"
" > %(outfile)s")
P.run()
@transform(buildControlFasta,
regex("(.+)/(.+)/(.*).fasta"),
r"\1/cpc/control_cpc.result")
def runControlCPC(infile, outfile):
# farm.py is called from within cpc.sh
assert IOTools.which(
"farm.py"), "farm.py needs to be in $PATH for cpc to run"
# Default cpc parameters don't work with later versions of blast
E.info("Running cpc with blast version:%s" % IOTools.which("blastx"))
result_evidence = P.snip(outfile, ".result") + ".evidence"
working_dir = "lncRNA_control/cpc"
statement = ("%(pipeline_scriptsdir)s/cpc.sh"
" %(infile)s"
" %(outfile)s"
" %(working_dir)s"
" %(result_evidence)s")
P.run()
@transform(runControlCPC, regex("(?:.+)/(.+)/(.+).result"), r"\2.load")
def loadControlCPCResults(infile, outfile):
P.load(infile,
outfile,
options="--header-names=transcript_id,feature,C_NC,CP_score "
"--add-index=transcript_id")
# targets
@follows(buildCodingGeneSet,
buildRefnoncodingGeneSet,
buildFinalLncRNAGeneSet,
loadGeneSetStats,
loadLncRNAClass,
buildFullGeneSet)
def GeneSets():
pass
@follows(mergeLncRNAPhyloCSF)
def phyloCSF():
pass
@follows(loadCPCResults)
def CodingPotential():
pass
##########################################################################
##########################################################################
##########################################################################
@follows(GeneSets, CodingPotential)
def full():
pass
##########################################################################
##########################################################################
##########################################################################
@follows(mkdir("report"))
def build_report():
'''build report from scratch.'''
E.info("starting documentation build process from scratch")
P.run_report(clean=True)
@follows(mkdir("report"))
def update_report():
'''update report.'''
E.info("updating documentation")
P.run_report(clean=False)
###################################################################
###################################################################
###################################################################
@follows(update_report)
def publish():
'''publish files.'''
# publish web pages
P.publish_report()
# publish additional data - i.e. the final lncRNA gtf file
web_dir = PARAMS["web_dir"]
    if not os.path.exists(os.path.join(web_dir, "lncrna_final.class.gtf.gz")):
        os.symlink("lncrna_final.class.gtf.gz", os.path.abspath(
            os.path.join(web_dir, "lncrna_final.class.gtf.gz")))
##########################################################################
##########################################################################
##########################################################################
def main(argv=None):
if argv is None:
argv = sys.argv
P.main(argv)
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
|
CGATOxford/CGATPipelines
|
obsolete/pipeline_rnaseqlncrna.py
|
Python
|
mit
| 43,524
|
[
"BLAST"
] |
89abb2291e6c901c13c69a5c745cfd6f5f829c738ae07d2adaed2139078cbefa
|
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import imp
import json
import os
import shlex
import zipfile
from io import BytesIO
# from Ansible
from ansible import __version__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.utils.unicode import to_bytes, to_unicode
from ansible.plugins.strategy import action_write_locks
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
# we've moved the module_common relative to the snippets, so fix the path
_SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
ZIPLOADER_TEMPLATE = u'''%(shebang)s
%(coding)s
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import base64
import shutil
import zipfile
import tempfile
import subprocess
if sys.version_info < (3,):
bytes = str
PY3 = False
else:
unicode = str
PY3 = True
try:
# Python-2.6+
from io import BytesIO as IOStream
except ImportError:
# Python < 2.6
from StringIO import StringIO as IOStream
ZIPDATA = """%(zipdata)s"""
def invoke_module(module, modlib_path, json_params):
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
os.environ['PYTHONPATH'] = ':'.join((modlib_path, pythonpath))
else:
os.environ['PYTHONPATH'] = modlib_path
p = subprocess.Popen(['%(interpreter)s', module], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate(json_params)
if not isinstance(stderr, (bytes, unicode)):
stderr = stderr.read()
if not isinstance(stdout, (bytes, unicode)):
stdout = stdout.read()
if PY3:
sys.stderr.buffer.write(stderr)
sys.stdout.buffer.write(stdout)
else:
sys.stderr.write(stderr)
sys.stdout.write(stdout)
return p.returncode
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine. Run with ANSIBLE_KEEP_REMOTE_FILES=1 envvar and -vvv
# to save the module file remotely. Login to the remote machine and use
# /path/to/module explode to extract the ZIPDATA payload into source
# files. Edit the source files to instrument the code or experiment with
# different values. Then use /path/to/module execute to run the extracted
# files you've edited instead of the actual zipped module.
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.abspath(os.path.dirname(__file__))
args_path = os.path.join(basedir, 'args')
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'w')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'w')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% os.path.join(basedir, 'ansible'))
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# This differs slightly from default Ansible execution of Python modules
# as it passes the arguments to the module via a file instead of stdin.
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
os.environ['PYTHONPATH'] = ':'.join((basedir, pythonpath))
else:
os.environ['PYTHONPATH'] = basedir
p = subprocess.Popen(['%(interpreter)s', 'ansible_module_%(ansible_module)s.py', args_path], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if not isinstance(stderr, (bytes, unicode)):
stderr = stderr.read()
if not isinstance(stdout, (bytes, unicode)):
stdout = stdout.read()
if PY3:
sys.stderr.buffer.write(stderr)
sys.stdout.buffer.write(stdout)
else:
sys.stderr.write(stderr)
sys.stdout.write(stdout)
return p.returncode
elif command == 'excommunicate':
# This attempts to run the module in-process (by importing a main
# function and then calling it). It is not the way ansible generally
# invokes the module so it won't work in every case. It is here to
# aid certain debuggers which work better when the code doesn't change
# from one process to another but there may be problems that occur
# when using this that are only artifacts of how we're invoking here,
# not actual bugs (as they don't affect the real way that we invoke
# ansible modules)
        # stub sys.argv so the module picks up its arguments from the args file
sys.argv = ['%(ansible_module)s', args_path]
from ansible_module_%(ansible_module)s import main
main()
print('WARNING: Module returned to wrapper instead of exiting')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
if __name__ == '__main__':
ZIPLOADER_PARAMS = %(params)s
if PY3:
ZIPLOADER_PARAMS = ZIPLOADER_PARAMS.encode('utf-8')
try:
temp_path = tempfile.mkdtemp(prefix='ansible_')
zipped_mod = os.path.join(temp_path, 'ansible_modlib.zip')
modlib = open(zipped_mod, 'wb')
modlib.write(base64.b64decode(ZIPDATA))
modlib.close()
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ZIPLOADER_PARAMS)
else:
z = zipfile.ZipFile(zipped_mod)
module = os.path.join(temp_path, 'ansible_module_%(ansible_module)s.py')
f = open(module, 'wb')
f.write(z.read('ansible_module_%(ansible_module)s.py'))
f.close()
exitcode = invoke_module(module, zipped_mod, ZIPLOADER_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except OSError:
# tempdir creation probably failed
pass
sys.exit(exitcode)
'''
class ModuleDepFinder(ast.NodeVisitor):
# Caveats:
# This code currently does not handle:
    # * relative imports (py2.6+), e.g.: from . import urls
IMPORT_PREFIX_SIZE = len('ansible.module_utils.')
def __init__(self, *args, **kwargs):
"""
Walk the ast tree for the python module.
Save submodule[.submoduleN][.identifier] into self.submodules
self.submodules will end up with tuples like:
- ('basic',)
- ('urls', 'fetch_url')
- ('database', 'postgres')
- ('database', 'postgres', 'quote')
It's up to calling code to determine whether the final element of the
dotted strings are module names or something else (function, class, or
variable names)
"""
super(ModuleDepFinder, self).__init__(*args, **kwargs)
self.submodules = set()
def visit_Import(self, node):
# import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
for alias in (a for a in node.names if a.name.startswith('ansible.module_utils.')):
py_mod = alias.name[self.IMPORT_PREFIX_SIZE:]
self.submodules.add((py_mod,))
self.generic_visit(node)
def visit_ImportFrom(self, node):
if node.module.startswith('ansible.module_utils'):
where_from = node.module[self.IMPORT_PREFIX_SIZE:]
if where_from:
# from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
py_mod = tuple(where_from.split('.'))
for alias in node.names:
self.submodules.add(py_mod + (alias.name,))
else:
# from ansible.module_utils import MODLIB [,MODLIB2] [as asname]
for alias in node.names:
self.submodules.add((alias.name,))
self.generic_visit(node)
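# Illustrative sketch of how the finder is used (the parsed source below is an
# example, not taken from a real module):
#
#   tree = ast.parse("from ansible.module_utils.basic import AnsibleModule\n"
#                    "import ansible.module_utils.known_hosts\n")
#   finder = ModuleDepFinder()
#   finder.visit(tree)
#   # finder.submodules == {('basic', 'AnsibleModule'), ('known_hosts',)}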
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
l = line.strip()
if not l or l.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
# ZIPLOADER_TEMPLATE stripped of comments for smaller over the wire size
STRIPPED_ZIPLOADER_TEMPLATE = _strip_comments(ZIPLOADER_TEMPLATE)
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
fd = open(path, 'rb')
data = fd.read()
fd.close()
return data
def _get_shebang(interpreter, task_vars, args=tuple()):
"""
Note not stellar API:
Returns None instead of always returning a shebang line. Doing it this
way allows the caller to decide to use the shebang it read from the
file rather than trust that we reformatted what they already have
correctly.
"""
interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter)
if interpreter_config not in task_vars:
return (None, interpreter)
interpreter = task_vars[interpreter_config]
shebang = u'#!' + interpreter
if args:
shebang = shebang + u' ' + u' '.join(args)
return (shebang, interpreter)
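# Illustrative behaviour of _get_shebang (interpreter paths are examples only):
#   _get_shebang(u'/usr/bin/python', {})
#       -> (None, u'/usr/bin/python')            # caller keeps the file's shebang
#   _get_shebang(u'/usr/bin/python',
#                {u'ansible_python_interpreter': u'/opt/py/bin/python'})
#       -> (u'#!/opt/py/bin/python', u'/opt/py/bin/python')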
def _get_facility(task_vars):
facility = C.DEFAULT_SYSLOG_FACILITY
if 'ansible_syslog_facility' in task_vars:
facility = task_vars['ansible_syslog_facility']
return facility
def recursive_finder(name, data, py_module_names, py_module_cache, zf):
"""
    Using ModuleDepFinder, make sure we have all of the module_utils files that
    the module and its module_utils files need.
"""
# Parse the module and find the imports of ansible.module_utils
tree = ast.parse(data)
finder = ModuleDepFinder()
finder.visit(tree)
#
    # Determine which of the imports we've found are modules (vs class,
    # function, or variable names) of packages
#
normalized_modules = set()
# Loop through the imports that we've found to normalize them
# Exclude paths that match with paths we've already processed
# (Have to exclude them a second time once the paths are processed)
for py_module_name in finder.submodules.difference(py_module_names):
module_info = None
# Check whether either the last or the second to last identifier is
# a module name
for idx in (1, 2):
if len(py_module_name) < idx:
break
try:
module_info = imp.find_module(py_module_name[-idx],
[os.path.join(_SNIPPET_PATH, *py_module_name[:-idx])])
break
except ImportError:
continue
# Could not find the module. Construct a helpful error message.
if module_info is None:
msg = ['Could not find imported module support code for %s. Looked for' % name]
if idx == 2:
msg.append('either %s or %s' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
if idx == 2:
# We've determined that the last portion was an identifier and
# thus, not part of the module name
py_module_name = py_module_name[:-1]
# If not already processed then we've got work to do
if py_module_name not in py_module_names:
# If not in the cache, then read the file into the cache
# We already have a file handle for the module open so it makes
# sense to read it now
if py_module_name not in py_module_cache:
if module_info[2][2] == imp.PKG_DIRECTORY:
# Read the __init__.py instead of the module file as this is
# a python package
py_module_cache[py_module_name + ('__init__',)] = _slurp(os.path.join(os.path.join(_SNIPPET_PATH, *py_module_name), '__init__.py'))
normalized_modules.add(py_module_name + ('__init__',))
else:
py_module_cache[py_module_name] = module_info[0].read()
module_info[0].close()
normalized_modules.add(py_module_name)
# Make sure that all the packages that this module is a part of
# are also added
for i in range(1, len(py_module_name)):
py_pkg_name = py_module_name[:-i] + ('__init__',)
if py_pkg_name not in py_module_names:
normalized_modules.add(py_pkg_name)
py_module_cache[py_pkg_name] = _slurp('%s.py' % os.path.join(_SNIPPET_PATH, *py_pkg_name))
#
# iterate through all of the ansible.module_utils* imports that we haven't
# already checked for new imports
#
# set of modules that we haven't added to the zipfile
unprocessed_py_module_names = normalized_modules.difference(py_module_names)
for py_module_name in unprocessed_py_module_names:
py_module_path = os.path.join(*py_module_name)
py_module_file_name = '%s.py' % py_module_path
zf.writestr(os.path.join("ansible/module_utils",
py_module_file_name), py_module_cache[py_module_name])
# Add the names of the files we're scheduling to examine in the loop to
# py_module_names so that we don't re-examine them in the next pass
# through recursive_finder()
py_module_names.update(unprocessed_py_module_names)
for py_module_file in unprocessed_py_module_names:
recursive_finder(py_module_file, py_module_cache[py_module_file], py_module_names, py_module_cache, zf)
# Save memory; the file won't have to be read again for this ansible module.
del py_module_cache[py_module_file]
def _find_snippet_imports(module_name, module_data, module_path, module_args, task_vars, module_compression):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_substyle = module_style = 'old'
# module_style is something important to calling code (ActionBase). It
# determines how arguments are formatted (json vs k=v) and whether
# a separate arguments file needs to be sent over the wire.
# module_substyle is extra information that's useful internally. It tells
# us what we have to look to substitute in the module files and whether
# we're using module replacer or ziploader to format the module itself.
if REPLACER in module_data:
        # Do REPLACER before 'from ansible.module_utils' because we need to make
        # sure we substitute "from ansible.module_utils.basic import *" for REPLACER
module_style = 'new'
module_substyle = 'python'
module_data = module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
elif b'from ansible.module_utils.' in module_data:
module_style = 'new'
module_substyle = 'python'
elif REPLACER_WINDOWS in module_data:
module_style = 'new'
module_substyle = 'powershell'
elif REPLACER_JSONARGS in module_data:
module_style = 'new'
module_substyle = 'jsonargs'
elif b'WANT_JSON' in module_data:
module_substyle = module_style = 'non_native_want_json'
shebang = None
# Neither old-style nor non_native_want_json modules should be modified
# except for the shebang line (Done by modify_module)
if module_style in ('old', 'non_native_want_json'):
return module_data, module_style, shebang
output = BytesIO()
py_module_names = set()
if module_substyle == 'python':
# ziploader for new-style python classes
constants = dict(
SELINUX_SPECIAL_FS=C.DEFAULT_SELINUX_SPECIAL_FS,
SYSLOG_FACILITY=_get_facility(task_vars),
)
params = dict(ANSIBLE_MODULE_ARGS=module_args,
ANSIBLE_MODULE_CONSTANTS=constants,
)
python_repred_params = to_bytes(repr(json.dumps(params)), errors='strict')
try:
compression_method = getattr(zipfile, module_compression)
except AttributeError:
display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
compression_method = zipfile.ZIP_STORED
lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ziploader_cache')
cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))
zipdata = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
zipdata = open(cached_module_filename, 'rb').read()
# Fool the check later... I think we should just remove the check
py_module_names.add(('basic',))
else:
with action_write_locks[module_name]:
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
# Create the module zip data
zipoutput = BytesIO()
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
zf.writestr('ansible/__init__.py', b''.join((b"__version__ = '", to_bytes(__version__), b"'\n")))
zf.writestr('ansible/module_utils/__init__.py', b'')
zf.writestr('ansible_module_%s.py' % module_name, module_data)
py_module_cache = { ('__init__',): b'' }
recursive_finder(module_name, module_data, py_module_names, py_module_cache, zf)
zf.close()
zipdata = base64.b64encode(zipoutput.getvalue())
# Write the assembled module to a temp file (write to temp
# so that no one looking for the file reads a partially
# written file)
if not os.path.exists(lookup_path):
# Note -- if we have a global function to setup, that would
# be a better place to run this
os.mkdir(lookup_path)
with open(cached_module_filename + '-part', 'w') as f:
f.write(zipdata)
# Rename the file into its final position in the cache so
# future users of this module can read it off the
# filesystem instead of constructing from scratch.
os.rename(cached_module_filename + '-part', cached_module_filename)
if zipdata is None:
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
try:
zipdata = open(cached_module_filename, 'rb').read()
except IOError:
raise AnsibleError('A different worker process failed to create module file. Look at traceback for that process for debugging information.')
# Fool the check later... I think we should just remove the check
py_module_names.add(('basic',))
shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars)
if shebang is None:
shebang = u'#!/usr/bin/python'
output.write(to_bytes(STRIPPED_ZIPLOADER_TEMPLATE % dict(
zipdata=zipdata,
ansible_module=module_name,
params=python_repred_params,
shebang=shebang,
interpreter=interpreter,
coding=ENCODING_STRING,
)))
module_data = output.getvalue()
# Sanity check from 1.x days. Maybe too strict. Some custom python
# modules that use ziploader may implement their own helpers and not
# need basic.py. All the constants that we substituted into basic.py
# for module_replacer are now available in other, better ways.
if ('basic',) not in py_module_names:
raise AnsibleError("missing required import in %s: Did not import ansible.module_utils.basic for boilerplate helper code" % module_path)
elif module_substyle == 'powershell':
# Module replacer for jsonargs and windows
lines = module_data.split(b'\n')
for line in lines:
if REPLACER_WINDOWS in line:
ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1"))
output.write(ps_data)
py_module_names.add((b'powershell',))
continue
output.write(line + b'\n')
module_data = output.getvalue()
module_args_json = to_bytes(json.dumps(module_args))
module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)
# Sanity check from 1.x days. This is currently useless as we only
# get here if we are going to substitute powershell.ps1 into the
# module anyway. Leaving it for when/if we add other powershell
# module_utils files.
if (b'powershell',) not in py_module_names:
raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)
elif module_substyle == 'jsonargs':
module_args_json = to_bytes(json.dumps(module_args))
# these strings could be included in a third-party module but
# officially they were included in the 'basic' snippet for new-style
# python modules (which has been replaced with something else in
        # ziploader). If we remove them from jsonargs-style module replacer
# then we can remove them everywhere.
python_repred_args = to_bytes(repr(module_args_json))
module_data = module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args)
module_data = module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
# The main event -- substitute the JSON args string into the module
module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)
facility = b'syslog.' + to_bytes(_get_facility(task_vars), errors='strict')
module_data = module_data.replace(b'syslog.LOG_USER', facility)
return (module_data, module_style, shebang)
# ******************************************************************************
def modify_module(module_name, module_path, module_args, task_vars=dict(), module_compression='ZIP_STORED'):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
All modules are required to import at least basic, though there will also
be other snippets.
For powershell, there's equivalent conventions like this:
# POWERSHELL_COMMON
which results in the inclusion of the common code from powershell.ps1
"""
with open(module_path, 'rb') as f:
# read in the module source
module_data = f.read()
(module_data, module_style, shebang) = _find_snippet_imports(module_name, module_data, module_path, module_args, task_vars, module_compression)
if shebang is None:
lines = module_data.split(b"\n", 1)
if lines[0].startswith(b"#!"):
shebang = lines[0].strip()
args = shlex.split(str(shebang[2:]))
interpreter = args[0]
interpreter = to_bytes(interpreter)
new_shebang = to_bytes(_get_shebang(interpreter, task_vars, args[1:])[0], errors='strict', nonstring='passthru')
if new_shebang:
lines[0] = shebang = new_shebang
if os.path.basename(interpreter).startswith(b'python'):
lines.insert(1, to_bytes(ENCODING_STRING))
else:
# No shebang, assume a binary module?
pass
module_data = b"\n".join(lines)
else:
shebang = to_bytes(shebang, errors='strict')
return (module_data, module_style, shebang)
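# Minimal, hypothetical call of modify_module() -- the module name, path and
# arguments below are assumptions for illustration, not shipped defaults:
#
#   (data, style, shebang) = modify_module(
#       'ping', '/path/to/modules/core/system/ping.py', {'data': 'pong'},
#       task_vars={}, module_compression='ZIP_STORED')
#   # 'style' is 'new' for ziploader-style python modules, 'old' or
#   # 'non_native_want_json' otherwise; 'data' holds the assembled payload.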
|
benjixx/ansible
|
lib/ansible/executor/module_common.py
|
Python
|
gpl-3.0
| 29,173
|
[
"VisIt"
] |
8768e7e75160153bb1aa4e545f9f899de5e8369b4193c44b2c6c8d67241eb556
|
#!/usr/bin/python
"""Test to verify bug #469367 is still fixed.
Orca StarOffice script not properly announcing (potential) indentation
in OOo Writer.
"""
from macaroon.playback import *
import utils
sequence = MacroSequence()
######################################################################
# 1. Start oowriter. There is a bug_469367.params file that will
# automatically load empty_document.odt. This uses the FreeSerif
# font as the default which should be available on all test systems.
#
sequence.append(WaitForWindowActivate("empty_document(.odt|) - " + utils.getOOoName("Writer"), None))
sequence.append(WaitForFocus("", acc_role=pyatspi.ROLE_PARAGRAPH))
######################################################################
# 2. Type Control-Home to move the text caret to the start of the document.
#
sequence.append(KeyComboAction("<Control>Home"))
######################################################################
# 3. Enter two tabs, three spaces and the text "This is a test." followed by
# Return.
#
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))
sequence.append(TypeAction(" This is a test."))
sequence.append(KeyComboAction("Return"))
sequence.append(WaitForFocus("", acc_role=pyatspi.ROLE_PARAGRAPH))
######################################################################
# 4. Enter up arrow to position the text caret on the first line.
#
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(WaitForFocus("", acc_role=pyatspi.ROLE_PARAGRAPH))
sequence.append(utils.AssertPresentationAction(
"Enter up arrow to position the text caret on the first line",
["BRAILLE LINE: '" + utils.getOOoBrailleLine("Writer", "empty_document(.odt|)", " This is a test. \$l") + "'",
" VISIBLE: ' This is a test. $l', cursor=1",
"BRAILLE LINE: '" + utils.getOOoBrailleLine("Writer", "empty_document(.odt|)", " This is a test. \$l") + "'",
" VISIBLE: ' This is a test. $l', cursor=1",
"SPEECH OUTPUT: ' This is a test.'"]))
######################################################################
# 5. Enter Insert-f to get text information.
#
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(TypeAction ("f"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"Enter Insert-f to get text information",
["SPEECH OUTPUT: 'size 12'",
"SPEECH OUTPUT: 'family name FreeSerif'",
"SPEECH OUTPUT: 'paragraph style Default'"]))
######################################################################
# 6. Enter Alt-f, Alt-c to close the Writer application.
#
sequence.append(KeyComboAction("<Alt>f"))
sequence.append(WaitForFocus("New", acc_role=pyatspi.ROLE_MENU))
sequence.append(KeyComboAction("<Alt>c"))
# We'll get a new window, but we'll wait for the "Save" button to get focus.
#
sequence.append(WaitForFocus("Save", acc_role=pyatspi.ROLE_PUSH_BUTTON))
######################################################################
# 7. Enter Tab and Return to discard the current changes.
#
sequence.append(KeyComboAction("Tab"))
sequence.append(WaitForFocus("Discard", acc_role=pyatspi.ROLE_PUSH_BUTTON))
sequence.append(KeyComboAction("Return"))
######################################################################
# 8. Wait for things to get back to normal.
#
sequence.append(PauseAction(3000))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
h4ck3rm1k3/orca-sonar
|
test/keystrokes/oowriter/bug_469367.py
|
Python
|
lgpl-2.1
| 3,573
|
[
"ORCA"
] |
8d7093244b18a4cf810c201cf3f1baa1fc442fb6ce291db5fa5cb6eb8902e649
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Tianyu Zhu <zhutianyu1991@gmail.com>
#
'''
Spin-unrestricted G0W0 approximation with analytic continuation
This implementation has N^4 scaling, and is faster than GW-CD (N^4)
and analytic GW (N^6) methods.
GW-AC is recommended for valence states only, and is inaccurate for core states.
Method:
See T. Zhu and G.K.-L. Chan, arxiv:2007.03148 (2020) for details
Compute Sigma on imaginary frequency with density fitting,
then analytically continued to real frequency
Useful References:
J. Chem. Theory Comput. 12, 3623-3635 (2016)
New J. Phys. 14, 053020 (2012)
'''
from functools import reduce
import numpy
import numpy as np
import h5py
from scipy.optimize import newton, least_squares
from pyscf import lib
from pyscf.lib import logger
from pyscf.ao2mo import _ao2mo
from pyscf import df, scf
from pyscf.mp.ump2 import get_nocc, get_nmo, get_frozen_mask
from pyscf import __config__
einsum = lib.einsum
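# Minimal usage sketch (illustrative only -- the molecule, basis and grid size
# are assumptions, not part of this module):
#
#   from pyscf import gto, scf
#   mol = gto.M(atom='O 0 0 0; H 0 0 0.96; H 0.93 0 -0.24',
#               basis='def2-svp', verbose=4)
#   mf = scf.UHF(mol).run()
#   gw = UGWAC(mf)             # class defined later in this module
#   gw.kernel(nw=100)          # quasiparticle energies end up in gw.mo_energy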
def kernel(gw, mo_energy, mo_coeff, Lpq=None, orbs=None,
nw=None, vhf_df=False, verbose=logger.NOTE):
'''
GW-corrected quasiparticle orbital energies
Returns:
A list : converged, mo_energy, mo_coeff
'''
mf = gw._scf
mol = gw.mol
if gw.frozen is None:
frozen = 0
else:
frozen = gw.frozen
assert (isinstance(frozen, int))
nocca, noccb = gw.nocc
nmoa, nmob = gw.nmo
# only support frozen core
assert (frozen < nocca and frozen < noccb)
if Lpq is None:
Lpq = gw.ao2mo(mo_coeff)
if orbs is None:
orbs = range(nmoa)
else:
orbs = [x - frozen for x in orbs]
if orbs[0] < 0:
logger.warn(gw, 'GW orbs must be larger than frozen core!')
raise RuntimeError
# v_xc
v_mf = mf.get_veff()
vj = mf.get_j()
v_mf[0] = v_mf[0] - (vj[0] + vj[1])
v_mf[1] = v_mf[1] - (vj[0] + vj[1])
v_mf_frz = np.zeros((2, nmoa-frozen, nmob-frozen))
for s in range(2):
v_mf_frz[s] = reduce(numpy.dot, (mo_coeff[s].T, v_mf[s], mo_coeff[s]))
v_mf = v_mf_frz
# v_hf from DFT/HF density
if vhf_df and frozen == 0:
# density fitting vk
vk = np.zeros_like(v_mf)
vk[0] = -einsum('Lni,Lim->nm',Lpq[0,:,:,:nocca],Lpq[0,:,:nocca,:])
vk[1] = -einsum('Lni,Lim->nm',Lpq[1,:,:,:noccb],Lpq[1,:,:noccb,:])
else:
# exact vk without density fitting
dm = mf.make_rdm1()
uhf = scf.UHF(mol)
vk = uhf.get_veff(mol, dm)
vj = uhf.get_j(mol, dm)
vk[0] = vk[0] - (vj[0] + vj[1])
vk[1] = vk[1] - (vj[0] + vj[1])
vk_frz = np.zeros((2, nmoa-frozen, nmob-frozen))
for s in range(2):
vk_frz[s] = reduce(numpy.dot, (mo_coeff[s].T, vk[s], mo_coeff[s]))
vk = vk_frz
# Grids for integration on imaginary axis
freqs,wts = _get_scaled_legendre_roots(nw)
# Compute self-energy on imaginary axis i*[0,iw_cutoff]
sigmaI,omega = get_sigma_diag(gw, orbs, Lpq, freqs, wts, iw_cutoff=5.)
# Analytic continuation
if gw.ac == 'twopole':
coeff_a = AC_twopole_diag(sigmaI[0], omega[0], orbs, nocca)
coeff_b = AC_twopole_diag(sigmaI[1], omega[1], orbs, noccb)
elif gw.ac == 'pade':
coeff_a, omega_fit_a = AC_pade_thiele_diag(sigmaI[0], omega[0])
coeff_b, omega_fit_b = AC_pade_thiele_diag(sigmaI[1], omega[1])
omega_fit = np.asarray((omega_fit_a,omega_fit_b))
coeff = np.asarray((coeff_a,coeff_b))
conv = True
homo = max(mo_energy[0][nocca-1], mo_energy[1][noccb-1])
lumo = min(mo_energy[0][nocca], mo_energy[1][noccb])
ef = (homo+lumo)/2.
mf_mo_energy = mo_energy.copy()
mo_energy = np.zeros_like(np.asarray(gw._scf.mo_energy))
for s in range(2):
for p in orbs:
if gw.linearized:
# linearized G0W0
de = 1e-6
ep = mf_mo_energy[s][p]
#TODO: analytic sigma derivative
if gw.ac == 'twopole':
sigmaR = two_pole(ep-ef, coeff[s,:,p-orbs[0]]).real
dsigma = two_pole(ep-ef+de, coeff[s,:,p-orbs[0]]).real - sigmaR.real
elif gw.ac == 'pade':
sigmaR = pade_thiele(ep-ef, omega_fit[s,p-orbs[0]], coeff[s,:,p-orbs[0]]).real
dsigma = pade_thiele(ep-ef+de, omega_fit[s,p-orbs[0]], coeff[s,:,p-orbs[0]]).real - sigmaR.real
zn = 1.0/(1.0-dsigma/de)
e = ep + zn*(sigmaR.real + vk[s,p,p] - v_mf[s,p,p])
mo_energy[s,p+frozen] = e
else:
# self-consistently solve QP equation
def quasiparticle(omega):
if gw.ac == 'twopole':
sigmaR = two_pole(omega-ef, coeff[s,:,p-orbs[0]]).real
elif gw.ac == 'pade':
sigmaR = pade_thiele(omega-ef, omega_fit[s,p-orbs[0]], coeff[s,:,p-orbs[0]]).real
return omega - mf_mo_energy[s][p] - (sigmaR.real + vk[s,p,p] - v_mf[s,p,p])
try:
e = newton(quasiparticle, mf_mo_energy[s][p], tol=1e-6, maxiter=100)
mo_energy[s,p+frozen] = e
except RuntimeError:
conv = False
mo_coeff = gw._scf.mo_coeff
if gw.verbose >= logger.DEBUG:
numpy.set_printoptions(threshold=nmoa)
logger.debug(gw, ' GW mo_energy spin-up =\n%s', mo_energy[0])
logger.debug(gw, ' GW mo_energy spin-down =\n%s', mo_energy[1])
numpy.set_printoptions(threshold=1000)
return conv, mo_energy, mo_coeff
def get_rho_response(omega, mo_energy, Lpqa, Lpqb):
'''
Compute density response function in auxiliary basis at freq iw
'''
naux, nocca, nvira = Lpqa.shape
naux, noccb, nvirb = Lpqb.shape
eia_a = mo_energy[0,:nocca,None] - mo_energy[0,None,nocca:]
eia_b = mo_energy[1,:noccb,None] - mo_energy[1,None,noccb:]
eia_a = eia_a/(omega**2+eia_a*eia_a)
eia_b = eia_b/(omega**2+eia_b*eia_b)
Pia_a = einsum('Pia,ia->Pia',Lpqa,eia_a)
Pia_b = einsum('Pia,ia->Pia',Lpqb,eia_b)
# Response from both spin-up and spin-down density
Pi = 2.* (einsum('Pia,Qia->PQ',Pia_a,Lpqa) + einsum('Pia,Qia->PQ',Pia_b,Lpqb))
return Pi
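# Sketch of the working equation implemented by the contractions above (e are
# spin-orbital energies, L the density-fitting 3-center integrals; the factor
# of 2 comes from combining the +iw and -iw poles):
#
#   Pi_PQ(iw) = 2 * sum_sigma sum_{i occ, a vir}
#               L^sigma_{P,ia} L^sigma_{Q,ia} (e_i - e_a) / (w^2 + (e_i - e_a)^2)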
def get_sigma_diag(gw, orbs, Lpq, freqs, wts, iw_cutoff=None):
'''
Compute GW correlation self-energy (diagonal elements)
in MO basis on imaginary axis
'''
mo_energy = _mo_energy_without_core(gw, gw._scf.mo_energy)
nocca, noccb = gw.nocc
nmoa, nmob = gw.nmo
nw = len(freqs)
naux = Lpq[0].shape[0]
norbs = len(orbs)
# TODO: Treatment of degeneracy
homo = max(mo_energy[0][nocca-1], mo_energy[1][noccb-1])
lumo = min(mo_energy[0][nocca], mo_energy[1][noccb])
if (lumo-homo) < 1e-3:
logger.warn(gw, 'GW not well-defined for degeneracy!')
ef = (homo+lumo)/2.
# Integration on numerical grids
if iw_cutoff is not None:
nw_sigma = sum(iw < iw_cutoff for iw in freqs) + 1
else:
nw_sigma = nw + 1
# Compute occ for -iw and vir for iw separately
# to avoid branch cuts in analytic continuation
omega_occ = np.zeros((nw_sigma), dtype=np.complex128)
omega_vir = np.zeros((nw_sigma), dtype=np.complex128)
omega_occ[0] = 0.j
omega_vir[0] = 0.j
omega_occ[1:] = -1j*freqs[:(nw_sigma-1)]
omega_vir[1:] = 1j*freqs[:(nw_sigma-1)]
orbs_occ_a = [i for i in orbs if i < nocca]
orbs_occ_b = [i for i in orbs if i < noccb]
norbs_occ_a = len(orbs_occ_a)
norbs_occ_b = len(orbs_occ_b)
emo_occ_a = omega_occ[None,:] + ef - mo_energy[0,:,None]
emo_occ_b = omega_occ[None,:] + ef - mo_energy[1,:,None]
emo_vir_a = omega_vir[None,:] + ef - mo_energy[0,:,None]
emo_vir_b = omega_vir[None,:] + ef - mo_energy[1,:,None]
sigma = np.zeros((2,norbs,nw_sigma),dtype=np.complex128)
omega = np.zeros((2,norbs,nw_sigma),dtype=np.complex128)
for s in range(2):
for p in range(norbs):
orbp = orbs[p]
if orbp < gw.nocc[s]:
omega[s,p] = omega_occ.copy()
else:
omega[s,p] = omega_vir.copy()
for w in range(nw):
Pi = get_rho_response(freqs[w], mo_energy, Lpq[0,:,:nocca,nocca:], Lpq[1,:,:noccb,noccb:])
Pi_inv = np.linalg.inv(np.eye(naux)-Pi)-np.eye(naux)
g0_occ_a = wts[w] * emo_occ_a / (emo_occ_a**2+freqs[w]**2)
g0_vir_a = wts[w] * emo_vir_a / (emo_vir_a**2+freqs[w]**2)
g0_occ_b = wts[w] * emo_occ_b / (emo_occ_b**2+freqs[w]**2)
g0_vir_b = wts[w] * emo_vir_b / (emo_vir_b**2+freqs[w]**2)
Qnm_a = einsum('Pnm,PQ->Qnm',Lpq[0][:,orbs,:],Pi_inv)
Qnm_b = einsum('Pnm,PQ->Qnm',Lpq[1][:,orbs,:],Pi_inv)
Wmn_a = einsum('Qnm,Qmn->mn',Qnm_a,Lpq[0][:,:,orbs])
Wmn_b = einsum('Qnm,Qmn->mn',Qnm_b,Lpq[1][:,:,orbs])
sigma[0,:norbs_occ_a] += -einsum('mn,mw->nw',Wmn_a[:,:norbs_occ_a],g0_occ_a)/np.pi
sigma[0,norbs_occ_a:] += -einsum('mn,mw->nw',Wmn_a[:,norbs_occ_a:],g0_vir_a)/np.pi
sigma[1,:norbs_occ_b] += -einsum('mn,mw->nw',Wmn_b[:,:norbs_occ_b],g0_occ_b)/np.pi
sigma[1,norbs_occ_b:] += -einsum('mn,mw->nw',Wmn_b[:,norbs_occ_b:],g0_vir_b)/np.pi
return sigma, omega
def _get_scaled_legendre_roots(nw):
"""
Scale nw Legendre roots, which lie in the
interval [-1, 1], so that they lie in [0, inf)
Ref: www.cond-mat.de/events/correl19/manuscripts/ren.pdf
Returns:
freqs : 1D ndarray
wts : 1D ndarray
"""
freqs, wts = np.polynomial.legendre.leggauss(nw)
x0 = 0.5
freqs_new = x0*(1.+freqs)/(1.-freqs)
wts = wts*2.*x0/(1.-freqs)**2
return freqs_new, wts
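# Change of variables used above, stated for clarity: a Gauss-Legendre node
# x in [-1, 1] is mapped to w = x0*(1 + x)/(1 - x) in [0, inf), and the weight
# picks up the Jacobian dw/dx = 2*x0/(1 - x)^2.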
def _get_clenshaw_curtis_roots(nw):
"""
    Clenshaw-Curtis quadrature on [0, inf)
Ref: J. Chem. Phys. 132, 234114 (2010)
Returns:
freqs : 1D ndarray
wts : 1D ndarray
"""
freqs = np.zeros(nw)
wts = np.zeros(nw)
a = 0.2
for w in range(nw):
t = (w+1.0)/nw * np.pi/2.
freqs[w] = a / np.tan(t)
if w != nw-1:
wts[w] = a*np.pi/2./nw/(np.sin(t)**2)
else:
wts[w] = a*np.pi/4./nw/(np.sin(t)**2)
return freqs[::-1], wts[::-1]
def two_pole_fit(coeff, omega, sigma):
cf = coeff[:5] + 1j*coeff[5:]
f = cf[0] + cf[1]/(omega+cf[3]) + cf[2]/(omega+cf[4]) - sigma
f[0] = f[0]/0.01
return np.array([f.real,f.imag]).reshape(-1)
def two_pole(freqs, coeff):
cf = coeff[:5] + 1j*coeff[5:]
return cf[0] + cf[1]/(freqs+cf[3]) + cf[2]/(freqs+cf[4])
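# Model shared by the two functions above (coeff packs 5 real parts followed by
# 5 imaginary parts, c_k = coeff[k] + 1j*coeff[k+5]):
#
#   Sigma(w) ~= c0 + c1/(w + c3) + c2/(w + c4)
#
# two_pole_fit returns the (weighted) residual of this model against the
# sampled self-energy, for use with scipy's least_squares.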
def AC_twopole_diag(sigma, omega, orbs, nocc):
"""
Analytic continuation to real axis using a two-pole model
Returns:
coeff: 2D array (ncoeff, norbs)
"""
norbs, nw = sigma.shape
coeff = np.zeros((10,norbs))
for p in range(norbs):
# target = np.array([sigma[p].real,sigma[p].imag]).reshape(-1)
if orbs[p] < nocc:
x0 = np.array([0, 1, 1, 1, -1, 0, 0, 0, -1.0, -0.5])
else:
x0 = np.array([0, 1, 1, 1, -1, 0, 0, 0, 1.0, 0.5])
#TODO: analytic gradient
xopt = least_squares(two_pole_fit, x0, jac='3-point', method='trf', xtol=1e-10,
gtol = 1e-10, max_nfev=1000, verbose=0, args=(omega[p], sigma[p]))
if xopt.success is False:
print('WARN: 2P-Fit Orb %d not converged, cost function %e'%(p,xopt.cost))
coeff[:,p] = xopt.x.copy()
return coeff
def thiele(fn,zn):
nfit = len(zn)
g = np.zeros((nfit,nfit),dtype=np.complex128)
g[:,0] = fn.copy()
for i in range(1,nfit):
g[i:,i] = (g[i-1,i-1]-g[i:,i-1])/((zn[i:]-zn[i-1])*g[i:,i-1])
a = g.diagonal()
return a
def pade_thiele(freqs,zn,coeff):
nfit = len(coeff)
X = coeff[-1]*(freqs-zn[-2])
for i in range(nfit-1):
idx = nfit-i-1
X = coeff[idx]*(freqs-zn[idx-1])/(1.+X)
X = coeff[0]/(1.+X)
return X
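# thiele() builds the continued-fraction coefficients a_k from the diagonal of
# Thiele's reciprocal-difference table; pade_thiele() then evaluates the Pade
# approximant bottom-up, a sketch of the recursion implemented above:
#
#   C(z) = a_0 / (1 + a_1*(z - z_0) / (1 + a_2*(z - z_1) / (1 + ...)))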
def AC_pade_thiele_diag(sigma, omega):
"""
Analytic continuation to real axis using a Pade approximation
from Thiele's reciprocal difference method
Reference: J. Low Temp. Phys. 29, 179 (1977)
Returns:
coeff: 2D array (ncoeff, norbs)
omega: 2D array (norbs, npade)
"""
idx = range(1,40,6)
sigma1 = sigma[:,idx].copy()
sigma2 = sigma[:,(idx[-1]+4)::4].copy()
sigma = np.hstack((sigma1,sigma2))
omega1 = omega[:,idx].copy()
omega2 = omega[:,(idx[-1]+4)::4].copy()
omega = np.hstack((omega1,omega2))
norbs, nw = sigma.shape
npade = nw // 2
coeff = np.zeros((npade*2,norbs),dtype=np.complex128)
for p in range(norbs):
coeff[:,p] = thiele(sigma[p,:npade*2], omega[p,:npade*2])
return coeff, omega[:,:npade*2]
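# Hypothetical illustration, not part of the original module: a sketch of how
# the Pade data returned by AC_pade_thiele_diag() is typically evaluated near
# the real axis with pade_thiele().  Here p is an orbital index while ep and
# ef stand for an orbital energy and the Fermi level supplied by the caller;
# the small broadening eta is an illustrative assumption, not a value taken
# from the original kernel.
def _evaluate_pade_sketch(coeff, omega, p, ep, ef, eta=1e-2):
    # coeff has shape (ncoeff, norbs) and omega has shape (norbs, ncoeff),
    # matching the return values documented above.
    return pade_thiele(ep - ef + 1j * eta, omega[p], coeff[:, p])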
def _mo_energy_without_core(gw, mo_energy):
moidx = get_frozen_mask(gw)
mo_energy = (mo_energy[0][moidx[0]], mo_energy[1][moidx[1]])
return np.asarray(mo_energy)
def _mo_without_core(gw, mo):
moidx = get_frozen_mask(gw)
mo = (mo[0][:,moidx[0]], mo[1][:,moidx[1]])
return np.asarray(mo)
class UGWAC(lib.StreamObject):
linearized = getattr(__config__, 'gw_ugw_UGW_linearized', False)
# Analytic continuation: pade or twopole
ac = getattr(__config__, 'gw_ugw_UGW_ac', 'pade')
def __init__(self, mf, frozen=None):
self.mol = mf.mol
self._scf = mf
self.verbose = self.mol.verbose
self.stdout = self.mol.stdout
self.max_memory = mf.max_memory
self.frozen = frozen
# DF-GW must use density fitting integrals
if getattr(mf, 'with_df', None):
self.with_df = mf.with_df
else:
self.with_df = df.DF(mf.mol)
self.with_df.auxbasis = df.make_auxbasis(mf.mol, mp2fit=True)
self._keys.update(['with_df'])
##################################################
# don't modify the following attributes, they are not input options
self._nocc = None
self._nmo = None
# self.mo_energy: GW quasiparticle energy, not scf mo_energy
self.mo_energy = None
self.mo_coeff = mf.mo_coeff
self.mo_occ = mf.mo_occ
self.sigma = None
keys = set(('linearized','ac'))
self._keys = set(self.__dict__.keys()).union(keys)
def dump_flags(self):
log = logger.Logger(self.stdout, self.verbose)
log.info('')
log.info('******** %s ********', self.__class__)
log.info('method = %s', self.__class__.__name__)
nocca, noccb = self.nocc
nmoa, nmob = self.nmo
nvira = nmoa - nocca
nvirb = nmob - noccb
log.info('GW (nocca, noccb) = (%d, %d), (nvira, nvirb) = (%d, %d)',
nocca, noccb, nvira, nvirb)
if self.frozen is not None:
log.info('frozen orbitals %s', str(self.frozen))
logger.info(self, 'use perturbative linearized QP eqn = %s', self.linearized)
logger.info(self, 'analytic continuation method = %s', self.ac)
return self
@property
def nocc(self):
return self.get_nocc()
@nocc.setter
def nocc(self, n):
self._nocc = n
@property
def nmo(self):
return self.get_nmo()
@nmo.setter
def nmo(self, n):
self._nmo = n
get_nocc = get_nocc
get_nmo = get_nmo
get_frozen_mask = get_frozen_mask
def kernel(self, mo_energy=None, mo_coeff=None, Lpq=None, orbs=None, nw=100, vhf_df=False):
"""
Input:
orbs: self-energy orbs
nw: grid number
vhf_df: whether using density fitting for HF exchange
Output:
mo_energy: GW quasiparticle energy
"""
if mo_coeff is None:
mo_coeff = _mo_without_core(self, self._scf.mo_coeff)
if mo_energy is None:
mo_energy = _mo_energy_without_core(self, self._scf.mo_energy)
cput0 = (logger.process_clock(), logger.perf_counter())
self.dump_flags()
self.converged, self.mo_energy, self.mo_coeff = \
kernel(self, mo_energy, mo_coeff,
Lpq=Lpq, orbs=orbs, nw=nw, vhf_df=vhf_df, verbose=self.verbose)
logger.warn(self, 'GW QP energies may not be sorted from min to max')
logger.timer(self, 'GW', *cput0)
return self.mo_energy
def ao2mo(self, mo_coeff=None):
nmoa, nmob = self.nmo
nao = self.mo_coeff[0].shape[0]
naux = self.with_df.get_naoaux()
mem_incore = (nmoa**2*naux + nmob**2*naux + nao**2*naux) * 8/1e6
mem_now = lib.current_memory()[0]
moa = numpy.asarray(mo_coeff[0], order='F')
mob = numpy.asarray(mo_coeff[1], order='F')
ijslicea = (0, nmoa, 0, nmoa)
ijsliceb = (0, nmob, 0, nmob)
Lpqa = None
Lpqb = None
if (mem_incore + mem_now < 0.99*self.max_memory) or self.mol.incore_anyway:
Lpqa = _ao2mo.nr_e2(self.with_df._cderi, moa, ijslicea, aosym='s2', out=Lpqa)
Lpqb = _ao2mo.nr_e2(self.with_df._cderi, mob, ijsliceb, aosym='s2', out=Lpqb)
return np.asarray((Lpqa.reshape(naux,nmoa,nmoa),Lpqb.reshape(naux,nmob,nmob)))
else:
logger.warn(self, 'Memory may not be enough!')
raise NotImplementedError
if __name__ == '__main__':
from pyscf import gto, dft
mol = gto.Mole()
mol.verbose = 4
mol.atom = 'O 0 0 0'
mol.basis = 'aug-cc-pvdz'
mol.spin = 2
mol.build()
mf = dft.UKS(mol)
mf.xc = 'pbe0'
mf.kernel()
nocca = (mol.nelectron + mol.spin)//2
noccb = mol.nelectron - nocca
nmo = len(mf.mo_energy[0])
nvira = nmo - nocca
nvirb = nmo - noccb
gw = UGWAC(mf)
gw.frozen = 0
gw.linearized = False
gw.ac = 'pade'
gw.kernel(orbs=range(nocca-3,nocca+3))
assert (abs(gw.mo_energy[0][nocca-1]- -0.521932084529) < 1e-5)
assert (abs(gw.mo_energy[0][nocca] -0.167547592784) < 1e-5)
assert (abs(gw.mo_energy[1][noccb-1]- -0.464605523684) < 1e-5)
assert (abs(gw.mo_energy[1][noccb]- -0.0133557793765) < 1e-5)
|
sunqm/pyscf
|
pyscf/gw/ugw_ac.py
|
Python
|
apache-2.0
| 18,653
|
[
"PySCF"
] |
b10f2e5064b25ae5b9b507e2b075a6a0146f2af2f5688f63976e43f1a8da5e21
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_I import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 123:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
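# Hypothetical helper, not part of the original script: projects a data
# matrix onto the first k principal components returned by pca() above.
# X is assumed to have the same layout as the input to pca() (an np.matrix
# with samples along axis 1), so the centering below matches the one done
# inside pca().  The helper is for illustration and is never called.
def project_onto_pcs(X, vec, mean_X, k):
    M = (X - mean_X)     # same centering used inside pca()
    W = vec[:, 0:k]      # leading k eigenvectors
    return dot(W.T, M)   # k x num_samples projection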
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:19]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but sometimes useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=7)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True')
#show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,30.3,0,1.2])
grid('True')
show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Scaled_Features/results/4_categories/test10_cross_validate_categories_1200ms_scaled_method_i.py
|
Python
|
mit
| 4,665
|
[
"Mayavi"
] |
048ccb7ef4fd5bd568d19ec8efff212ae57a564d80c371441607bc9a7c4d78f7
|
# Adaptive Comfort Calculator
#
# Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Ladybug.
#
# Copyright (c) 2013-2015, Chris Mackey <Chris@MackeyArchitecture.com>
# Ladybug is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Ladybug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ladybug; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to calculate the adaptive comfort for a given set of input conditions.
This component will output a stream of 0's and 1's indicating whether certain conditions are comfortable given the prevailing mean monthly temperature that occupants tend to adapt themselves to.
This component will also output a series of integer numbers that indicate the following: -1 = The average monthly temperature is too extreme for the adaptive model. 0 = The input conditions are too cold for occupants. 1 = The input conditions are comfortable for occupants. 2 = The input conditions are too hot for occupants.
Lastly, this component outputs the percent of time comfortable, hot, cold and monthly extreme as well as a list of numbers indicating the upper temperature of comfort and lower temperature of comfort.
_
The adaptive comfort model was created in response to the shortcomings of the PMV model that became apparent when it was applied to buildings without air conditioning. Namely, the PMV model was over-estimating the discomfort of occupants in warm conditions of naturally ventilated buildings.
Accordingly, the adaptive comfort model was built on the work of hundreds of field studies in which people in naturally ventilated buildings were asked about how comfortable they were.
Results showed that users tended to adapt themselves to the monthly mean temperature and would be comfortable in buildings so long as the building temperature remained around a value close to that monthly mean. This situation held true so long as the monthly mean temperature remained above 10 C and below 33.5 C.
_
The comfort models that make this component possible were translated to python from a series of validated javascript comfort models coded at the Berkeley Center for the Built Environment (CBE). The Adaptive model used by both the CBE Tool and this component was originally published in ASHRAE 55.
Special thanks goes to the authors of the online CBE Thermal Comfort Tool who first coded the javascript: Hoyt Tyler, Schiavon Stefano, Piccioli Alberto, Moon Dustin, and Steinfeld Kyle. http://cbe.berkeley.edu/comforttool/
-
Provided by Ladybug 0.0.61
Args:
        _dryBulbTemperature: A number representing the dry bulb temperature of the air in degrees Celsius. This input can also accept a list of temperatures representing conditions at different times or the direct output of dryBulbTemperature from the Import EPW component.
        meanRadiantTemperature_: A number representing the mean radiant temperature of the surrounding surfaces in degrees Celsius. If no value is plugged in here, this component will assume that the mean radiant temperature is equal to the air temperature value above. This input can also accept a list of temperatures representing conditions at different times or the direct output of dryBulbTemperature from the Import EPW component.
        _prevailingOutdoorTemp: A number representing the average monthly outdoor temperature in degrees Celsius. This average monthly outdoor temperature is the temperature that occupants in naturally ventilated buildings tend to adapt themselves to. For this reason, this input can also accept the direct output of dryBulbTemperature from the Import EPW component if hourly values for the full year are connected for the other inputs of this component.
        windSpeed_: A number representing the wind speed of the air in meters per second. If no value is plugged in here, this component will assume a very low wind speed of 0.05 m/s, characteristic of most naturally ventilated buildings. This input can also accept a list of wind speeds representing conditions at different times or the direct output of windSpeed from the Import EPW component.
------------------------------: ...
        comfortPar_: Optional comfort parameters from the "Ladybug_Adaptive Comfort Parameters" component. Use this to select either the US or European comfort model, set the threshold of acceptability for comfort or compute prevailing outdoor temperature by a monthly average or running mean. These comfortPar can also be used to set a levelOfConditioning, which makes use of research outside of the official published standards that surveyed people in air conditioned buildings.
        analysisPeriod_: An optional analysis period from the Analysis Period component. If no Analysis period is given and epw data from the ImportEPW component has been connected, the analysis will be run for the entire year.
_runIt: Set to "True" to run the component and calculate the adaptive comfort metrics.
Returns:
readMe!: ...
------------------------------: ...
comfortableOrNot: A stream of 0's and 1's (or "False" and "True" values) indicating whether occupants are comfortable under the input conditions given the fact that these occupants tend to adapt themselves to the prevailing mean monthly temperature. 0 indicates that a person is not comfortable while 1 indicates that a person is comfortable.
        conditionOfPerson: A stream of integer values from -1 to +1 that correspond to each hour of the input data and indicate the following: -1 = The input conditions are too cold for occupants. 0 = The input conditions are comfortable for occupants. +1 = The input conditions are too hot for occupants.
        degreesFromTarget: A stream of temperature values in degrees Celsius indicating how far from the target temperature the conditions of the people are. Positive values indicate conditions hotter than the target temperature while negative values indicate degrees below the target temperature.
------------------------------: ...
        targetTemperature: A stream of temperature values in degrees Celsius indicating the mean target temperature or neutral temperature that most people will find comfortable.
        upperTemperatureBound: A stream of temperature values in degrees Celsius indicating the highest possible temperature in the comfort range for each hour of the input conditions.
        lowerTemperatureBound: A stream of temperature values in degrees Celsius indicating the lowest possible temperature in the comfort range for each hour of the input conditions.
------------------------------: ...
percentOfTimeComfortable: The percent of the input data for which the occupants are comfortable. Comfortable conditions are when the indoor temperature is within the comfort range determined by the prevailing outdoor temperature.
percentHotCold: A list of 2 numerical values indicating the following: 0) The percent of the input data for which the occupants are too hot. 1) The percent of the input data for which the occupants are too cold.
"""
ghenv.Component.Name = "Ladybug_Adaptive Comfort Calculator"
ghenv.Component.NickName = 'AdaptiveComfortCalculator'
ghenv.Component.Message = 'VER 0.0.61\nNOV_05_2015'
ghenv.Component.Category = "Ladybug"
ghenv.Component.SubCategory = "1 | AnalyzeWeatherData"
#compatibleLBVersion = VER 0.0.60\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "3"
except: pass
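# Hypothetical sketch, not part of the original component: the ASHRAE-55
# adaptive relationship described in the component description above is
# usually written as
#     T_neutral = 0.31 * T_prevailingOutdoor + 17.8    (degrees Celsius)
# with a band of roughly +/-2.5 C (90% acceptability) or +/-3.5 C (80%
# acceptability) around it.  The component itself takes these values from
# lb_comfortModels.comfAdaptiveComfortASH55 further below; this helper is
# only an illustration and is never called.
def _adaptiveNeutralTempSketch(prevailingOutdoorTemp, offset=2.5):
    neutralTemp = 0.31 * prevailingOutdoorTemp + 17.8
    return neutralTemp - offset, neutralTemp, neutralTemp + offset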
import Grasshopper.Kernel as gh
import math
import scriptcontext as sc
# Give people proper warning if they hook up data directly from the Import EPW component.
outdoorConditions = False
try:
if _dryBulbTemperature[2] == "Dry Bulb Temperature":
outdoorConditions = True
except: pass
try:
if meanRadiantTemperature_[2] == "Dry Bulb Temperature" or meanRadiantTemperature_[2] == "Solar-Adjusted Mean Radiant Temperature":
outdoorConditions = True
except: pass
try:
if windSpeed_[2] == "Wind Speed":
outdoorConditions = True
except: pass
if outdoorConditions == True:
    message1 = "Because the adaptive comfort model is derived from indoor comfort studies and you have hooked up outdoor data, the values out of this component only indicate how much\n" + \
    "the outdoor conditions should be changed in order to make indoor conditions comfortable. They do not indicate whether someone will actually be comfortable outdoors.\n" + \
    "If you are interested in whether the outdoors are actually comfortable, you should use the Ladybug Outdoor Comfort Calculator."
print message1
m = gh.GH_RuntimeMessageLevel.Remark
ghenv.Component.AddRuntimeMessage(m, message1)
ghenv.Component.Attributes.Owner.OnPingDocument()
def checkTheInputs():
#Define a value that will indicate whether someone has hooked up epw data.
epwData = False
epwStr = []
epwPrevailTemp = False
epwPrevailStr = []
coldTimes = []
#Check to see if there are any comfortPar connected and, if not, set the defaults to ASHRAE.
checkData6 = True
ASHRAEorEN = True
comfClass = False
avgMonthOrRunMean = True
levelOfConditioning = 0
if comfortPar_ != []:
try:
ASHRAEorEN = comfortPar_[0]
comfClass = comfortPar_[1]
avgMonthOrRunMean = comfortPar_[2]
levelOfConditioning = comfortPar_[3]
except:
checkData6 = False
warning = 'The connected comfortPar_ are not valid comfort parameters from the "Ladybug_Adaptive Comfort Parameters" component.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
#Define a function to duplicate data
def duplicateData(data, calcLength):
dupData = []
for count in range(calcLength):
dupData.append(data[0])
return dupData
#Check lenth of the _dryBulbTemperature list and evaluate the contents.
checkData1 = False
airTemp = []
airMultVal = False
if len(_dryBulbTemperature) != 0:
try:
if "Temperature" in _dryBulbTemperature[2]:
airTemp = _dryBulbTemperature[7:]
checkData1 = True
epwData = True
epwStr = _dryBulbTemperature[0:7]
except: pass
if checkData1 == False:
for item in _dryBulbTemperature:
try:
airTemp.append(float(item))
checkData1 = True
except: checkData1 = False
if len(airTemp) > 1: airMultVal = True
if checkData1 == False:
            warning = '_dryBulbTemperature input does not contain valid temperature values in degrees Celsius.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
        print 'Connect a temperature in degrees Celsius for _dryBulbTemperature'
#Check lenth of the meanRadiantTemperature_ list and evaluate the contents.
checkData2 = False
radTemp = []
radMultVal = False
if len(meanRadiantTemperature_) != 0:
try:
if "Temperature" in meanRadiantTemperature_[2]:
radTemp = meanRadiantTemperature_[7:]
checkData2 = True
epwData = True
epwStr = meanRadiantTemperature_[0:7]
except: pass
if checkData2 == False:
for item in meanRadiantTemperature_:
try:
radTemp.append(float(item))
checkData2 = True
except: checkData2 = False
if len(radTemp) > 1: radMultVal = True
if checkData2 == False:
            warning = 'meanRadiantTemperature_ input does not contain valid temperature values in degrees Celsius.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
checkData2 = True
radTemp = airTemp
if len (radTemp) > 1: radMultVal = True
print 'No value connected for meanRadiantTemperature_. It will be assumed that the radiant temperature is the same as the air temperature.'
#Check lenth of the _prevailingOutdoorTemp list and evaluate the contents.
checkData3 = False
prevailTemp = []
prevailMultVal = False
if len(_prevailingOutdoorTemp) != 0:
try:
if _prevailingOutdoorTemp[2] == 'Dry Bulb Temperature' and _prevailingOutdoorTemp[3] == 'C' and _prevailingOutdoorTemp[4] == 'Hourly' and _prevailingOutdoorTemp[5] == (1, 1, 1) and _prevailingOutdoorTemp[6] == (12, 31, 24):
if avgMonthOrRunMean == True:
#Calculate the monthly average temperatures.
monthPrevailList = [float(sum(_prevailingOutdoorTemp[7:751])/744), float(sum(_prevailingOutdoorTemp[751:1423])/672), float(sum(_prevailingOutdoorTemp[1423:2167])/744), float(sum(_prevailingOutdoorTemp[2167:2887])/720), float(sum(_prevailingOutdoorTemp[2887:3631])/744), float(sum(_prevailingOutdoorTemp[3631:4351])/720), float(sum(_prevailingOutdoorTemp[4351:5095])/744), float(sum(_prevailingOutdoorTemp[5095:5839])/744), float(sum(_prevailingOutdoorTemp[5839:6559])/720), float(sum(_prevailingOutdoorTemp[6559:7303])/744), float(sum(_prevailingOutdoorTemp[7303:8023])/720), float(sum(_prevailingOutdoorTemp[8023:])/744)]
hoursInMonth = [744, 672, 744, 720, 744, 720, 744, 744, 720, 744, 720, 744]
for monthCount, monthPrevailTemp in enumerate(monthPrevailList):
prevailTemp.extend(duplicateData([monthPrevailTemp], hoursInMonth[monthCount]))
if monthPrevailTemp < 10: coldTimes.append(monthCount+1)
else:
#Calculate a running mean temperature.
alpha = 0.8
divisor = 1 + alpha + math.pow(alpha,2) + math.pow(alpha,3) + math.pow(alpha,4) + math.pow(alpha,5)
dividend = (sum(_prevailingOutdoorTemp[-24:-1] + [_prevailingOutdoorTemp[-1]])/24) + (alpha*(sum(_prevailingOutdoorTemp[-48:-24])/24)) + (math.pow(alpha,2)*(sum(_prevailingOutdoorTemp[-72:-48])/24)) + (math.pow(alpha,3)*(sum(_prevailingOutdoorTemp[-96:-72])/24)) + (math.pow(alpha,4)*(sum(_prevailingOutdoorTemp[-120:-96])/24)) + (math.pow(alpha,5)*(sum(_prevailingOutdoorTemp[-144:-120])/24))
startingTemp = dividend/divisor
if startingTemp < 10: coldTimes.append(0)
outdoorTemp = _prevailingOutdoorTemp[7:]
startingMean = sum(outdoorTemp[:24])/24
dailyRunMeans = [startingTemp]
dailyMeans = [startingMean]
prevailTemp.extend(duplicateData([startingTemp], 24))
startHour = 24
for count in range(364):
dailyMean = sum(outdoorTemp[startHour:startHour+24])/24
dailyRunMeanTemp = ((1-alpha)*dailyMeans[-1]) + alpha*dailyRunMeans[-1]
if dailyRunMeanTemp < 10: coldTimes.append(count+1)
prevailTemp.extend(duplicateData([dailyRunMeanTemp], 24))
dailyRunMeans.append(dailyRunMeanTemp)
dailyMeans.append(dailyMean)
startHour +=24
checkData3 = True
epwPrevailTemp = True
epwPrevailStr = _prevailingOutdoorTemp[0:7]
except: pass
if checkData3 == False:
checkData3 = True
for item in _prevailingOutdoorTemp:
try:
prevailTemp.append(float(item))
except: checkData3 = False
if len(prevailTemp) > 1: prevailMultVal = True
if checkData3 == False:
            warning = '_prevailingOutdoorTemp input must either be the annual hourly dryBulbTemperature from the ImportEPW component, a list of temperature values that matches the length of the other inputs, or a single temperature to be used for all cases.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
        print 'Connect a temperature in degrees Celsius for _prevailingOutdoorTemp'
#Check lenth of the windSpeed_ list and evaluate the contents.
checkData4 = False
windSpeed = []
windMultVal = False
nonPositive = True
if len(windSpeed_) != 0:
try:
if windSpeed_[2] == 'Wind Speed':
windSpeed = windSpeed_[7:]
checkData4 = True
epwData = True
if epwStr == []:
epwStr = windSpeed_[0:7]
except: pass
if checkData4 == False:
for item in windSpeed_:
try:
if float(item) >= 0:
windSpeed.append(float(item))
checkData4 = True
else: nonPositive = False
except: checkData4 = False
if nonPositive == False: checkData4 = False
if len(windSpeed) > 1: windMultVal = True
if checkData4 == False:
warning = 'windSpeed_ input does not contain valid wind speed in meters per second. Note that wind speed must be positive.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
checkData4 = True
windSpeed = [0.05]
print 'No value connected for windSpeed_. It will be assumed that the wind speed is a low 0.05 m/s.'
#Finally, for those lists of length greater than 1, check to make sure that they are all the same length.
checkData5 = False
if checkData1 == True and checkData2 == True and checkData3 == True and checkData4 == True:
if airMultVal == True or radMultVal == True or prevailMultVal == True or windMultVal == True:
listLenCheck = []
secondListLenCheck = []
if airMultVal == True:
listLenCheck.append(len(airTemp))
secondListLenCheck.append(len(airTemp))
if radMultVal == True:
listLenCheck.append(len(radTemp))
secondListLenCheck.append(len(radTemp))
if prevailMultVal == True: listLenCheck.append(len(prevailTemp))
if windMultVal == True:
listLenCheck.append(len(windSpeed))
secondListLenCheck.append(len(windSpeed))
if all(x == listLenCheck[0] for x in listLenCheck) == True:
checkData5 = True
calcLength = listLenCheck[0]
if airMultVal == False: airTemp = duplicateData(airTemp, calcLength)
if radMultVal == False: radTemp = duplicateData(radTemp, calcLength)
if prevailMultVal == False: prevailTemp = duplicateData(prevailTemp, calcLength)
if windMultVal == False: windSpeed = duplicateData(windSpeed, calcLength)
elif all(x == secondListLenCheck[0] for x in secondListLenCheck) == True and epwPrevailTemp == True and epwData == True and epwPrevailStr[5] == (1,1,1) and epwPrevailStr[6] == (12,31,24):
checkData5 = True
calcLength = listLenCheck[0]
if airMultVal == False: airTemp = duplicateData(airTemp, calcLength)
if radMultVal == False: radTemp = duplicateData(radTemp, calcLength)
if windMultVal == False: windSpeed = duplicateData(windSpeed, calcLength)
HOYs, mon, days = lb_preparation.getHOYsBasedOnPeriod([epwStr[5], epwStr[6]], 1)
newPrevailTemp = []
for hour in HOYs:
newPrevailTemp.append(prevailTemp[hour-1])
prevailTemp = newPrevailTemp
else:
calcLength = None
                warning = 'If you have put in lists with multiple values, the lengths of these lists must match across the parameters, or you should connect a single value for a given parameter to be applied to all values in the list.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
checkData5 = True
calcLength = 1
else:
calcLength = 0
#If all of the checkDatas have been good to go, let's give a final go ahead.
if checkData1 == True and checkData2 == True and checkData3 == True and checkData4 == True and checkData5 == True and checkData6 == True:
checkData = True
else:
checkData = False
#Let's return everything we need.
return checkData, epwData, epwStr, calcLength, airTemp, radTemp, prevailTemp, windSpeed, ASHRAEorEN, comfClass, avgMonthOrRunMean, coldTimes, levelOfConditioning
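# Hypothetical sketch, not part of the original component: the running mean
# branch inside checkTheInputs() above follows the recurrence
#     T_rm(today) = (1 - alpha) * T_mean(yesterday) + alpha * T_rm(yesterday)
# with alpha = 0.8, seeded from a weighted average of the previous six daily
# means.  The helper below reproduces only the day-to-day recurrence from a
# list of daily mean temperatures and a starting running mean; it is an
# illustration and is never called.
def _runningMeanSketch(dailyMeans, startingRunMean, alpha=0.8):
    runMeans = [startingRunMean]
    for dayMean in dailyMeans:
        runMeans.append((1 - alpha) * dayMean + alpha * runMeans[-1])
    return runMeans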
def main(checkData, epwData, epwStr, calcLength, airTemp, radTemp, prevailTemp, windSpeed, ASHRAEorEN, comfClass, avgMonthOrRunMean, coldTimes, levelOfConditioning, lb_preparation, lb_comfortModels):
#Check if there is an analysisPeriod_ connected and, if not, run it for the whole year.
individualCases = False
daysForMonths = lb_preparation.numOfDays
if calcLength == 8760 and len(analysisPeriod_)!=0 and epwData == True:
HOYS, months, days = lb_preparation.getHOYsBasedOnPeriod(analysisPeriod_, 1)
runPeriod = analysisPeriod_
calcLength = len(HOYS)
dayNums = []
for month in months:
if days[0] == 1 and days[-1] == 31: dayNums.extend(range(daysForMonths[month-1], daysForMonths[month]))
elif days[0] == 1 and days[-1] != 31: dayNums.extend(range(daysForMonths[month-1], daysForMonths[month-1]+days[-1]))
elif days[0] != 1 and days[-1] == 31: dayNums.extend(range(daysForMonths[month-1]+days[0], daysForMonths[month]))
else: dayNums.extend(range(daysForMonths[month-1]+days[0], daysForMonths[month-1]+days[-1]))
elif len(analysisPeriod_)==0 and epwData == True:
HOYS = range(calcLength)[1:] + [calcLength]
runPeriod = [epwStr[5], epwStr[6]]
months = [1,2,3,4,5,6,7,8,9,10,11,12]
dayNums = range(365)
else:
HOYS = range(calcLength)[1:] + [calcLength]
runPeriod = [(1,1,1), (12,31,24)]
months = []
days = []
individualCases = True
#Check to see if there are any times when the prevailing temperature is too cold and give a comment that we are using a non-standard model.
monthNames = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
if ASHRAEorEN == True: modelName = "ASHRAE 55"
else: modelName = "EN-15251"
if coldTimes != []:
coldThere = False
if avgMonthOrRunMean == True:
coldMsg = "The following months were too cold for the official " + modelName + " standard and have used a correlation from recent research:"
for month in months:
if month in coldTimes:
coldThere = True
coldMsg += '\n'
coldMsg += monthNames[month-1]
if coldThere == True:
print coldMsg
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Remark, coldMsg)
else:
totalColdInPeriod = []
for day in dayNums:
if day in coldTimes: totalColdInPeriod.append(day)
if totalColdInPeriod != []:
coldMsg = "There were " + str(len(totalColdInPeriod)) + " days of the analysis period when the outdoor temperatures were too cold for the official " + modelName + " standard. \n A correlation from recent research has been used in these cases."
print coldMsg
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Remark, coldMsg)
elif individualCases:
totalColdInPeriod = []
for temp in prevailTemp:
if temp < 10: totalColdInPeriod.append(temp)
if totalColdInPeriod != []:
coldMsg = "There were " + str(len(totalColdInPeriod)) + " cases when the prevailing outdoor temperatures were too cold for the official " + modelName + " standard. \n A correlation from recent research has been used in these cases."
print coldMsg
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Remark, coldMsg)
#If things are good, run it through the comfort model.
comfortableOrNot = []
extremeColdComfortableHot = []
upperTemperatureBound = []
lowerTemperatureBound = []
targetTemperature = []
degreesFromTarget = []
percentOfTimeComfortable = None
percentHotColdAndExtreme = []
if checkData == True and epwData == True and 'for' not in epwStr[2]:
targetTemperature.extend([epwStr[0], epwStr[1], 'Adaptive Target Temperature', 'C', epwStr[4], runPeriod[0], runPeriod[1]])
degreesFromTarget.extend([epwStr[0], epwStr[1], 'Degrees from Target Temperature', 'C', epwStr[4], runPeriod[0], runPeriod[1]])
comfortableOrNot.extend([epwStr[0], epwStr[1], 'Comfortable Or Not', 'Boolean', epwStr[4], runPeriod[0], runPeriod[1]])
extremeColdComfortableHot.extend([epwStr[0], epwStr[1], 'Adaptive Comfort', '-1 = Cold, 0 = Comfortable, 1 = Hot', epwStr[4], runPeriod[0], runPeriod[1]])
upperTemperatureBound.extend([epwStr[0], epwStr[1], 'Adaptive Upper Comfort Temperature', 'C', epwStr[4], runPeriod[0], runPeriod[1]])
lowerTemperatureBound.extend([epwStr[0], epwStr[1], 'Adaptive Lower Comfort Temperature', 'C', epwStr[4], runPeriod[0], runPeriod[1]])
elif checkData == True and epwData == True and 'for' in epwStr[2]:
targetTemperature.extend([epwStr[0], epwStr[1], 'Adaptive Target Temperature' + ' for ' + epwStr[2].split('for ')[-1], 'C', epwStr[4], runPeriod[0], runPeriod[1]])
degreesFromTarget.extend([epwStr[0], epwStr[1], 'Degrees from Target Temperature' + ' for ' + epwStr[2].split('for ')[-1], 'C', epwStr[4], runPeriod[0], runPeriod[1]])
comfortableOrNot.extend([epwStr[0], epwStr[1], 'Comfortable Or Not' + ' for ' + epwStr[2].split('for ')[-1], 'Boolean', epwStr[4], runPeriod[0], runPeriod[1]])
extremeColdComfortableHot.extend([epwStr[0], epwStr[1], 'Adaptive Comfort' + ' for ' + epwStr[2].split('for ')[-1], '-1 = Cold, 0 = Comfortable, 1 = Hot', epwStr[4], runPeriod[0], runPeriod[1]])
upperTemperatureBound.extend([epwStr[0], epwStr[1], 'Adaptive Upper Comfort Temperature' + ' for ' + epwStr[2].split('for ')[-1], 'C', epwStr[4], runPeriod[0], runPeriod[1]])
lowerTemperatureBound.extend([epwStr[0], epwStr[1], 'Adaptive Lower Comfort Temperature' + ' for ' + epwStr[2].split('for ')[-1], 'C', epwStr[4], runPeriod[0], runPeriod[1]])
if checkData == True:
try:
comfOrNot = []
extColdComfHot = []
upperTemp = []
lowerTemp = []
comfortTemp = []
degreesTarget = []
for count in HOYS:
# let the user cancel the process
if gh.GH_Document.IsEscapeKeyDown(): assert False
if ASHRAEorEN == True: comfTemp, distFromTarget, lowTemp, upTemp, comf, condition = lb_comfortModels.comfAdaptiveComfortASH55(airTemp[count-1], radTemp[count-1], prevailTemp[count-1], windSpeed[count-1], comfClass, levelOfConditioning)
else: comfTemp, distFromTarget, lowTemp, upTemp, comf, condition = lb_comfortModels.comfAdaptiveComfortEN15251(airTemp[count-1], radTemp[count-1], prevailTemp[count-1], windSpeed[count-1], comfClass, levelOfConditioning)
if comf == True:comfOrNot.append(1)
else: comfOrNot.append(0)
extColdComfHot.append(condition)
upperTemp.append(upTemp)
lowerTemp.append(lowTemp)
comfortTemp.append(comfTemp)
degreesTarget.append(distFromTarget)
percentOfTimeComfortable = [((sum(comfOrNot))/calcLength)*100]
extreme = []
hot = []
cold = []
for item in extColdComfHot:
if item == -1: cold.append(1.0)
elif item == 1: hot.append(1.0)
else: pass
percentHot = ((sum(hot))/calcLength)*100
percentCold = ((sum(cold))/calcLength)*100
percentHotCold = [percentHot, percentCold]
comfortableOrNot.extend(comfOrNot)
extremeColdComfortableHot.extend(extColdComfHot)
upperTemperatureBound.extend(upperTemp)
lowerTemperatureBound.extend(lowerTemp)
targetTemperature.extend(comfortTemp)
degreesFromTarget.extend(degreesTarget)
except:
comfortableOrNot = []
extremeColdComfortableHot = []
upperTemperatureBound = []
lowerTemperatureBound = []
targetTemperature = []
degreesFromTarget = []
percentOfTimeComfortable = []
percentHotCold = []
print "The calculation has been terminated by the user!"
e = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(e, "The calculation has been terminated by the user!")
#Return all of the info.
return comfortableOrNot, extremeColdComfortableHot, percentOfTimeComfortable, percentHotCold, upperTemperatureBound, lowerTemperatureBound, targetTemperature, degreesFromTarget
checkData = False
if sc.sticky.has_key('ladybug_release'):
try:
if not sc.sticky['ladybug_release'].isCompatible(ghenv.Component): pass
except:
        warning = "You need a newer version of Ladybug to use this component." + \
"Use updateLadybug component to update userObjects.\n" + \
"If you have already updated userObjects drag Ladybug_Ladybug component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
lb_preparation = sc.sticky["ladybug_Preparation"]()
lb_comfortModels = sc.sticky["ladybug_ComfortModels"]()
#Check the inputs and organize the incoming data into streams that can be run throught the comfort model.
checkData, epwData, epwStr, calcLength, airTemp, radTemp, prevailTemp, windSpeed, ASHRAEorEN, comfClass, avgMonthOrRunMean, coldTimes, levelOfConditioning = checkTheInputs()
else:
print "You should first let the Ladybug fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let the Ladybug fly...")
if _runIt == True and checkData == True:
results = main(checkData, epwData, epwStr, calcLength, airTemp, radTemp, prevailTemp, windSpeed, ASHRAEorEN, comfClass, avgMonthOrRunMean, coldTimes, levelOfConditioning, lb_preparation, lb_comfortModels)
if results!=-1:
comfortableOrNot, conditionOfPerson, percentOfTimeComfortable, \
percentHotCold, upperTemperatureBound, lowerTemperatureBound, targetTemperature, degreesFromTarget = results
|
boris-p/ladybug
|
src/Ladybug_Adaptive Comfort Calculator.py
|
Python
|
gpl-3.0
| 31,814
|
[
"EPW"
] |
10db9c92ab0e46e93daf45740bd57146353c978c6c6441dc18f44c87c0a9b4ff
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""usage: blobtools create -i FASTA [-y FASTATYPE] [-o PREFIX] [--title TITLE]
[-b BAM...] [-C] [-a CAS...] [-c COV...]
[--nodes <NODES>] [--names <NAMES>] [--db <NODESDB>]
[-t HITS...] [-x TAXRULE...] [-m FLOAT] [-d FLOAT] [--tax_collision_random]
[-h|--help]
Options:
-h --help show this
-i, --infile FASTA FASTA file of assembly. Headers are split at whitespaces.
-y, --type FASTATYPE Assembly program used to create FASTA. If specified,
coverage will be parsed from FASTA header.
(Parsing supported for 'spades', 'velvet', 'platanus')
-t, --hitsfile HITS... Hits file in format (qseqid\\ttaxid\\tbitscore)
(e.g. BLAST output "--outfmt '6 qseqid staxids bitscore'")
Can be specified multiple times
-x, --taxrule <TAXRULE>... Taxrule determines how taxonomy of blobs
is computed (by default both are calculated)
"bestsum" : sum bitscore across all
hits for each taxonomic rank
"bestsumorder" : sum bitscore across all
hits for each taxonomic rank.
- If first <TAX> file supplies hits, bestsum is calculated.
- If no hit is found, the next <TAX> file is used.
        -m, --min_score <FLOAT>         Minimal score necessary to be considered for taxonomy calculation, otherwise set to 'no-hit'
[default: 0.0]
-d, --min_diff <FLOAT> Minimal score difference between highest scoring
taxonomies (otherwise "unresolved") [default: 0.0]
--tax_collision_random Random allocation of taxonomy if highest scoring
taxonomies have equal scores (otherwise "unresolved") [default: False]
--nodes <NODES> NCBI nodes.dmp file. Not required if '--db'
--names <NAMES> NCBI names.dmp file. Not required if '--db'
--db <NODESDB> NodesDB file (default: $BLOBTOOLS/data/nodesDB.txt). If --nodes, --names and --db
are all given and NODESDB does not exist, create it from NODES and NAMES.
-b, --bam <BAM>... BAM file(s), can be specified multiple times
-a, --cas <CAS>... CAS file(s) (requires clc_mapping_info in $PATH), can be specified multiple times
-c, --cov <COV>... COV file(s), can be specified multiple times
-C, --calculate_cov Legacy coverage when getting coverage from BAM (does not apply to COV parsing).
                                        New default is to estimate coverages, which is faster.
-o, --out <PREFIX> BlobDB output prefix
        --title TITLE                   Title of BlobDB [default: output prefix]
"""
from __future__ import division
from docopt import docopt
from os.path import join, dirname, abspath
import lib.BtCore as BtCore
import lib.BtLog as BtLog
import lib.BtIO as BtIO
import lib.interface as interface
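# Hypothetical illustration, not part of blobtools: a minimal sketch of the
# "bestsum" idea described in the usage text above.  Bitscores are summed per
# taxid over all hits of a query sequence and the taxid with the largest sum
# wins, subject to the --min_score and --min_diff thresholds.  The function
# name and the returned labels are illustrative assumptions, not the exact
# strings used by BtCore; the helper is never called.
def _bestsum_sketch(hits, min_score=0.0, min_diff=0.0):
    # hits: iterable of (taxid, bitscore) pairs for a single query sequence
    totals = {}
    for taxid, bitscore in hits:
        totals[taxid] = totals.get(taxid, 0.0) + float(bitscore)
    if not totals:
        return 'no-hit'
    ranked = sorted(totals.items(), key=lambda item: item[1], reverse=True)
    best_tax, best_score = ranked[0]
    if best_score < min_score:
        return 'no-hit'
    if len(ranked) > 1 and (best_score - ranked[1][1]) < min_diff:
        return 'unresolved'
    return best_tax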
def main():
#main_dir = dirname(__file__)
args = docopt(__doc__)
fasta_f = args['--infile']
fasta_type = args['--type']
bam_fs = args['--bam']
cov_fs = args['--cov']
cas_fs = args['--cas']
hit_fs = args['--hitsfile']
prefix = args['--out']
nodesDB_f = args['--db']
names_f = args['--names']
estimate_cov_flag = True if not args['--calculate_cov'] else False
nodes_f = args['--nodes']
taxrules = args['--taxrule']
try:
min_bitscore_diff = float(args['--min_diff'])
min_score = float(args['--min_score'])
    except ValueError:
BtLog.error('45')
tax_collision_random = args['--tax_collision_random']
title = args['--title']
# outfile
out_f = BtIO.getOutFile("blobDB", prefix, "json")
if not (title):
title = out_f
# coverage
if not (fasta_type) and not bam_fs and not cov_fs and not cas_fs:
BtLog.error('1')
cov_libs = [BtCore.CovLibObj('bam' + str(idx), 'bam', lib_f) for idx, lib_f in enumerate(bam_fs)] + \
[BtCore.CovLibObj('cas' + str(idx), 'cas', lib_f) for idx, lib_f in enumerate(cas_fs)] + \
[BtCore.CovLibObj('cov' + str(idx), 'cov', lib_f) for idx, lib_f in enumerate(cov_fs)]
# taxonomy
hit_libs = [BtCore.HitLibObj('tax' + str(idx), 'tax', lib_f) for idx, lib_f in enumerate(hit_fs)]
# Create BlobDB object
blobDb = BtCore.BlobDb(title)
blobDb.version = interface.__version__
# Parse FASTA
blobDb.parseFasta(fasta_f, fasta_type)
# Parse nodesDB OR names.dmp, nodes.dmp
nodesDB_default = join(dirname(abspath(__file__)), "../data/nodesDB.txt")
nodesDB, nodesDB_f = BtIO.parseNodesDB(nodes=nodes_f, names=names_f, nodesDB=nodesDB_f, nodesDBdefault=nodesDB_default)
blobDb.nodesDB_f = nodesDB_f
# Parse similarity hits
if (hit_libs):
blobDb.parseHits(hit_libs)
if not taxrules:
if len(hit_libs) > 1:
taxrules = ['bestsum', 'bestsumorder']
else:
taxrules = ['bestsum']
blobDb.computeTaxonomy(taxrules, nodesDB, min_score, min_bitscore_diff, tax_collision_random)
else:
print(BtLog.warn_d['0'])
# Parse coverage
blobDb.parseCoverage(covLibObjs=cov_libs, estimate_cov=estimate_cov_flag, prefix=prefix)
# Generating BlobDB and writing to file
print(BtLog.status_d['7'] % out_f)
BtIO.writeJson(blobDb.dump(), out_f)
if __name__ == '__main__':
main()
|
DRL/blobtools
|
lib/create.py
|
Python
|
gpl-3.0
| 6,174
|
[
"BLAST"
] |
89a3c38eb64c739ad9d45bf0c210986461e1944a2e16c1e66ae0a3a11dac9356
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRsamtools(RPackage):
"""Binary alignment (BAM), FASTA, variant call (BCF), and tabix file
import
This package provides an interface to the 'samtools', 'bcftools', and
'tabix' utilities for manipulating SAM (Sequence Alignment / Map),
FASTA, binary variant call (BCF) and compressed indexed tab-delimited
(tabix) files."""
homepage = "https://bioconductor.org/packages/Rsamtools"
git = "https://git.bioconductor.org/packages/Rsamtools.git"
version('2.6.0', commit='f2aea061517c5a55e314c039251ece9831c7fad2')
version('2.2.1', commit='f10084658b4c9744961fcacd79c0ae9a7a40cd30')
version('2.0.3', commit='17d254cc026574d20db67474260944bf60befd70')
version('1.34.1', commit='0ec1d45c7a14b51d019c3e20c4aa87c6bd2b0d0c')
version('1.32.3', commit='0aa3f134143b045aa423894de81912becf64e4c2')
version('1.30.0', commit='61b365fe3762e796b3808cec7238944b7f68d7a6')
version('1.28.0', commit='dfa5b6abef68175586f21add7927174786412472')
depends_on('r-genomeinfodb@1.1.3:', type=('build', 'run'))
depends_on('r-genomicranges@1.21.6:', type=('build', 'run'))
depends_on('r-genomicranges@1.31.8:', when='@1.32.3:', type=('build', 'run'))
depends_on('r-biostrings@2.37.1:', type=('build', 'run'))
depends_on('r-biostrings@2.47.6:', when='@1.32.3:', type=('build', 'run'))
depends_on('r-biocgenerics@0.1.3:', type=('build', 'run'))
depends_on('r-biocgenerics@0.25.1:', when='@1.32.3:', type=('build', 'run'))
depends_on('r-s4vectors@0.13.8:', type=('build', 'run'))
depends_on('r-s4vectors@0.17.25:', when='@1.32.3:', type=('build', 'run'))
depends_on('r-iranges@2.3.7:', type=('build', 'run'))
depends_on('r-iranges@2.13.12:', when='@1.32.3:', type=('build', 'run'))
depends_on('r-xvector@0.15.1:', type=('build', 'run'))
depends_on('r-xvector@0.19.7:', when='@1.32.3:', type=('build', 'run'))
depends_on('r-zlibbioc', type=('build', 'run'))
depends_on('r-bitops', type=('build', 'run'))
depends_on('r-biocparallel', type=('build', 'run'))
depends_on('r-rhtslib@1.16.3', when='@2.0.3', type=('build', 'run'))
depends_on('r-rhtslib@1.17.7:', when='@2.2.1:', type=('build', 'run'))
depends_on('gmake', type='build')
# this is not a listed dependency but is needed
depends_on('curl')
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-rsamtools/package.py
|
Python
|
lgpl-2.1
| 2,555
|
[
"Bioconductor"
] |
a8c44785429c638a084c161e480bef4a0c970f2df9fc3419dd4cf65cf354701d
|
import sys, Image
from numpy import *
import scipy.ndimage
def score_from_autocorr(img0, img1, corres):
# Code by Philippe Weinzaepfel
# Compute autocorrelation
# parameters
sigma_image = 0.8 # for the gaussian filter applied to images before computing derivatives
sigma_matrix = 3.0 # for the integration gaussian filter
derivfilter = array([-0.5,0,0.5]) # function to compute the derivatives
# smooth_images
tmp = scipy.ndimage.filters.gaussian_filter1d(img0.astype(float32), sigma_image, axis=0, order=0, mode='nearest')
img0_smooth = scipy.ndimage.filters.gaussian_filter1d(tmp, sigma_image, axis=1, order=0, mode='nearest')
# compute the derivatives
img0_dx = scipy.ndimage.filters.convolve1d(img0_smooth, derivfilter, axis=0, mode='nearest')
img0_dy = scipy.ndimage.filters.convolve1d(img0_smooth, derivfilter, axis=1, mode='nearest')
# compute the auto correlation matrix
dx2 = sum(img0_dx*img0_dx,axis=2)
dxy = sum(img0_dx*img0_dy,axis=2)
dy2 = sum(img0_dy*img0_dy,axis=2)
# integrate it
tmp = scipy.ndimage.filters.gaussian_filter1d(dx2, sigma_matrix, axis=0, order=0, mode='nearest')
dx2_smooth = scipy.ndimage.filters.gaussian_filter1d(tmp, sigma_matrix, axis=1, order=0, mode='nearest')
tmp = scipy.ndimage.filters.gaussian_filter1d(dxy, sigma_matrix, axis=0, order=0, mode='nearest')
dxy_smooth = scipy.ndimage.filters.gaussian_filter1d(tmp, sigma_matrix, axis=1, order=0, mode='nearest')
tmp = scipy.ndimage.filters.gaussian_filter1d(dy2, sigma_matrix, axis=0, order=0, mode='nearest')
dy2_smooth = scipy.ndimage.filters.gaussian_filter1d(tmp, sigma_matrix, axis=1, order=0, mode='nearest')
# compute minimal eigenvalues: it is done by computing (dx2+dy2)/2 - sqrt( ((dx2+dy2)/2)^2 + (dxy)^2 - dx^2*dy^2)
tmp = 0.5*(dx2_smooth+dy2_smooth)
small_eigen = tmp - sqrt( maximum(0,tmp*tmp + dxy_smooth*dxy_smooth - dx2_smooth*dy2_smooth)) # the numbers can be negative in practice due to rounding errors
large_eigen = tmp + sqrt( maximum(0,tmp*tmp + dxy_smooth*dxy_smooth - dx2_smooth*dy2_smooth))
# Compute weight as flow score: preparing variable
#parameters
sigma_image = 0.8 # gaussian applied to images
derivfilter = array([1.0,-8.0,0.0,8.0,-1.0])/12.0 # filter to compute the derivatives
sigma_score = 50.0 # gaussian to convert dist to score
mul_coef = 10.0 # multiplicative coefficients
# smooth images
tmp = scipy.ndimage.filters.gaussian_filter1d(img0.astype(float32), sigma_image, axis=0, order=0, mode='nearest')
img0_smooth = scipy.ndimage.filters.gaussian_filter1d(tmp, sigma_image, axis=1, order=0, mode='nearest')
tmp = scipy.ndimage.filters.gaussian_filter1d(img1.astype(float32), sigma_image, axis=0, order=0, mode='nearest')
img1_smooth = scipy.ndimage.filters.gaussian_filter1d(tmp, sigma_image, axis=1, order=0, mode='nearest')
# compute derivatives
img0_dx = scipy.ndimage.filters.convolve1d(img0_smooth, derivfilter, axis=0, mode='nearest')
img0_dy = scipy.ndimage.filters.convolve1d(img0_smooth, derivfilter, axis=1, mode='nearest')
img1_dx = scipy.ndimage.filters.convolve1d(img1_smooth, derivfilter, axis=0, mode='nearest')
img1_dy = scipy.ndimage.filters.convolve1d(img1_smooth, derivfilter, axis=1, mode='nearest')
# compute it
res = []
for pos0, pos1, score in corres:
p0, p1 = tuple(pos0)[::-1], tuple(pos1)[::-1] # numpy coordinates
dist = sum( abs(img0_smooth[p0]-img1_smooth[p1]) + abs(img0_dx[p0]-img1_dx[p1]) + abs(img0_dy[p0]-img1_dy[p1]) )
score = mul_coef * sqrt( max(0,small_eigen[p0])) / (sigma_score*sqrt(2*pi))*exp(-0.5*square(dist/sigma_score));
res.append((pos0,pos1,score))
return res
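# Hypothetical note, not part of the original script: matches are piped on
# stdin as "x0 y0 x1 y1 score index" lines (the format parsed in the __main__
# block below), typically produced by a DeepMatching run on the same image
# pair.  The helper restates the per-match scoring formula used in
# score_from_autocorr() for a single pre-computed descriptor distance; it is
# an illustration only and is never called.
def rescore_single(dist, small_eigen_val, sigma_score=50.0, mul_coef=10.0):
    import math
    return (mul_coef * math.sqrt(max(0.0, small_eigen_val))
            / (sigma_score * math.sqrt(2.0 * math.pi))
            * math.exp(-0.5 * (dist / sigma_score) ** 2))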
if __name__=='__main__':
args = sys.argv[1:]
img0 = array(Image.open(args[0]).convert('RGB'))
img1 = array(Image.open(args[1]).convert('RGB'))
    out = open(args[2], 'w') if len(args)>=3 else sys.stdout
ty0, tx0 = img0.shape[:2]
ty1, tx1 = img1.shape[:2]
rint = lambda s: int(0.5+float(s))
retained_matches = []
for line in sys.stdin:
line = line.split()
if not line or len(line)!=6 or not line[0][0].isdigit(): continue
x0, y0, x1, y1, score, index = line
retained_matches.append(((min(tx0-1,rint(x0)),min(ty0-1,rint(y0))),
(min(tx1-1,rint(x1)),min(ty1-1,rint(y1))),0))
assert retained_matches, 'error: no matches piped to this program'
for p0, p1, score in score_from_autocorr(img0, img1, retained_matches):
print >>out, '%d %d %d %d %f' %(p0[0],p0[1],p1[0],p1[1],score)
|
CansenJIANG/deepMatchingGUI
|
src/rescore.py
|
Python
|
mit
| 4,539
|
[
"Gaussian"
] |
da80fd4c1f37e6e8b7066364cbf86a780292b4c8e935cedb0a910a3945e68774
|
# coding: utf-8
__author__ = 'czhou <czhou@ilegendsoft.com>'
mime_types = {
"ai": "application/postscript",
"aif": "audio/x-aiff",
"aifc": "audio/x-aiff",
"aiff": "audio/x-aiff",
"asc": "text/plain",
"atom": "application/atom+xml",
"au": "audio/basic",
"avi": "video/x-msvideo",
"bcpio": "application/x-bcpio",
"bin": "application/octet-stream",
"bmp": "image/bmp",
"cdf": "application/x-netcdf",
"cgm": "image/cgm",
"class": "application/octet-stream",
"cpio": "application/x-cpio",
"cpt": "application/mac-compactpro",
"csh": "application/x-csh",
"css": "text/css",
"dcr": "application/x-director",
"dif": "video/x-dv",
"dir": "application/x-director",
"djv": "image/vnd.djvu",
"djvu": "image/vnd.djvu",
"dll": "application/octet-stream",
"dmg": "application/octet-stream",
"dms": "application/octet-stream",
"doc": "application/msword",
"docx": "application/vnd.openxmlformats-officedocument.wordprocessingml." +
"document",
"dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml." +
"template",
"docm": "application/vnd.ms-word.document.macroEnabled.12",
"dotm": "application/vnd.ms-word.template.macroEnabled.12",
"dtd": "application/xml-dtd",
"dv": "video/x-dv",
"dvi": "application/x-dvi",
"dxr": "application/x-director",
"eps": "application/postscript",
"etx": "text/x-setext",
"exe": "application/octet-stream",
"ez": "application/andrew-inset",
"gif": "image/gif",
"gram": "application/srgs",
"grxml": "application/srgs+xml",
"gtar": "application/x-gtar",
"hdf": "application/x-hdf",
"hqx": "application/mac-binhex40",
"htm": "text/html",
"html": "text/html",
"ice": "x-conference/x-cooltalk",
"ico": "image/x-icon",
"ics": "text/calendar",
"ief": "image/ief",
"ifb": "text/calendar",
"iges": "model/iges",
"igs": "model/iges",
"jnlp": "application/x-java-jnlp-file",
"jp2": "image/jp2",
"jpe": "image/jpeg",
"jpeg": "image/jpeg",
"jpg": "image/jpeg",
"js": "application/x-javascript",
"kar": "audio/midi",
"latex": "application/x-latex",
"lha": "application/octet-stream",
"lzh": "application/octet-stream",
"m3u": "audio/x-mpegurl",
"m4a": "audio/mp4a-latm",
"m4b": "audio/mp4a-latm",
"m4p": "audio/mp4a-latm",
"m4u": "video/vnd.mpegurl",
"m4v": "video/x-m4v",
"mac": "image/x-macpaint",
"man": "application/x-troff-man",
"mathml": "application/mathml+xml",
"me": "application/x-troff-me",
"mesh": "model/mesh",
"mid": "audio/midi",
"midi": "audio/midi",
"mif": "application/vnd.mif",
"mov": "video/quicktime",
"movie": "video/x-sgi-movie",
"mp2": "audio/mpeg",
"mp3": "audio/mpeg",
"mp4": "video/mp4",
"mpe": "video/mpeg",
"mpeg": "video/mpeg",
"mpg": "video/mpeg",
"mpga": "audio/mpeg",
"ms": "application/x-troff-ms",
"msh": "model/mesh",
"mxu": "video/vnd.mpegurl",
"nc": "application/x-netcdf",
"oda": "application/oda",
"ogg": "application/ogg",
"pbm": "image/x-portable-bitmap",
"pct": "image/pict",
"pdb": "chemical/x-pdb",
"pdf": "application/pdf",
"pgm": "image/x-portable-graymap",
"pgn": "application/x-chess-pgn",
"pic": "image/pict",
"pict": "image/pict",
"png": "image/png",
"pnm": "image/x-portable-anymap",
"pnt": "image/x-macpaint",
"pntg": "image/x-macpaint",
"ppm": "image/x-portable-pixmap",
"ppt": "application/vnd.ms-powerpoint",
"pptx": "application/vnd.openxmlformats-officedocument.presentationml." +
"presentation",
"potx": "application/vnd.openxmlformats-officedocument.presentationml." +
"template",
"ppsx": "application/vnd.openxmlformats-officedocument.presentationml." +
"slideshow",
"ppam": "application/vnd.ms-powerpoint.addin.macroEnabled.12",
"pptm": "application/vnd.ms-powerpoint.presentation.macroEnabled.12",
"potm": "application/vnd.ms-powerpoint.template.macroEnabled.12",
"ppsm": "application/vnd.ms-powerpoint.slideshow.macroEnabled.12",
"ps": "application/postscript",
"qt": "video/quicktime",
"qti": "image/x-quicktime",
"qtif": "image/x-quicktime",
"ra": "audio/x-pn-realaudio",
"ram": "audio/x-pn-realaudio",
"ras": "image/x-cmu-raster",
"rdf": "application/rdf+xml",
"rgb": "image/x-rgb",
"rm": "application/vnd.rn-realmedia",
"roff": "application/x-troff",
"rtf": "text/rtf",
"rtx": "text/richtext",
"sgm": "text/sgml",
"sgml": "text/sgml",
"sh": "application/x-sh",
"shar": "application/x-shar",
"silo": "model/mesh",
"sit": "application/x-stuffit",
"skd": "application/x-koan",
"skm": "application/x-koan",
"skp": "application/x-koan",
"skt": "application/x-koan",
"smi": "application/smil",
"smil": "application/smil",
"snd": "audio/basic",
"so": "application/octet-stream",
"spl": "application/x-futuresplash",
"src": "application/x-wais-source",
"sv4cpio": "application/x-sv4cpio",
"sv4crc": "application/x-sv4crc",
"svg": "image/svg+xml",
"swf": "application/x-shockwave-flash",
"t": "application/x-troff",
"tar": "application/x-tar",
"tcl": "application/x-tcl",
"tex": "application/x-tex",
"texi": "application/x-texinfo",
"texinfo": "application/x-texinfo",
"tif": "image/tiff",
"tiff": "image/tiff",
"tr": "application/x-troff",
"tsv": "text/tab-separated-values",
"txt": "text/plain",
"ustar": "application/x-ustar",
"vcd": "application/x-cdlink",
"vrml": "model/vrml",
"vxml": "application/voicexml+xml",
"wav": "audio/x-wav",
"wbmp": "image/vnd.wap.wbmp",
"wbmxl": "application/vnd.wap.wbxml",
"wml": "text/vnd.wap.wml",
"wmlc": "application/vnd.wap.wmlc",
"wmls": "text/vnd.wap.wmlscript",
"wmlsc": "application/vnd.wap.wmlscriptc",
"wrl": "model/vrml",
"xbm": "image/x-xbitmap",
"xht": "application/xhtml+xml",
"xhtml": "application/xhtml+xml",
"xls": "application/vnd.ms-excel",
"xml": "application/xml",
"xpm": "image/x-xpixmap",
"xsl": "application/xml",
"xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml." +
"template",
"xlsm": "application/vnd.ms-excel.sheet.macroEnabled.12",
"xltm": "application/vnd.ms-excel.template.macroEnabled.12",
"xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
"xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
"xslt": "application/xslt+xml",
"xul": "application/vnd.mozilla.xul+xml",
"xwd": "image/x-xwindowdump",
"xyz": "chemical/x-xyz",
"zip": "application/zip"
}
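# Hypothetical usage sketch, not part of the original module: look up a MIME
# type by file extension using the table above, falling back to a generic
# binary type when the extension is unknown.  The function name and default
# are illustrative assumptions.
def guess_mime_type(filename, default="application/octet-stream"):
    ext = filename.rsplit(".", 1)[-1].lower() if "." in filename else ""
    return mime_types.get(ext, default)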
|
MaxLeap/SDK-CloudCode-Python
|
ML/mime_type.py
|
Python
|
cc0-1.0
| 6,931
|
[
"NetCDF"
] |
06df4a44cfae3e3afad6789ac19e5295eb4cf4158c0803a7a460fa7b99118d2e
|
# coding=utf-8
import json
import logging
import os
import datetime
from collections import OrderedDict
import pytz
import shutil
from PyQt4.QtCore import QObject, QFileInfo, QUrl, Qt
from PyQt4.QtXml import QDomDocument
from qgis.core import (
QgsProject,
QgsCoordinateReferenceSystem,
QgsMapLayerRegistry,
QgsRasterLayer,
QgsComposition,
QgsPoint,
QgsRectangle)
from jinja2 import Template
from headless.tasks.utilities import download_file
from realtime.exceptions import MapComposerError
from realtime.utilities import realtime_logger_name
from safe.common.exceptions import ZeroImpactException, KeywordNotFoundError
from safe.common.utilities import format_int
from safe.impact_functions.core import population_rounding
from safe.impact_functions.impact_function_manager import \
ImpactFunctionManager
from safe.test.utilities import get_qgis_app
from safe.utilities.clipper import clip_layer
from safe.utilities.gis import get_wgs84_resolution
from safe.utilities.keyword_io import KeywordIO
from safe.utilities.styling import set_vector_categorized_style, \
set_vector_graduated_style, setRasterStyle
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
from safe.common.version import get_version
from safe.storage.core import read_qgis_layer
__author__ = 'Rizky Maulana Nugraha <lana.pcfre@gmail.com>'
__date__ = '7/13/16'
LOGGER = logging.getLogger(realtime_logger_name())
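# Hypothetical note, not part of the original module: AshEvent.__init__ below
# rejects event times that are not timezone-aware.  A simple way to build an
# aware timestamp with pytz (avoiding the pitfall of attaching a pytz zone
# via datetime.replace(), which can yield a local-mean-time offset) is
# sketched here; the helper name and default zone are illustrative.
def _example_event_time(tz_name='Asia/Jakarta'):
    return datetime.datetime.now(pytz.timezone(tz_name))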
class AshEvent(QObject):
def __init__(
self,
event_time=None,
volcano_name=None,
volcano_location=None,
eruption_height=None,
region=None,
alert_level=None,
locale=None,
working_dir=None,
hazard_path=None,
overview_path=None,
highlight_base_path=None,
population_path=None,
volcano_path=None,
landcover_path=None,
cities_path=None,
airport_path=None):
"""
:param event_time:
:param volcano_name:
:param volcano_location:
:param eruption_height:
:param region:
:param alert_level:
:param locale:
:param working_dir:
:param hazard_path: It can be a url or local file path
:param population_path:
:param landcover_path:
:param cities_path:
:param airport_path:
"""
QObject.__init__(self)
if event_time:
self.time = event_time
else:
self.time = datetime.datetime.now().replace(tzinfo=pytz.timezone('Asia/Jakarta'))
# Check timezone awareness
if not self.time.tzinfo:
raise Exception('Need timezone aware object for event time')
self.volcano_name = volcano_name
self.volcano_location = volcano_location
if self.volcano_location:
self.longitude = self.volcano_location[0]
self.latitude = self.volcano_location[1]
else:
self.longitude = None
self.latitude = None
        self.eruption_height = eruption_height
self.region = region
self.alert_level = alert_level
self.locale = locale
if not self.locale:
self.locale = 'en'
if not working_dir:
raise Exception("Working directory can't be empty")
self.working_dir = working_dir
if not os.path.exists(self.working_dir_path()):
os.makedirs(self.working_dir_path())
# save hazard layer
self.hazard_path = self.working_dir_path('hazard.tif')
self.save_hazard_layer(hazard_path)
if not os.path.exists(self.hazard_path):
IOError("Hazard path doesn't exists")
self.population_html_path = self.working_dir_path('population-table.html')
self.nearby_html_path = self.working_dir_path('nearby-table.html')
self.landcover_html_path = self.working_dir_path('landcover-table.html')
self.map_report_path = self.working_dir_path('report.pdf')
self.project_path = self.working_dir_path('project.qgs')
self.impact_exists = None
        self.locale = 'en'  # note: forces English output regardless of the locale argument above
self.population_path = population_path
self.cities_path = cities_path
self.airport_path = airport_path
self.landcover_path = landcover_path
self.volcano_path = volcano_path
self.highlight_base_path = highlight_base_path
self.overview_path = overview_path
# load layers
self.hazard_layer = read_qgis_layer(self.hazard_path, 'Ash Fall')
self.population_layer = read_qgis_layer(
self.population_path, 'Population')
self.landcover_layer = read_qgis_layer(
self.landcover_path, 'Landcover')
self.cities_layer = read_qgis_layer(
self.cities_path, 'Cities')
self.airport_layer = read_qgis_layer(
self.airport_path, 'Airport')
self.volcano_layer = read_qgis_layer(
self.volcano_path, 'Volcano')
self.highlight_base_layer = read_qgis_layer(
self.highlight_base_path, 'Base Map')
self.overview_layer = read_qgis_layer(
self.overview_path, 'Overview')
# Write metadata for self reference
self.write_metadata()
def save_hazard_layer(self, hazard_path):
# download or copy hazard path/url
# It is a single tif file
if not hazard_path and not os.path.exists(self.hazard_path):
raise IOError('Hazard file not specified')
if hazard_path:
temp_hazard = download_file(hazard_path)
shutil.copy(temp_hazard, self.hazard_path)
# copy qml and metadata
shutil.copy(
self.ash_fixtures_dir('hazard.qml'),
self.working_dir_path('hazard.qml'))
keyword_io = KeywordIO()
keywords = {
'hazard_category': u'single_event',
'keyword_version': u'3.5',
'title': u'Ash Fall',
'hazard': u'volcanic_ash',
'continuous_hazard_unit': u'centimetres',
'layer_geometry': u'raster',
'layer_purpose': u'hazard',
'layer_mode': u'continuous'
}
hazard_layer = read_qgis_layer(self.hazard_path, 'Ash Fall')
keyword_io.write_keywords(hazard_layer, keywords)
def write_metadata(self):
"""Write metadata file for this event folder
write metadata
example metadata json:
{
'volcano_name': 'Sinabung',
'volcano_location': [107, 6],
'alert_level': 'Siaga',
'eruption_height': 7000, # eruption height in meters
'event_time': '2016-07-20 11:22:33 +0700',
'region': 'North Sumatra'
}
:return:
"""
dateformat = '%Y-%m-%d %H:%M:%S %z'
metadata_dict = {
'volcano_name': self.volcano_name,
'volcano_location': self.volcano_location,
'alert_level': self.alert_level,
            'eruption_height': self.eruption_height,
'event_time': self.time.strftime(dateformat),
'region': self.region
}
with open(self.working_dir_path('metadata.json'), 'w') as f:
f.write(json.dumps(metadata_dict))
def working_dir_path(self, path=''):
dateformat = '%Y%m%d%H%M%S'
timestring = self.time.strftime(dateformat)
event_folder = '%s-%s' % (timestring, self.volcano_name)
return os.path.join(self.working_dir, event_folder, path)
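    # Example (a sketch based on the sample metadata in write_metadata above):
    # for an event at 2016-07-20 11:22:33 on volcano "Sinabung",
    # working_dir_path('hazard.tif') resolves to
    # <working_dir>/20160720112233-Sinabung/hazard.tif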
def event_dict(self):
tz = pytz.timezone('Asia/Jakarta')
timestamp = self.time.astimezone(tz=tz)
time_format = '%-d-%b-%Y %H:%M:%S'
timestamp_string = timestamp.strftime(time_format)
point = QgsPoint(
self.longitude,
self.latitude)
coordinates = point.toDegreesMinutesSeconds(2)
tokens = coordinates.split(',')
longitude_string = tokens[0]
latitude_string = tokens[1]
elapsed_time = datetime.datetime.utcnow().replace(tzinfo=pytz.utc) - self.time
elapsed_hour = elapsed_time.seconds/3600
elapsed_minute = (elapsed_time.seconds/60) % 60
event = {
'report-title': self.tr('Volcanic Ash Impact'),
'report-timestamp': self.tr('Volcano: %s, Alert Level: %s %s') % (
self.volcano_name,
self.alert_level, timestamp_string),
'report-province': self.tr('Province: %s') % (self.region,),
'report-location': self.tr(
'Longitude %s Latitude %s;'
' Eruption Column Height (a.s.l) - %d m') % (
                longitude_string, latitude_string, self.eruption_height),
'report-elapsed': self.tr('Elapsed time since event %s hour(s) and %s minute(s)') % (elapsed_hour, elapsed_minute),
'header-impact-table': self.tr('Potential impact at each fallout level'),
'header-nearby-table': self.tr('Nearby places'),
'header-landcover-table': self.tr('Land Cover Impact'),
'content-disclaimer': self.tr(
'The impact estimation is automatically generated and only '
'takes into account the population, cities and land cover '
'affected by different levels of volcanic ash fallout at '
'surface level. The estimate is based on volcanic ash '
'fallout data from Badan Geologi, population count data '
'derived by DMInnovation from worldpop.org.uk, place '
'information from geonames.org, land cover classification '
'data provided by Indonesian Geospatial Portal at '
'http://portal.ina-sdi.or.id and software developed by BNPB. '
'Limitation in the estimates of surface fallout, population '
'and place names datasets may result in significant '
'misrepresentation of the on-the-surface situation in the '
'figures shown here. Consequently decisions should not be '
'made soley on the information presented here and should '
'always be verified by ground truthing and other reliable '
'information sources.'
),
'content-notes': self.tr(
'This report was created using InaSAFE version %s. Visit '
'http://inasafe.org for more information. ') % get_version()
}
return event
@classmethod
def ash_fixtures_dir(cls, fixtures_path=None):
dirpath = os.path.dirname(__file__)
path = os.path.join(dirpath, 'fixtures')
if fixtures_path:
return os.path.join(path, fixtures_path)
return path
def render_population_table(self):
with open(self.working_dir_path('population_impact.json')) as f:
population_impact_data = json.loads(f.read())
impact_summary = population_impact_data['impact summary']['fields']
key_mapping = {
'Population in very low hazard zone': 'very_low',
'Population in medium hazard zone': 'medium',
'Population in high hazard zone': 'high',
'Population in very high hazard zone': 'very_high',
'Population in low hazard zone': 'low'
}
population_dict = {}
for val in impact_summary:
if val[0] in key_mapping:
population_dict[key_mapping[val[0]]] = val[1]
for key, val in key_mapping.iteritems():
if val not in population_dict:
population_dict[val] = 0
else:
# divide per 1000 people (unit used in the report)
population_dict[val] /= 1000
population_dict[val] = format_int(
population_rounding(population_dict[val]))
# format:
# {
# 'very_low': 1,
# 'low': 2,
# 'medium': 3,
# 'high': 4,
# 'very_high': 5
# }
population_template = self.ash_fixtures_dir(
'population-table.template.html')
with open(population_template) as f:
template = Template(f.read())
html_string = template.render(**population_dict)
with open(self.population_html_path, 'w') as f:
f.write(html_string)
def render_landcover_table(self):
with open(self.working_dir_path('landcover_impact.json')) as f:
landcover_impact_data = json.loads(f.read())
landcover_dict = OrderedDict()
for entry in landcover_impact_data['impact table']['data']:
land_type = entry[0]
area = entry[3]
# convert from ha to km^2
area /= 100
if land_type in landcover_dict:
landcover_dict[land_type] += area
else:
landcover_dict[land_type] = area
# format:
# landcover_list =
# [
# {
# 'type': 'settlement',
# 'area': 1000
# },
# {
# 'type': 'rice field',
# 'area': 10
# },
# ]
landcover_list = []
for land_type, area in landcover_dict.iteritems():
if not land_type.lower() == 'other':
landcover_list.append({
'type': land_type,
'area': format_int(int(area))
})
landcover_list.sort(key=lambda x: x['area'], reverse=True)
landcover_template = self.ash_fixtures_dir(
'landcover-table.template.html')
with open(landcover_template) as f:
template = Template(f.read())
# generate table here
html_string = template.render(landcover_list=landcover_list)
with open(self.landcover_html_path, 'w') as f:
f.write(html_string)
def render_nearby_table(self):
hazard_mapping = {
0: 'Very Low',
1: 'Low',
2: 'Moderate',
3: 'High',
4: 'Very High'
}
# load PLACES
keyword_io = KeywordIO()
try:
cities_impact = read_qgis_layer(
self.working_dir_path('cities_impact.shp'),
'Cities')
hazard = keyword_io.read_keywords(
cities_impact, 'target_field')
hazard_field_index = cities_impact.fieldNameIndex(hazard)
name_field = keyword_io.read_keywords(
self.cities_layer, 'name_field')
name_field_index = cities_impact.fieldNameIndex(name_field)
try:
population_field = keyword_io.read_keywords(
self.cities_layer, 'population_field')
population_field_index = cities_impact.fieldNameIndex(
population_field)
except KeywordNotFoundError:
population_field = None
population_field_index = None
table_places = []
for f in cities_impact.getFeatures():
haz_class = f.attributes()[hazard_field_index]
city_name = f.attributes()[name_field_index]
if population_field_index >= 0:
city_pop = f.attributes()[population_field_index]
else:
city_pop = 1
# format:
# [
# 'hazard class',
# 'city's population',
# 'city's name',
# 'the type'
# ]
haz = hazard_mapping[haz_class]
item = {
'class': haz_class,
'hazard': haz,
'css': haz.lower().replace(' ', '-'),
'population': format_int(
population_rounding(city_pop / 1000)),
'name': city_name.title(),
'type': 'places'
}
table_places.append(item)
# sort table by hazard zone, then population
table_places = sorted(
table_places,
key=lambda x: (-x['class'], -x['population']))
except Exception as e:
LOGGER.exception(e)
table_places = []
# load AIRPORTS
try:
airport_impact = read_qgis_layer(
self.working_dir_path('airport_impact.shp'),
'Airport')
hazard = keyword_io.read_keywords(
airport_impact, 'target_field')
hazard_field_index = airport_impact.fieldNameIndex(hazard)
name_field = keyword_io.read_keywords(
self.airport_layer, 'name_field')
name_field_index = airport_impact.fieldNameIndex(name_field)
            # airports don't have population, so enter 0 for population
table_airports = []
for f in airport_impact.getFeatures():
haz_class = f.attributes()[hazard_field_index]
airport_name = f.attributes()[name_field_index]
haz = hazard_mapping[haz_class]
item = {
'class': haz_class,
'hazard': haz,
'css': haz.lower().replace(' ', '-'),
'population': 0,
'name': airport_name.title(),
'type': 'airport'
}
table_airports.append(item)
# Sort by hazard class
table_airports = sorted(
table_airports,
key=lambda x: -x['class'])
except Exception as e:
LOGGER.exception(e)
table_airports = []
# decide which to show
# maximum 2 airport
max_airports = 2
airport_count = min(max_airports, len(table_airports))
        # maximum of 6 entries (places + airports) to show in total
max_rows = 6
places_count = min(len(table_places), max_rows - airport_count)
# get top airport
table_airports = table_airports[:airport_count]
# get top places
table_places = table_places[:places_count]
item_list = table_places + table_airports
# sort entry by hazard level
item_list = sorted(
item_list,
key=lambda x: (-x['class'], -x['population']))
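        # Worked example of the selection above: with, say, 5 affected airports
        # and 10 affected places, airport_count = min(2, 5) = 2 and
        # places_count = min(10, 6 - 2) = 4, giving 6 rows in total.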
nearby_template = self.ash_fixtures_dir(
'nearby-table.template.html')
with open(nearby_template) as f:
template = Template(f.read())
# generate table here
html_string = template.render(item_list=item_list)
with open(self.nearby_html_path, 'w') as f:
f.write(html_string)
# copy airport logo
shutil.copy(
self.ash_fixtures_dir('logo/airport.jpg'),
self.working_dir_path('airport.jpg'))
def copy_layer(self, layer, target_base_name):
"""Copy layer to working directory with specified base_name
        :param layer: Safe layer
        :param target_base_name: base file name to use for the copied layer
        :return:
"""
base_name, _ = os.path.splitext(layer.filename)
dir_name = os.path.dirname(layer.filename)
for (root, dirs, files) in os.walk(dir_name):
for f in files:
source_filename = os.path.join(root, f)
if source_filename.find(base_name) >= 0:
extensions = source_filename.replace(base_name, '')
new_path = self.working_dir_path(
target_base_name + extensions)
shutil.copy(source_filename, new_path)
@classmethod
def set_impact_style(cls, impact):
# Determine styling for QGIS layer
qgis_impact_layer = impact.as_qgis_native()
style = impact.get_style_info()
style_type = impact.get_style_type()
if impact.is_vector:
LOGGER.debug('myEngineImpactLayer.is_vector')
if not style:
# Set default style if possible
pass
elif style_type == 'categorizedSymbol':
LOGGER.debug('use categorized')
set_vector_categorized_style(qgis_impact_layer, style)
elif style_type == 'graduatedSymbol':
LOGGER.debug('use graduated')
set_vector_graduated_style(qgis_impact_layer, style)
elif impact.is_raster:
LOGGER.debug('myEngineImpactLayer.is_raster')
if not style:
qgis_impact_layer.setDrawingStyle("SingleBandPseudoColor")
else:
setRasterStyle(qgis_impact_layer, style)
def calculate_specified_impact(
self, function_id, hazard_layer,
exposure_layer, output_basename):
LOGGER.info('Calculate %s' % function_id)
if_manager = ImpactFunctionManager()
impact_function = if_manager.get_instance(function_id)
impact_function.hazard = hazard_layer
extent = impact_function.hazard.extent()
hazard_extent = [
extent.xMinimum(), extent.yMinimum(),
extent.xMaximum(), extent.yMaximum()]
# clip exposure if required (if it is too large)
if isinstance(exposure_layer, QgsRasterLayer):
cell_size, _ = get_wgs84_resolution(exposure_layer)
else:
cell_size = None
clipped_exposure = clip_layer(
layer=exposure_layer,
extent=hazard_extent,
cell_size=cell_size)
exposure_layer = clipped_exposure
impact_function.exposure = exposure_layer
impact_function.requested_extent = hazard_extent
impact_function.requested_extent_crs = impact_function.hazard.crs()
impact_function.force_memory = True
try:
impact_function.run_analysis()
impact_layer = impact_function.impact
if impact_layer:
self.set_impact_style(impact_layer)
# copy results of impact to report_path directory
self.copy_layer(impact_layer, output_basename)
except ZeroImpactException as e:
# in case zero impact, just return
LOGGER.info('No impact detected')
LOGGER.info(e.message)
return False
except Exception as e:
LOGGER.info('Calculation error')
LOGGER.exception(e)
return False
LOGGER.info('Calculation completed.')
return True
def calculate_impact(self):
# calculate population impact
LOGGER.info('Calculating Impact Function')
population_impact_success = self.calculate_specified_impact(
'AshRasterPopulationFunction',
self.hazard_layer,
self.population_layer,
'population_impact')
# calculate landcover impact
landcover_impact_success = self.calculate_specified_impact(
'AshRasterLandCoverFunction',
self.hazard_layer,
self.landcover_layer,
'landcover_impact')
# calculate cities impact
cities_impact_success = self.calculate_specified_impact(
'AshRasterPlacesFunction',
self.hazard_layer,
self.cities_layer,
'cities_impact')
# calculate airport impact
airport_impact_success = self.calculate_specified_impact(
'AshRasterPlacesFunction',
self.hazard_layer,
self.airport_layer,
'airport_impact')
self.impact_exists = True
def generate_report(self):
# Generate pdf report from impact/hazard
LOGGER.info('Generating report')
if not self.impact_exists:
# Cannot generate report when no impact layer present
LOGGER.info('Cannot Generate report when no impact present.')
return
project_instance = QgsProject.instance()
project_instance.setFileName(self.project_path)
project_instance.read()
# get layer registry
layer_registry = QgsMapLayerRegistry.instance()
layer_registry.removeAllMapLayers()
# Set up the map renderer that will be assigned to the composition
map_renderer = CANVAS.mapRenderer()
# Enable on the fly CRS transformations
map_renderer.setProjectionsEnabled(True)
default_crs = map_renderer.destinationCrs()
crs = QgsCoordinateReferenceSystem('EPSG:4326')
map_renderer.setDestinationCrs(crs)
# add place name layer
layer_registry.addMapLayer(self.cities_layer, False)
# add airport layer
layer_registry.addMapLayer(self.airport_layer, False)
# add volcano layer
layer_registry.addMapLayer(self.volcano_layer, False)
# add impact layer
hazard_layer = read_qgis_layer(
self.hazard_path, self.tr('People Affected'))
layer_registry.addMapLayer(hazard_layer, False)
# add basemap layer
layer_registry.addMapLayer(self.highlight_base_layer, False)
        # add overview layer
layer_registry.addMapLayer(self.overview_layer, False)
CANVAS.setExtent(hazard_layer.extent())
CANVAS.refresh()
template_path = self.ash_fixtures_dir('realtime-ash.qpt')
with open(template_path) as f:
template_content = f.read()
document = QDomDocument()
document.setContent(template_content)
# Now set up the composition
# map_settings = QgsMapSettings()
# composition = QgsComposition(map_settings)
composition = QgsComposition(map_renderer)
subtitution_map = self.event_dict()
LOGGER.debug(subtitution_map)
# load composition object from template
result = composition.loadFromTemplate(document, subtitution_map)
if not result:
LOGGER.exception(
'Error loading template %s with keywords\n %s',
template_path, subtitution_map)
raise MapComposerError
# get main map canvas on the composition and set extent
map_impact = composition.getComposerItemById('map-impact')
if map_impact:
map_impact.zoomToExtent(hazard_layer.extent())
map_impact.renderModeUpdateCachedImage()
else:
LOGGER.exception('Map canvas could not be found in template %s',
template_path)
raise MapComposerError
# get overview map canvas on the composition and set extent
map_overall = composition.getComposerItemById('map-overall')
if map_overall:
map_overall.setLayerSet([self.overview_layer.id()])
# this is indonesia extent
indonesia_extent = QgsRectangle(
94.0927980005593554,
-15.6629591962689343,
142.0261493318861312,
10.7379406374101816)
map_overall.zoomToExtent(indonesia_extent)
map_overall.renderModeUpdateCachedImage()
else:
LOGGER.exception(
'Map canvas could not be found in template %s',
template_path)
raise MapComposerError
# setup impact table
self.render_population_table()
self.render_nearby_table()
self.render_landcover_table()
impact_table = composition.getComposerItemById(
'table-impact')
if impact_table is None:
message = 'table-impact composer item could not be found'
LOGGER.exception(message)
raise MapComposerError(message)
impacts_html = composition.getComposerHtmlByItem(
impact_table)
if impacts_html is None:
message = 'Impacts QgsComposerHtml could not be found'
LOGGER.exception(message)
raise MapComposerError(message)
impacts_html.setUrl(QUrl(self.population_html_path))
# setup nearby table
nearby_table = composition.getComposerItemById(
'table-nearby')
if nearby_table is None:
message = 'table-nearby composer item could not be found'
LOGGER.exception(message)
raise MapComposerError(message)
nearby_html = composition.getComposerHtmlByItem(
nearby_table)
if nearby_html is None:
message = 'Nearby QgsComposerHtml could not be found'
LOGGER.exception(message)
raise MapComposerError(message)
nearby_html.setUrl(QUrl(self.nearby_html_path))
# setup landcover table
landcover_table = composition.getComposerItemById(
'table-landcover')
if landcover_table is None:
message = 'table-landcover composer item could not be found'
LOGGER.exception(message)
raise MapComposerError(message)
landcover_html = composition.getComposerHtmlByItem(
landcover_table)
if landcover_html is None:
message = 'Landcover QgsComposerHtml could not be found'
LOGGER.exception(message)
raise MapComposerError(message)
landcover_html.setUrl(QUrl(self.landcover_html_path))
# setup logos
logos_id = ['logo-bnpb', 'logo-geologi']
for logo_id in logos_id:
logo_picture = composition.getComposerItemById(logo_id)
if logo_picture is None:
message = '%s composer item could not be found' % logo_id
LOGGER.exception(message)
raise MapComposerError(message)
pic_path = os.path.basename(logo_picture.picturePath())
pic_path = os.path.join('logo', pic_path)
logo_picture.setPicturePath(self.ash_fixtures_dir(pic_path))
# save a pdf
composition.exportAsPDF(self.map_report_path)
project_instance.write(QFileInfo(self.project_path))
layer_registry.removeAllMapLayers()
map_renderer.setDestinationCrs(default_crs)
map_renderer.setProjectionsEnabled(False)
LOGGER.info('Report generation completed.')
|
Samweli/inasafe
|
realtime/ash/ash_event.py
|
Python
|
gpl-3.0
| 30,298
|
[
"VisIt"
] |
64a9ba178e5dfe2c312b840d536ce0fc83e3ff1aa38be2770057ec7254ef6e79
|
#import pdb # pause code for debugging at pdb.set_trace()
import numpy as np
import toolbox as tool
import slab_functions as sf
from pysac.plot.mayavi_seed_streamlines import SeedStreamline
import matplotlib.pyplot as plt
from mayavi import mlab
import gc
#import move_seed_points as msp
import mayavi_plotting_functions as mpf
import dispersion_diagram
import img2vid as i2v
from functools import partial
import os
# ================================
# Preamble: set mode options and view parameters
# ================================
# What mode do you want? OPTIONS:
mode_options = ['slow-kink-surf', 'slow-saus-surf', 'slow-saus-body-3',
'slow-kink-body-3', 'slow-saus-body-2', 'slow-kink-body-2',
'slow-saus-body-1', 'slow-kink-body-1', 'fast-saus-body-1',
'fast-kink-body-1', 'fast-saus-body-2', 'fast-kink-body-2',
'fast-saus-body-3', 'fast-kink-body-3', 'fast-kink-surf',
'fast-saus-surf', 'shear-alfven', 'shear-alfven-broadband']
# Which angle shall we view from? OPTIONS:
view_options = ['front', 'front-parallel', 'top', 'top-parallel', 'front-top',
'front-side', 'front-top-side']
# Uniform lighting?
#uniform_light = True
uniform_light = False
show_density = False
show_density_pert = False
show_mag = False
show_mag_scale = False
show_mag_fade = False
show_mag_vec = False
show_vel_front = False
show_vel_front_pert = False
show_vel_top = False
show_vel_top_pert = False
show_disp_top = False
show_disp_front = False
show_axes = False
show_axis_labels = False
show_mini_axis = False
show_boundary = False
# Uncomment the parameter you would like to see
# No density perturbations or vel/disp pert for alfven modes.
#show_density = True
#show_density_pert = True
show_mag = True
#show_mag_scale = True #must also have show_mag = True
#show_mag_fade = True
#show_mag_vec = True
#show_vel_front = True
#show_vel_front_pert = True
#show_vel_top = True
#show_vel_top_pert = True
#show_disp_top = True
#show_disp_front = True
show_axes = True
#show_axis_labels = True
show_mini_axis = True
show_boundary = True
# Visualisation modules in string form for file-names
vis_modules = [show_density, show_density_pert, show_mag, show_mag_scale,
show_mag_fade, show_mag_vec, show_vel_front, show_vel_front_pert,
show_vel_top, show_vel_top_pert, show_disp_top, show_disp_front]
vis_modules_strings = ['show_density', 'show_density_pert', 'show_mag', 'show_mag_scale',
'show_mag_fade', 'show_mag_vec', 'show_vel_front', 'show_vel_front_pert',
'show_vel_top', 'show_vel_top_pert', 'show_disp_top', 'show_disp_front']
vis_mod_string = ''
for i, j in enumerate(vis_modules):
if vis_modules[i]:
vis_mod_string = vis_mod_string + vis_modules_strings[i][5:] + '_'
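# For example, with only show_mag enabled among the modules listed above,
# vis_mod_string ends up as 'mag_' (each enabled flag contributes its name
# without the 'show_' prefix, followed by an underscore).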
# Set to True if you would like the dispersion diagram with chosen mode highlighted.
show_dispersion = False
#show_dispersion = True
# Wanna see the animation? Of course you do
#show_animation = False
show_animation = True
# Basic plot to see which eigensolutions have been found.
show_quick_plot = False
#show_quick_plot = True
# Video resolution
#res = (1920,1080) # There is a problem with this resolution- height must be odd number - Mayavi bug apparently
res = tuple(101 * np.array((16,9)))
#res = tuple(51 * np.array((16,9)))
#res = tuple(21 * np.array((16,9)))
number_of_frames = 1
# Frames per second of output video
fps = 20
#save_images = False
save_images = True
make_video = False
#make_video = True
# Where should I save the animation images/videos?
os.path.abspath(os.curdir)
os.chdir('..')
save_directory = os.path.join(os.path.abspath(os.curdir), '3D_vis_animations')
# Where should I save the dispersion diagrams?
save_dispersion_diagram_directory = os.path.join(os.path.abspath(os.curdir), '3D_vis_dispersion_diagrams')
# ================================
# Visualisation set-up
# ================================
# Variable definitions (for reference):
# x = k*x
# y = k*y
# z = k*z
# W = omega/k
# K = k*x_0
# t = omega*t
# Loop through selected modes
for mode_ind in [0]:#range(8,14): # for all others. REMEMBER SBB parameters
#for mode_ind in [14,15]: #for fast body surf. REMEMBER SBS parameters
#for mode_ind in [16, 17]:
#for mode_ind in [13]: #for an individual mode
#for mode_ind in range(2,14):
if mode_ind not in range(len(mode_options)):
raise NameError('Mode not in mode_options')
# (note that fast surface modes, i.e. 14 and 15, can only be
# found with SBS parameters in slab_functions...)
mode = mode_options[mode_ind]
# Specify oscillation parameters
if 'slow' in mode and 'surf' in mode or 'alfven' in mode:
K = 2.
elif 'slow' in mode and 'body' in mode:
K = 8.
elif 'fast' in mode and 'body-1' in mode:
K = 8.
elif 'fast' in mode and 'body-2' in mode:
K = 15.
elif 'fast' in mode and 'body-3' in mode:
K = 22.
elif 'fast' in mode and 'surf' in mode:
K = 8.
else:
raise NameError('Mode not found')
# Specify density ratio R1 := rho_1 / rho_0
    # R1 = 1.5 # Higher density on left than right
# R1 = 1.8
# R1 = 1.9 # Disp_diagram will only work for R1=1.5, 1.8, 2.0
R1 = 2. # Symmetric slab
# Reduce number of variables in dispersion relation
disp_rel_partial = partial(sf.disp_rel_asym, R1=R1)
# find eigenfrequencies W (= omega/k) within the range Wrange for the given parameters.
Wrange1 = np.linspace(0., sf.cT, 11)
Wrange2 = np.linspace(sf.cT, sf.c0, 401)
Wrange3 = np.linspace(sf.c0, sf.c2, 11)
Woptions_slow_surf = np.real(tool.point_find(disp_rel_partial, np.array(K), Wrange1, args=None).transpose())
Woptions_slow_body = np.real(tool.point_find(disp_rel_partial, np.array(K), Wrange2, args=None).transpose())
Woptions_fast = np.real(tool.point_find(disp_rel_partial, np.array(K), Wrange3, args=None).transpose())
# Remove W values that are very close to characteristic speeds - these are spurious solutions
tol = 1e-2
indices_to_rm = []
for i, w in enumerate(Woptions_slow_surf):
spurious_roots_diff = abs(np.array([w, w - sf.c0, w - sf.c1(R1), w - sf.c2, w - sf.vA]))
if min(spurious_roots_diff) < tol or w < 0 or w > sf.cT:
indices_to_rm.append(i)
Woptions_slow_surf = np.delete(Woptions_slow_surf, indices_to_rm)
Woptions_slow_surf.sort()
indices_to_rm = []
for i, w in enumerate(Woptions_slow_body):
spurious_roots_diff = abs(np.array([w, w - sf.c0, w - sf.c1(R1), w - sf.c2, w - sf.vA]))
if min(spurious_roots_diff) < tol or w < sf.cT or w > sf.c0:
indices_to_rm.append(i)
Woptions_slow_body = np.delete(Woptions_slow_body, indices_to_rm)
Woptions_slow_body.sort()
indices_to_rm = []
for i, w in enumerate(Woptions_fast):
spurious_roots_diff = abs(np.array([w, w - sf.c0, w - sf.c1(R1), w - sf.c2, w - sf.vA]))
        if min(spurious_roots_diff) < tol or w < sf.c0 or w > min(sf.c1(R1), sf.c2):
indices_to_rm.append(i)
Woptions_fast = np.delete(Woptions_fast, indices_to_rm)
Woptions_fast.sort()
# remove any higher order slow body modes - we only want to do the first 3 saus/kink
if len(Woptions_slow_body) > 6:
Woptions_slow_body = np.delete(Woptions_slow_body, range(len(Woptions_slow_body) - 6))
Woptions = np.concatenate((Woptions_slow_surf, Woptions_slow_body, Woptions_fast))
# set W to be the eigenfrequency for the requested mode
if 'fast-saus-body' in mode or 'fast-kink-surf' in mode:
W = Woptions_fast[-2]
elif 'fast-kink-body' in mode or 'fast-saus-surf' in mode:
W = Woptions_fast[-1]
elif 'slow' in mode and 'surf' in mode:
W = Woptions_slow_surf[mode_ind]
elif 'slow' in mode and 'body' in mode:
W = Woptions_slow_body[mode_ind-2]
if 'alfven' in mode:
W = sf.vA
else:
W = np.real(W)
# Quick plot to see if we are hitting correct mode
if show_quick_plot:
plt.plot([K] * len(Woptions), Woptions, '.')
plt.plot(K+0.5, W, 'go')
plt.xlim([0,23])
plt.show()
# ================================
# Dispersion diagram
# ================================
if show_dispersion:
if 'alfven' in mode:
            raise NameError("Dispersion plot requested for an alfven mode. Can't do that.")
dispersion_diagram.dispersion_diagram(mode_options, mode,
disp_rel_partial, K, W, R1)
# plt.tight_layout() # seems to make it chop the sides off with this
plt.savefig(os.path.join(save_dispersion_diagram_directory, 'R1_' + str(R1) + '_' + mode + '.png') )
plt.close()
# ================================
# Animation
# ================================
if show_animation:
print('Starting ' + mode)
# set grid parameters
xmin = -2.*K
xmax = 2.*K
ymin = 0.
ymax = 4.
zmin = 0.
zmax = 2*np.pi
# You can change ny but be careful changing nx, nz.
nx = 300#100 #100 #300 gives us reduced bouncing of field lines for the same video size, but there is significant computational cost.
ny = 300#100 #100 #100#20 #100
nz = 300#100 #100
nt = number_of_frames
if nz % nt != 0:
print("nt doesnt divide nz so there may be a problem with chopping in z direction for each time step")
t_start = 0.
t_end = zmax
t = t_start
xvals = np.linspace(xmin, xmax, nx)
yvals = np.linspace(ymin, ymax, ny)
zvals = np.linspace(zmin, zmax, nz, endpoint=False) # A fudge to give the height as exactly one wavelength
x_spacing = max(nx, ny, nz) / nx
y_spacing = max(nx, ny, nz) / ny
z_spacing = max(nx, ny, nz) / nz
# For masking points for plotting vector fields- have to do it manually due to Mayavi bug
mod = int(4 * nx / 100)
mod_y = int(np.ceil(mod / y_spacing))
# Get the data xi=displacement, v=velocity, b=mag field
if show_disp_top or show_disp_front:
xixvals = np.real(np.repeat(sf.xix(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
xizvals = np.real(np.repeat(sf.xiz(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
xiyvals = np.real(np.repeat(sf.xiy(mode, xvals, zvals, t, W, K)[:, :, np.newaxis], ny, axis=2))
if show_vel_front or show_vel_top:
vxvals = np.real(np.repeat(sf.vx(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
vzvals = np.real(np.repeat(sf.vz(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
vyvals = np.real(np.repeat(sf.vy(mode, xvals, zvals, t, K)[:, :, np.newaxis], ny, axis=2))
if show_vel_front_pert or show_vel_top_pert:
vxvals = np.real(np.repeat(sf.vx_pert(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
vzvals = np.real(np.repeat(sf.vz_pert(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
vyvals = np.zeros_like(vxvals)
# Axis is defined on the mag field so we have to set up this data
bxvals = np.real(np.repeat(sf.bx(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
byvals = np.real(np.repeat(sf.by(mode, xvals, zvals, t, K)[:, :, np.newaxis], ny, axis=2))
bz_eq3d = np.repeat(sf.bz_eq(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2)
bzvals = np.real(np.repeat(-sf.bz(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2) +
bz_eq3d)
# displacement at the right and left boundaries
if show_boundary:
xix_boundary_r_vals = np.real(np.repeat(K + sf.xix_boundary(mode, zvals, t, W, K, R1, boundary='r')[:, np.newaxis], ny, axis=1))
xix_boundary_l_vals = np.real(np.repeat(-K + sf.xix_boundary(mode, zvals, t, W, K, R1, boundary='l')[:, np.newaxis], ny, axis=1))
if show_density:
rho_vals = np.real(np.repeat(sf.rho(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
if show_density_pert:
rho_vals = np.real(np.repeat(sf.rho_pert(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
bxvals_t = bxvals
byvals_t = byvals
bzvals_t = bzvals
if show_disp_top or show_disp_front:
xixvals_t = xixvals
xiyvals_t = xiyvals
xizvals_t = xizvals
if show_vel_top or show_vel_top_pert or show_vel_front or show_vel_front_pert:
vxvals_t = vxvals
vyvals_t = vyvals
vzvals_t = vzvals
if show_boundary:
xix_boundary_r_vals_t = xix_boundary_r_vals
xix_boundary_l_vals_t = xix_boundary_l_vals
if show_density or show_density_pert:
rho_vals_t = rho_vals
# ================================
# Starting figure and visualisation modules
# ================================
zgrid_zy, ygrid_zy = np.mgrid[0:nz:(nz)*1j,
0:ny:(ny)*1j]
fig = mlab.figure(size=res) # (1920, 1080) for 1080p , tuple(101 * np.array((16,9))) #16:9 aspect ratio for video upload
# Spacing of grid so that we can display a visualisation cube without having the same number of grid points in each dimension
spacing = np.array([x_spacing, z_spacing, y_spacing])
if show_density or show_density_pert:
# Scalar field density
rho = mlab.pipeline.scalar_field(rho_vals_t, name="density", figure=fig)
rho.spacing = spacing
mpf.volume_red_blue(rho, rho_vals_t)
#Masking points
if show_mag_vec:
bxvals_mask_front_t, byvals_mask_front_t, bzvals_mask_front_t = mpf.mask_points(bxvals_t, byvals_t, bzvals_t,
'front', mod, mod_y)
if show_disp_top:
xixvals_mask_top_t, xiyvals_mask_top_t, xizvals_mask_top_t = mpf.mask_points(xixvals_t, xiyvals_t, xizvals_t,
'top', mod, mod_y)
if show_disp_front:
xixvals_mask_front_t, xiyvals_mask_front_t, xizvals_mask_front_t = mpf.mask_points(xixvals_t, xiyvals_t, xizvals_t,
'front', mod, mod_y)
if show_vel_top or show_vel_top_pert:
vxvals_mask_top_t, vyvals_mask_top_t, vzvals_mask_top_t = mpf.mask_points(vxvals_t, vyvals_t, vzvals_t,
'top', mod, mod_y)
if show_vel_front or show_vel_front_pert:
vxvals_mask_front_t, vyvals_mask_front_t, vzvals_mask_front_t = mpf.mask_points(vxvals_t, vyvals_t, vzvals_t,
'front', mod, mod_y)
xgrid, zgrid, ygrid = np.mgrid[0:nx:(nx)*1j,
0:nz:(nz)*1j,
0:ny:(ny)*1j]
field = mlab.pipeline.vector_field(bxvals_t, bzvals_t, byvals_t, name="B field",
figure=fig, scalars=zgrid)
field.spacing = spacing
if show_axes:
mpf.axes_no_label(field)
if show_mini_axis:
mpf.mini_axes()
if uniform_light:
#uniform lighting, but if we turn shading of volumes off, we are ok without
mpf.uniform_lighting(fig)
#Black background
mpf.background_colour(fig, (0., 0., 0.))
scalefactor = 8. * nx / 100. # scale factor for direction field vectors
# Set up visualisation modules
if show_mag_vec:
bdirfield_front = mlab.pipeline.vector_field(bxvals_mask_front_t, bzvals_mask_front_t,
byvals_mask_front_t, name="B field front",
figure=fig)
bdirfield_front.spacing = spacing
mpf.vector_cut_plane(bdirfield_front, 'front', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
if show_vel_top or show_vel_top_pert:
vdirfield_top = mlab.pipeline.vector_field(vxvals_mask_top_t, np.zeros_like(vxvals_mask_top_t),
vyvals_mask_top_t, name="V field top",
figure=fig)
vdirfield_top.spacing = spacing
mpf.vector_cut_plane(vdirfield_top, 'top', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
if show_vel_front or show_vel_front_pert:
vdirfield_front = mlab.pipeline.vector_field(vxvals_mask_front_t, vzvals_mask_front_t,
vyvals_mask_front_t, name="V field front",
figure=fig)
vdirfield_front.spacing = spacing
mpf.vector_cut_plane(vdirfield_front,'front', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
if show_disp_top:
xidirfield_top = mlab.pipeline.vector_field(xixvals_mask_top_t, np.zeros_like(xixvals_mask_top_t),
xiyvals_mask_top_t, name="Xi field top",
figure=fig)
xidirfield_top.spacing = spacing
mpf.vector_cut_plane(xidirfield_top, 'top', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
if show_disp_front:
xidirfield_front = mlab.pipeline.vector_field(xixvals_mask_front_t, xizvals_mask_front_t,
xiyvals_mask_front_t, name="Xi field front",
figure=fig)
xidirfield_front.spacing = spacing
mpf.vector_cut_plane(xidirfield_front, 'front', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
# Loop through time
for t_ind in range(nt):
if t_ind == 0:
bxvals_t = bxvals
byvals_t = byvals
bzvals_t = bzvals
if show_disp_top or show_disp_front:
xixvals_t = xixvals
xiyvals_t = xiyvals
xizvals_t = xizvals
if show_vel_top or show_vel_top_pert or show_vel_front or show_vel_front_pert:
vxvals_t = vxvals
vyvals_t = vyvals
vzvals_t = vzvals
if show_boundary:
xix_boundary_r_vals_t = xix_boundary_r_vals
xix_boundary_l_vals_t = xix_boundary_l_vals
if show_density or show_density_pert:
rho_vals_t = rho_vals
else:
bxvals = np.real(np.repeat(sf.bx(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
byvals = np.real(np.repeat(sf.by(mode, xvals, zvals, t, K)[:, :, np.newaxis], ny, axis=2))
bz_eq3d = np.repeat(sf.bz_eq(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2)
bzvals = np.real(np.repeat(-sf.bz(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2) +
bz_eq3d)
bxvals_t = bxvals
byvals_t = byvals
bzvals_t = bzvals
# Update mag field data
field.mlab_source.set(u=bxvals_t, v=bzvals_t, w=byvals_t)
# Update mag field visualisation module
if show_mag_vec:
bxvals_mask_front_t, byvals_mask_front_t, bzvals_mask_front_t = mpf.mask_points(bxvals_t, byvals_t, bzvals_t,
'front', mod, mod_y)
bdirfield_front.mlab_source.set(u=bxvals_mask_front_t, v=bzvals_mask_front_t, w=byvals_mask_front_t)
# Update displacement field data
if show_disp_top or show_disp_front:
xixvals_split = np.split(xixvals, [nz - (nz / nt) * t_ind], axis=1)
xiyvals_split = np.split(xiyvals, [nz - (nz / nt) * t_ind], axis=1)
xizvals_split = np.split(xizvals, [nz - (nz / nt) * t_ind], axis=1)
xixvals_t = np.concatenate((xixvals_split[1], xixvals_split[0]), axis=1)
xiyvals_t = np.concatenate((xiyvals_split[1], xiyvals_split[0]), axis=1)
xizvals_t = np.concatenate((xizvals_split[1], xizvals_split[0]), axis=1)
# Update displacement field visualisation module
if show_disp_top:
xixvals_mask_top_t, xiyvals_mask_top_t, xizvals_mask_top_t = mpf.mask_points(xixvals_t, xiyvals_t, xizvals_t,
'top', mod, mod_y)
xidirfield_top.mlab_source.set(u=xixvals_mask_top_t, v=np.zeros_like(xixvals_mask_top_t), w=xiyvals_mask_top_t)
if show_disp_front:
xixvals_mask_front_t, xiyvals_mask_front_t, xizvals_mask_front_t = mpf.mask_points(xixvals_t, xiyvals_t, xizvals_t,
'front', mod, mod_y)
xidirfield_front.mlab_source.set(u=xixvals_mask_front_t, v=xizvals_mask_front_t, w=xiyvals_mask_front_t)
# Update velocity field data
if show_vel_top or show_vel_top_pert or show_vel_front or show_vel_front_pert:
vxvals_split = np.split(vxvals, [nz - (nz / nt) * t_ind], axis=1)
vyvals_split = np.split(vyvals, [nz - (nz / nt) * t_ind], axis=1)
vzvals_split = np.split(vzvals, [nz - (nz / nt) * t_ind], axis=1)
vxvals_t = np.concatenate((vxvals_split[1], vxvals_split[0]), axis=1)
vyvals_t = np.concatenate((vyvals_split[1], vyvals_split[0]), axis=1)
vzvals_t = np.concatenate((vzvals_split[1], vzvals_split[0]), axis=1)
# Update velocity field visualisation module
if show_vel_top or show_vel_top_pert:
vxvals_mask_top_t, vyvals_mask_top_t, vzvals_mask_top_t = mpf.mask_points(vxvals_t, vyvals_t, vzvals_t,
'top', mod, mod_y)
vdirfield_top.mlab_source.set(u=vxvals_mask_top_t, v=np.zeros_like(vxvals_mask_top_t), w=vyvals_mask_top_t)
if show_vel_front or show_vel_front_pert:
vxvals_mask_front_t, vyvals_mask_front_t, vzvals_mask_front_t = mpf.mask_points(vxvals_t, vyvals_t, vzvals_t,
'front', mod, mod_y)
vdirfield_front.mlab_source.set(u=vxvals_mask_front_t, v=vzvals_mask_front_t, w=vyvals_mask_front_t)
# Update boundary displacement data
if show_boundary:
xix_boundary_r_vals_split = np.split(xix_boundary_r_vals, [nz - (nz / nt) * t_ind], axis=0)
xix_boundary_l_vals_split = np.split(xix_boundary_l_vals, [nz - (nz / nt) * t_ind], axis=0)
xix_boundary_r_vals_t = np.concatenate((xix_boundary_r_vals_split[1], xix_boundary_r_vals_split[0]), axis=0)
xix_boundary_l_vals_t = np.concatenate((xix_boundary_l_vals_split[1], xix_boundary_l_vals_split[0]), axis=0)
# Update density data
if show_density or show_density_pert:
rho_vals_split = np.split(rho_vals, [nz - (nz / nt) * t_ind], axis=1)
rho_vals_t = np.concatenate((rho_vals_split[1], rho_vals_split[0]), axis=1)
rho.mlab_source.set(scalars=rho_vals_t)
# Boundary data - Letting mayavi know where to plot the boundary
if show_boundary:
ext_min_r = ((nx) * (xix_boundary_r_vals_t.min() - xmin) / (xmax - xmin)) * x_spacing
ext_max_r = ((nx) * (xix_boundary_r_vals_t.max() - xmin) / (xmax - xmin)) * x_spacing
ext_min_l = ((nx) * (xix_boundary_l_vals_t.min() - xmin) / (xmax - xmin)) * x_spacing
ext_max_l = ((nx) * (xix_boundary_l_vals_t.max() - xmin) / (xmax - xmin)) * x_spacing
#Make field lines
if show_mag:
# move seed points up with phase speed. - Bit of a fudge.
# Create an array of points for which we want mag field seeds
nx_seed = 9
ny_seed = 13
start_x = 30. * nx / 100.
end_x = nx+1 - start_x
start_y = 1.
if ny == 20: # so that the lines dont go right up to the edge of the box
end_y = ny - 1.
elif ny == 100:
end_y = ny - 2.
elif ny == 300:
end_y = ny - 6.
else:
end_y = ny - 1
seeds=[]
dx_res = (end_x - start_x) / (nx_seed-1)
dy_res = (end_y - start_y) / (ny_seed-1)
for j in range(ny_seed):
for i in range(nx_seed):
x = start_x + (i * dx_res) * x_spacing
y = start_y + (j * dy_res) * y_spacing
z = 1. + (t_start + t_ind*(t_end - t_start)/nt)/zmax * nz
seeds.append((x,z,y))
if 'alfven' in mode:
for i in range(nx_seed):
del seeds[0]
del seeds[-1]
# Remove previous field lines - field lines cannot be updated, just the data that they are built from
if t_ind != 0:
field_lines.remove() # field_lines is defined in first go through loop
field_lines = SeedStreamline(seed_points=seeds)
# Field line visualisation tinkering
field_lines.stream_tracer.integration_direction='both'
field_lines.streamline_type = 'tube'
field_lines.stream_tracer.maximum_propagation = nz * 2
field_lines.tube_filter.number_of_sides = 20
field_lines.tube_filter.radius = 0.7 * max(nx, ny, nz) / 100.
field_lines.tube_filter.capping = True
field_lines.actor.property.opacity = 1.0
field.add_child(field_lines)
module_manager = field_lines.parent
# Colormap of magnetic field strength plotted on the field lines
if show_mag_scale:
module_manager.scalar_lut_manager.lut_mode = 'coolwarm'
module_manager.scalar_lut_manager.data_range=[7,18]
else:
mag_lut = module_manager.scalar_lut_manager.lut.table.to_array()
mag_lut[:,0] = [220]*256
mag_lut[:,1] = [20]*256
mag_lut[:,2] = [20]*256
module_manager.scalar_lut_manager.lut.table = mag_lut
if show_mag_fade:
mpf.colormap_fade(module_manager, fade_value=20)
# Which views do you want to show? Options are defined at the start
views_selected = [0]#[0,1,4,5,6] #range(7) #[2,3]
for view_ind, view_selected in enumerate(views_selected):
view = view_options[view_selected]
# Display boundary - cannot be updated each time
if show_boundary:
# Boundaries should look different depending on view
if view == 'front-parallel':
#remove previous boundaries
if t != 0 or view_ind != 0:
boundary_r.remove()
boundary_l.remove()
# Make a fading colormap by changing opacity at ends
lut = np.reshape(np.array([150, 150, 150, 255]*256), (256,4))
fade_value = 125
lut[:fade_value,-1] = np.linspace(0, 255, fade_value)
lut[-fade_value:,-1] = np.linspace(255, 0, fade_value)
# Set up boundary visualisation
boundary_r = mlab.mesh(xix_boundary_r_vals_t, zgrid_zy, ygrid_zy,
extent=[ext_min_r, ext_max_r, 1, nz, 0, (ny-1) * y_spacing],
opacity=1., representation='wireframe',
line_width=12., scalars=zgrid_zy)
boundary_l = mlab.mesh(xix_boundary_l_vals_t, zgrid_zy, ygrid_zy,
extent=[ext_min_l, ext_max_l, 1, nz, 0, (ny-1) * y_spacing],
opacity=1., representation='wireframe',
line_width=12., scalars=zgrid_zy)
# Boundary color and other options
boundary_r.module_manager.scalar_lut_manager.lut.table = lut
boundary_l.module_manager.scalar_lut_manager.lut.table = lut
boundary_r.actor.property.lighting = False
boundary_r.actor.property.shading = False
boundary_l.actor.property.lighting = False
boundary_l.actor.property.shading = False
else:
#remove previous boundaries
if t != 0 or view_ind != 0:
boundary_r.remove()
boundary_l.remove()
# Make a fading colormap by changing opacity at ends
lut = np.reshape(np.array([150, 150, 150, 255]*256), (256,4))
fade_value = 20
lut[:fade_value,-1] = np.linspace(0, 255, fade_value)
lut[-fade_value:,-1] = np.linspace(255, 0, fade_value)
# Set up boundary visualisation
boundary_r = mlab.mesh(xix_boundary_r_vals_t, zgrid_zy, ygrid_zy,
extent=[ext_min_r, ext_max_r, 1, nz, 0, (ny-1) * y_spacing],
opacity=0.7, scalars=zgrid_zy)
boundary_l = mlab.mesh(xix_boundary_l_vals_t, zgrid_zy, ygrid_zy,
extent=[ext_min_l, ext_max_l, 1, nz, 0, (ny-1) * y_spacing],
opacity=0.7, scalars=zgrid_zy)
# Boundary color and other options
boundary_r.module_manager.scalar_lut_manager.lut.table = lut
boundary_l.module_manager.scalar_lut_manager.lut.table = lut
boundary_r.actor.property.lighting = False
boundary_r.actor.property.shading = False
boundary_l.actor.property.lighting = False
boundary_l.actor.property.shading = False
# Set viewing angle - For some unknown reason we must redefine the camera position each time.
# This is something to do with the boundaries being replaced each time.
mpf.view_position(fig, view, nx, ny, nz)
if save_images:
prefix = 'R1_'+str(R1) + '_' + mode + '_' + vis_mod_string + view + '_'# + '_norho_'
mlab.savefig(os.path.join(save_directory, prefix + str(t_ind+1) + '.png'))
if t_ind == nt - 1:
if make_video:
i2v.image2video(filepath=save_directory, prefix=prefix,
output_name=prefix+'video', out_extension='mp4',
fps=fps, n_loops=4, delete_images=True,
delete_old_videos=True, res=res[1])
# Log: to keep us updated with progress
if t_ind % 5 == 4:
print('Finished frame number ' + str(t_ind + 1) + ' out of ' + str(number_of_frames))
#Release some memory after each time step
gc.collect()
#step t forward
t = t + (t_end - t_start) / nt
        # Close the Mayavi window each time if we want to make a video
if make_video:
mlab.close(fig)
print('Finished ' + mode)
|
SP2RC-Coding-Club/Codes
|
13_07_2017/3D_slab_modes.py
|
Python
|
mit
| 35,096
|
[
"Mayavi"
] |
b64507ac6de475e036aee59f2a628b3140248f3a159d6b0d641b0a8fe9c4ba16
|
from __future__ import division, print_function
import numpy as np
from bct.utils import BCTParamError, binarize
from bct.utils import pick_four_unique_nodes_quickly
from .clustering import number_of_components
def latmio_dir_connected(R, itr, D=None):
'''
This function "latticizes" a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions. The
function also ensures that the randomized network maintains
connectedness, the ability for every node to reach every other node in
the network. The input network for this function must be connected.
Parameters
----------
R : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
'''
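    # Hedged usage sketch (assuming a connected, directed adjacency matrix `A`
    # as an NxN numpy array; arguments are illustrative):
    #     Rlatt, Rrp, ind_rp, eff = latmio_dir_connected(A, itr=10)
    #     # Rlatt is the latticized copy of A in the original node ordering.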
n = len(R)
ind_rp = np.random.permutation(n) # random permutation of nodes
R = R.copy()
R = R[np.ix_(ind_rp, ind_rp)]
# create distance to diagonal matrix if not specified by user
if D is None:
D = np.zeros((n, n))
un = np.mod(range(1, n), n)
um = np.mod(range(n - 1, 0, -1), n)
u = np.append((0,), np.where(un < um, un, um))
for v in range(int(np.ceil(n / 2))):
D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1])
D[v, :] = D[n - v - 1, :][::-1]
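        # For example, for n = 4 this yields the circular distance-to-diagonal
        # matrix
        #     D = [[0, 1, 2, 1],
        #          [1, 0, 1, 2],
        #          [2, 1, 0, 1],
        #          [1, 2, 1, 0]]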
i, j = np.where(R)
k = len(i)
itr *= k
# maximal number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
rewire = True
while True:
e1 = np.random.randint(k)
e2 = np.random.randint(k)
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break
# rewiring condition
if not (R[a, d] or R[c, b]):
# lattice condition
if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]):
# connectedness condition
if not (np.any((R[a, c], R[d, b], R[d, c])) and
np.any((R[c, a], R[b, d], R[b, a]))):
P = R[(a, c), :].copy()
P[0, b] = 0
P[0, d] = 1
P[1, d] = 0
P[1, b] = 1
PN = P.copy()
PN[0, a] = 1
PN[1, c] = 1
while True:
P[0, :] = np.any(R[P[0, :] != 0, :], axis=0)
P[1, :] = np.any(R[P[1, :] != 0, :], axis=0)
P *= np.logical_not(PN)
PN += P
if not np.all(np.any(P, axis=1)):
rewire = False
break
elif np.any(PN[0, (b, c)]) and np.any(PN[1, (d, a)]):
break
# end connectedness testing
if rewire: # reassign edges
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])] # reverse random permutation
return Rlatt, R, ind_rp, eff
def latmio_dir(R, itr, D=None):
'''
This function "latticizes" a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions.
Parameters
----------
R : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
'''
n = len(R)
ind_rp = np.random.permutation(n) # randomly reorder matrix
R = R.copy()
R = R[np.ix_(ind_rp, ind_rp)]
# create distance to diagonal matrix if not specified by user
if D is None:
D = np.zeros((n, n))
un = np.mod(range(1, n), n)
um = np.mod(range(n - 1, 0, -1), n)
u = np.append((0,), np.where(un < um, un, um))
for v in range(int(np.ceil(n / 2))):
D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1])
D[v, :] = D[n - v - 1, :][::-1]
i, j = np.where(R)
k = len(i)
itr *= k
# maximal number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1 = np.random.randint(k)
e2 = np.random.randint(k)
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break
# rewiring condition
if not (R[a, d] or R[c, b]):
# lattice condition
if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]):
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])] # reverse random permutation
return Rlatt, R, ind_rp, eff
def latmio_und_connected(R, itr, D=None):
'''
This function "latticizes" an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks. The function also ensures that the
randomized network maintains connectedness, the ability for every node
to reach every other node in the network. The input network for this
function must be connected.
Parameters
----------
R : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
'''
if not np.all(R == R.T):
raise BCTParamError("Input must be undirected")
if number_of_components(R) > 1:
raise BCTParamError("Input is not connected")
n = len(R)
ind_rp = np.random.permutation(n) # randomly reorder matrix
R = R.copy()
R = R[np.ix_(ind_rp, ind_rp)]
if D is None:
D = np.zeros((n, n))
un = np.mod(range(1, n), n)
um = np.mod(range(n - 1, 0, -1), n)
u = np.append((0,), np.where(un < um, un, um))
for v in range(int(np.ceil(n / 2))):
D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1])
D[v, :] = D[n - v - 1, :][::-1]
i, j = np.where(np.tril(R))
k = len(i)
itr *= k
# maximal number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1) / 2))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts:
rewire = True
while True:
e1 = np.random.randint(k)
e2 = np.random.randint(k)
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break
if np.random.random() > .5:
i.setflags(write=True)
j.setflags(write=True)
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (R[a, d] or R[c, b]):
# lattice condition
if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]):
# connectedness condition
if not (R[a, c] or R[b, d]):
P = R[(a, d), :].copy()
P[0, b] = 0
P[1, c] = 0
PN = P.copy()
PN[:, d] = 1
PN[:, a] = 1
while True:
P[0, :] = np.any(R[P[0, :] != 0, :], axis=0)
P[1, :] = np.any(R[P[1, :] != 0, :], axis=0)
P *= np.logical_not(PN)
if not np.all(np.any(P, axis=1)):
rewire = False
break
elif np.any(P[:, (b, c)]):
break
PN += P
# end connectedness testing
if rewire: # reassign edges
R[a, d] = R[a, b]
R[a, b] = 0
R[d, a] = R[b, a]
R[b, a] = 0
R[c, b] = R[c, d]
R[c, d] = 0
R[b, c] = R[d, c]
R[d, c] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b
eff += 1
break
att += 1
Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])]
return Rlatt, R, ind_rp, eff
def latmio_und(R, itr, D=None):
'''
This function "latticizes" an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks.
Parameters
----------
R : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
'''
n = len(R)
ind_rp = np.random.permutation(n) # randomly reorder matrix
R = R.copy()
R = R[np.ix_(ind_rp, ind_rp)]
if D is None:
D = np.zeros((n, n))
un = np.mod(range(1, n), n)
um = np.mod(range(n - 1, 0, -1), n)
u = np.append((0,), np.where(un < um, un, um))
for v in range(int(np.ceil(n / 2))):
D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1])
D[v, :] = D[n - v - 1, :][::-1]
i, j = np.where(np.tril(R))
k = len(i)
itr *= k
# maximal number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1) / 2))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts:
while True:
e1 = np.random.randint(k)
e2 = np.random.randint(k)
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break
if np.random.random() > .5:
i.setflags(write=True)
j.setflags(write=True)
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (R[a, d] or R[c, b]):
# lattice condition
if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]):
R[a, d] = R[a, b]
R[a, b] = 0
R[d, a] = R[b, a]
R[b, a] = 0
R[c, b] = R[c, d]
R[c, d] = 0
R[b, c] = R[d, c]
R[d, c] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b
eff += 1
break
att += 1
Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])]
return Rlatt, R, ind_rp, eff
def makeevenCIJ(n, k, sz_cl):
'''
This function generates a random, directed network with a specified
number of fully connected modules linked together by evenly distributed
remaining random connections.
Parameters
----------
N : int
number of vertices (must be power of 2)
K : int
number of edges
sz_cl : int
size of clusters (must be power of 2)
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
N must be a power of 2.
A warning is generated if all modules contain more edges than K.
Cluster size is 2^sz_cl;
'''
# compute number of hierarchical levels and adjust cluster size
mx_lvl = int(np.floor(np.log2(n)))
sz_cl -= 1
# make a stupid little template
t = np.ones((2, 2)) * 2
# check n against the number of levels
Nlvl = 2**mx_lvl
if Nlvl != n:
print("Warning: n must be a power of 2")
n = Nlvl
# create hierarchical template
for lvl in range(1, mx_lvl):
s = 2**(lvl + 1)
CIJ = np.ones((s, s))
grp1 = range(int(s / 2))
grp2 = range(int(s / 2), s)
ix1 = np.add.outer(np.array(grp1) * s, grp1).flatten()
ix2 = np.add.outer(np.array(grp2) * s, grp2).flatten()
CIJ.flat[ix1] = t # numpy indexing is teh sucks :(
CIJ.flat[ix2] = t
CIJ += 1
t = CIJ.copy()
CIJ -= (np.ones((s, s)) + mx_lvl * np.eye(s))
# assign connection probabilities
CIJp = (CIJ >= (mx_lvl - sz_cl))
# determine nr of non-cluster connections left and their possible positions
rem_k = k - np.size(np.where(CIJp.flatten()))
if rem_k < 0:
print("Warning: K is too small, output matrix contains clusters only")
return CIJp
a, b = np.where(np.logical_not(CIJp + np.eye(n)))
    # assign rem_k randomly distributed connections
rp = np.random.permutation(len(a))
a = a[rp[:rem_k]]
b = b[rp[:rem_k]]
for ai, bi in zip(a, b):
CIJp[ai, bi] = 1
return np.array(CIJp, dtype=int)
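# Added illustration (not part of the original BCT port): a hedged sketch of
# makeevenCIJ with made-up sizes; n must be a power of 2 and k should exceed
# the number of within-module edges, otherwise only the clusters are returned.
def _example_makeevenCIJ(n=16, k=80, sz_cl=2):
    cij = makeevenCIJ(n, k, sz_cl)
    assert cij.shape == (n, n)
    return cij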
def makefractalCIJ(mx_lvl, E, sz_cl):
'''
This function generates a directed network with a hierarchical modular
organization. All modules are fully connected and connection density
decays as 1/(E^n), with n = index of hierarchical level.
Parameters
----------
mx_lvl : int
number of hierarchical levels, N = 2^mx_lvl
E : int
connection density fall off per level
sz_cl : int
size of clusters (must be power of 2)
Returns
-------
CIJ : NxN np.ndarray
connection matrix
K : int
number of connections present in output CIJ
'''
# make a stupid little template
t = np.ones((2, 2)) * 2
# compute N and cluster size
n = 2**mx_lvl
sz_cl -= 1
for lvl in range(1, mx_lvl):
s = 2**(lvl + 1)
CIJ = np.ones((s, s))
grp1 = range(int(s / 2))
grp2 = range(int(s / 2), s)
ix1 = np.add.outer(np.array(grp1) * s, grp1).flatten()
ix2 = np.add.outer(np.array(grp2) * s, grp2).flatten()
CIJ.flat[ix1] = t # numpy indexing is teh sucks :(
CIJ.flat[ix2] = t
CIJ += 1
t = CIJ.copy()
CIJ -= (np.ones((s, s)) + mx_lvl * np.eye(s))
# assign connection probabilities
ee = mx_lvl - CIJ - sz_cl
ee = (ee > 0) * ee
prob = (1 / E**ee) * (np.ones((s, s)) - np.eye(s))
CIJ = (prob > np.random.random((n, n)))
# count connections
k = np.sum(CIJ)
return np.array(CIJ, dtype=int), k
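# Added illustration (not part of the original BCT port): a hedged sketch of
# makefractalCIJ; mx_lvl=5 gives a 32-node network whose connection density
# decays as 1/(E**n) with the hierarchical level n.
def _example_makefractalCIJ(mx_lvl=5, e=3.0, sz_cl=2):
    cij, k = makefractalCIJ(mx_lvl, e, sz_cl)
    assert cij.shape == (2 ** mx_lvl, 2 ** mx_lvl)
    return cij, k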
def makerandCIJdegreesfixed(inv, outv):
'''
This function generates a directed random network with a specified
in-degree and out-degree sequence.
Parameters
----------
inv : Nx1 np.ndarray
in-degree vector
outv : Nx1 np.ndarray
out-degree vector
Returns
-------
CIJ : NxN np.ndarray
Notes
-----
Necessary conditions include:
length(in) = length(out) = n
sum(in) = sum(out) = k
in(i), out(i) < n-1
in(i) + out(j) < n+2
in(i) + out(i) < n
No connections are placed on the main diagonal
The algorithm used in this function is not, technically, guaranteed to
terminate. If a valid distribution of in and out degrees is provided,
this function will find it in bounded time with probability
1-(1/(2*(k^2))). This turns out to be a serious problem when
computing infinite degree matrices, but offers good performance
otherwise.
'''
n = len(inv)
k = np.sum(inv)
in_inv = np.zeros((k,))
out_inv = np.zeros((k,))
i_in = 0
i_out = 0
for i in range(n):
in_inv[i_in:i_in + inv[i]] = i
out_inv[i_out:i_out + outv[i]] = i
i_in += inv[i]
i_out += outv[i]
CIJ = np.eye(n)
edges = np.array((out_inv, in_inv[np.random.permutation(k)]))
# create CIJ and check for double edges and self connections
for i in range(k):
if CIJ[edges[0, i], edges[1, i]]:
tried = set()
while True:
if len(tried) == k:
raise BCTParamError('Could not resolve the given '
'in and out vectors')
switch = np.random.randint(k)
while switch in tried:
switch = np.random.randint(k)
if not (CIJ[edges[0, i], edges[1, switch]] or
CIJ[edges[0, switch], edges[1, i]]):
CIJ[edges[0, switch], edges[1, switch]] = 0
CIJ[edges[0, switch], edges[1, i]] = 1
if switch < i:
CIJ[edges[0, switch], edges[1, switch]] = 0
CIJ[edges[0, switch], edges[1, i]] = 1
t = edges[1, i]
edges[1, i] = edges[1, switch]
edges[1, switch] = t
break
tried.add(switch)
else:
CIJ[edges[0, i], edges[1, i]] = 1
CIJ -= np.eye(n)
return CIJ
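# Added illustration (not part of the original BCT port): a hedged sketch of
# makerandCIJdegreesfixed. The requested in/out degree sequences are read off
# a random directed graph, so a realization is known to exist.
def _example_makerandCIJdegreesfixed(n=12, k=40):
    seed = makerandCIJ_dir(n, k)
    inv = np.sum(seed, axis=0).astype(int)    # in-degrees (column sums)
    outv = np.sum(seed, axis=1).astype(int)   # out-degrees (row sums)
    cij = makerandCIJdegreesfixed(inv, outv)
    return cij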
def makerandCIJ_dir(n, k):
'''
This function generates a directed random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
Returns
-------
CIJ : NxN np.ndarray
directed random connection matrix
Notes
-----
no connections are placed on the main diagonal.
'''
ix, = np.where(np.logical_not(np.eye(n)).flat)
rp = np.random.permutation(np.size(ix))
CIJ = np.zeros((n, n))
CIJ.flat[ix[rp][:k]] = 1
return CIJ
def makerandCIJ_und(n, k):
'''
This function generates an undirected random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
Returns
-------
CIJ : NxN np.ndarray
undirected random connection matrix
Notes
-----
no connections are placed on the main diagonal.
'''
ix, = np.where(np.triu(np.logical_not(np.eye(n))).flat)
rp = np.random.permutation(np.size(ix))
CIJ = np.zeros((n, n))
CIJ.flat[ix[rp][:k]] = 1
return CIJ
def makeringlatticeCIJ(n, k):
'''
This function generates a directed lattice network with toroidal
    boundary conditions (i.e. with ring-like "wrapping around").
Parameters
----------
N : int
number of vertices
K : int
number of edges
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
The lattice is made by placing connections as close as possible
to the main diagonal, with wrapping around. No connections are made
on the main diagonal. In/Outdegree is kept approx. constant at K/N.
'''
# initialize
CIJ = np.zeros((n, n))
CIJ1 = np.ones((n, n))
kk = 0
count = 0
seq = range(1, n)
seq2 = range(n - 1, 0, -1)
# fill in
while kk < k:
count += 1
dCIJ = np.triu(CIJ1, seq[count]) - np.triu(CIJ1, seq[count] + 1)
dCIJ2 = np.triu(CIJ1, seq2[count]) - np.triu(CIJ1, seq2[count] + 1)
dCIJ = dCIJ + dCIJ.T + dCIJ2 + dCIJ2.T
CIJ += dCIJ
kk = int(np.sum(CIJ))
# remove excess connections
overby = kk - k
if overby:
i, j = np.where(dCIJ)
rp = np.random.permutation(np.size(i))
for ii in range(overby):
CIJ[i[rp[ii]], j[rp[ii]]] = 0
return CIJ
def maketoeplitzCIJ(n, k, s):
'''
This function generates a directed network with a Gaussian drop-off in
    edge density with increasing distance from the main diagonal. There are
    no toroidal boundary conditions (i.e. no ring-like "wrapping around").
Parameters
----------
N : int
number of vertices
K : int
number of edges
s : float
standard deviation of toeplitz
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
no connections are placed on the main diagonal.
'''
from scipy import linalg, stats
pf = stats.norm.pdf(range(1, n), .5, s)
template = linalg.toeplitz(np.append((0,), pf), r=np.append((0,), pf))
template *= (k / np.sum(template))
CIJ = np.zeros((n, n))
itr = 0
while np.sum(CIJ) != k:
CIJ = (np.random.random((n, n)) < template)
itr += 1
if itr > 10000:
raise BCTParamError('Infinite loop was caught generating toeplitz '
'matrix. This means the matrix could not be resolved with the '
'specified parameters.')
return CIJ
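# Added illustration (not part of the original BCT port): a hedged sketch of
# maketoeplitzCIJ; edge density drops off as a Gaussian with standard
# deviation s away from the main diagonal, and exactly k edges are placed.
def _example_maketoeplitzCIJ(n=24, k=80, s=3.0):
    cij = maketoeplitzCIJ(n, k, s)
    assert int(np.sum(cij)) == k
    return cij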
def null_model_dir_sign(W, bin_swaps=5, wei_freq=.1):
'''
This function randomizes an directed network with positive and
negative weights, while preserving the degree and strength
distributions. This function calls randmio_dir.m
Parameters
----------
W : NxN np.ndarray
directed weighted connection matrix
bin_swaps : int
average number of swaps in each edge binary randomization. Default
value is 5. 0 swaps implies no binary randomization.
wei_freq : float
frequency of weight sorting in weighted randomization. 0<=wei_freq<1.
wei_freq == 1 implies that weights are sorted at each step.
wei_freq == 0.1 implies that weights sorted each 10th step (faster,
default value)
wei_freq == 0 implies no sorting of weights (not recommended)
Returns
-------
W0 : NxN np.ndarray
randomized weighted connection matrix
R : 4-tuple of floats
Correlation coefficients between strength sequences of input and
output connection matrices, rpos_in, rpos_out, rneg_in, rneg_out
Notes
-----
The value of bin_swaps is ignored when binary topology is fully
connected (e.g. when the network has no negative weights).
Randomization may be better (and execution time will be slower) for
higher values of bin_swaps and wei_freq. Higher values of bin_swaps may
enable a more random binary organization, and higher values of wei_freq
may enable a more accurate conservation of strength sequences.
R are the correlation coefficients between positive and negative
in-strength and out-strength sequences of input and output connection
matrices and are used to evaluate the accuracy with which strengths
were preserved. Note that correlation coefficients may be a rough
measure of strength-sequence accuracy and one could implement more
formal tests (such as the Kolmogorov-Smirnov test) if desired.
'''
W = W.copy()
n = len(W)
np.fill_diagonal(W, 0) # clear diagonal
    Ap = (W > 0)  # positive adjmat
    An = (W < 0)  # negative adjmat
if np.size(np.where(Ap.flat)) < (n * (n - 1)):
        W_r, _ = randmio_und_signed(W, bin_swaps)  # randmio_und_signed returns (R, eff)
Ap_r = W_r > 0
An_r = W_r < 0
else:
Ap_r = Ap
An_r = An
W0 = np.zeros((n, n))
for s in (1, -1):
if s == 1:
Acur = Ap
A_rcur = Ap_r
else:
Acur = An
A_rcur = An_r
Si = np.sum(W * Acur, axis=0) # positive in-strength
So = np.sum(W * Acur, axis=1) # positive out-strength
Wv = np.sort(W[Acur].flat) # sorted weights vector
i, j = np.where(A_rcur)
Lij, = np.where(A_rcur.flat) # weights indices
P = np.outer(So, Si)
if wei_freq == 0: # get indices of Lij that sort P
Oind = np.argsort(P.flat[Lij]) # assign corresponding sorted
W0.flat[Lij[Oind]] = s * Wv # weight at this index
else:
wsize = np.size(Wv)
wei_period = np.round(1 / wei_freq) # convert frequency to period
lq = np.arange(wsize, 0, -wei_period, dtype=int)
for m in lq: # iteratively explore at this period
# get indices of Lij that sort P
Oind = np.argsort(P.flat[Lij])
R = np.random.permutation(m)[:np.min((m, wei_period))]
for q, r in enumerate(R):
# choose random index of sorted expected weight
o = Oind[r]
W0.flat[Lij[o]] = s * Wv[r] # assign corresponding weight
# readjust expected weighted probability for i[o],j[o]
f = 1 - Wv[r] / So[i[o]]
P[i[o], :] *= f
f = 1 - Wv[r] / So[j[o]]
P[j[o], :] *= f
# readjust in-strength of i[o]
So[i[o]] -= Wv[r]
# readjust out-strength of j[o]
Si[j[o]] -= Wv[r]
O = Oind[R]
# remove current indices from further consideration
Lij = np.delete(Lij, O)
i = np.delete(i, O)
j = np.delete(j, O)
Wv = np.delete(Wv, O)
rpos_in = np.corrcoef(np.sum(W * (W > 0), axis=0),
np.sum(W0 * (W0 > 0), axis=0))
rpos_ou = np.corrcoef(np.sum(W * (W > 0), axis=1),
np.sum(W0 * (W0 > 0), axis=1))
rneg_in = np.corrcoef(np.sum(-W * (W < 0), axis=0),
np.sum(-W0 * (W0 < 0), axis=0))
rneg_ou = np.corrcoef(np.sum(-W * (W < 0), axis=1),
np.sum(-W0 * (W0 < 0), axis=1))
return W0, (rpos_in[0, 1], rpos_ou[0, 1], rneg_in[0, 1], rneg_ou[0, 1])
def null_model_und_sign(W, bin_swaps=5, wei_freq=.1):
'''
This function randomizes an undirected network with positive and
negative weights, while preserving the degree and strength
distributions. This function calls randmio_und.m
Parameters
----------
W : NxN np.ndarray
undirected weighted connection matrix
bin_swaps : int
average number of swaps in each edge binary randomization. Default
value is 5. 0 swaps implies no binary randomization.
wei_freq : float
frequency of weight sorting in weighted randomization. 0<=wei_freq<1.
wei_freq == 1 implies that weights are sorted at each step.
wei_freq == 0.1 implies that weights sorted each 10th step (faster,
default value)
wei_freq == 0 implies no sorting of weights (not recommended)
Returns
-------
W0 : NxN np.ndarray
randomized weighted connection matrix
R : 4-tuple of floats
Correlation coefficients between strength sequences of input and
output connection matrices, rpos_in, rpos_out, rneg_in, rneg_out
Notes
-----
The value of bin_swaps is ignored when binary topology is fully
connected (e.g. when the network has no negative weights).
Randomization may be better (and execution time will be slower) for
higher values of bin_swaps and wei_freq. Higher values of bin_swaps
may enable a more random binary organization, and higher values of
wei_freq may enable a more accurate conservation of strength
sequences.
R are the correlation coefficients between positive and negative
strength sequences of input and output connection matrices and are
used to evaluate the accuracy with which strengths were preserved.
Note that correlation coefficients may be a rough measure of
strength-sequence accuracy and one could implement more formal tests
(such as the Kolmogorov-Smirnov test) if desired.
'''
if not np.all(W == W.T):
raise BCTParamError("Input must be undirected")
W = W.copy()
n = len(W)
np.fill_diagonal(W, 0) # clear diagonal
    Ap = (W > 0)  # positive adjmat
    An = (W < 0)  # negative adjmat
if np.size(np.where(Ap.flat)) < (n * (n - 1)):
        W_r, _ = randmio_und_signed(W, bin_swaps)  # randmio_und_signed returns (R, eff)
Ap_r = W_r > 0
An_r = W_r < 0
else:
Ap_r = Ap
An_r = An
W0 = np.zeros((n, n))
for s in (1, -1):
if s == 1:
Acur = Ap
A_rcur = Ap_r
else:
Acur = An
A_rcur = An_r
S = np.sum(W * Acur, axis=0) # strengths
Wv = np.sort(W[np.where(np.triu(Acur))]) # sorted weights vector
i, j = np.where(np.triu(A_rcur))
Lij, = np.where(np.triu(A_rcur).flat) # weights indices
P = np.outer(S, S)
if wei_freq == 0: # get indices of Lij that sort P
Oind = np.argsort(P.flat[Lij]) # assign corresponding sorted
W0.flat[Lij[Oind]] = s * Wv # weight at this index
else:
wsize = np.size(Wv)
wei_period = np.round(1 / wei_freq) # convert frequency to period
lq = np.arange(wsize, 0, -wei_period, dtype=int)
for m in lq: # iteratively explore at this period
# get indices of Lij that sort P
Oind = np.argsort(P.flat[Lij])
R = np.random.permutation(m)[:np.min((m, wei_period))]
for q, r in enumerate(R):
# choose random index of sorted expected weight
o = Oind[r]
W0.flat[Lij[o]] = s * Wv[r] # assign corresponding weight
# readjust expected weighted probability for i[o],j[o]
f = 1 - Wv[r] / S[i[o]]
P[i[o], :] *= f
P[:, i[o]] *= f
f = 1 - Wv[r] / S[j[o]]
P[j[o], :] *= f
P[:, j[o]] *= f
# readjust strength of i[o]
S[i[o]] -= Wv[r]
# readjust strength of j[o]
S[j[o]] -= Wv[r]
O = Oind[R]
# remove current indices from further consideration
Lij = np.delete(Lij, O)
i = np.delete(i, O)
j = np.delete(j, O)
Wv = np.delete(Wv, R)
W0 = W0 + W0.T
rpos_in = np.corrcoef(np.sum(W * (W > 0), axis=0),
np.sum(W0 * (W0 > 0), axis=0))
rpos_ou = np.corrcoef(np.sum(W * (W > 0), axis=1),
np.sum(W0 * (W0 > 0), axis=1))
rneg_in = np.corrcoef(np.sum(-W * (W < 0), axis=0),
np.sum(-W0 * (W0 < 0), axis=0))
rneg_ou = np.corrcoef(np.sum(-W * (W < 0), axis=1),
np.sum(-W0 * (W0 < 0), axis=1))
return W0, (rpos_in[0, 1], rpos_ou[0, 1], rneg_in[0, 1], rneg_ou[0, 1])
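# Added illustration (not part of the original BCT port): a hedged sketch of
# null_model_und_sign on a dense signed symmetric matrix with the documented
# default parameters; the Gaussian weights below are purely illustrative.
#
#   w = np.random.randn(25, 25)
#   w = (w + w.T) / 2                # symmetrize so the network is undirected
#   np.fill_diagonal(w, 0)
#   w0, r = null_model_und_sign(w)   # r = (rpos_in, rpos_out, rneg_in, rneg_out)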
def randmio_dir_connected(R, itr):
'''
This function randomizes a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions. The
function also ensures that the randomized network maintains
connectedness, the ability for every node to reach every other node in
the network. The input network for this function must be connected.
Parameters
----------
W : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
R = R.copy()
n = len(R)
i, j = np.where(R)
k = len(i)
itr *= k
max_attempts = np.round(n * k / (n * (n - 1)))
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
rewire = True
while True:
e1 = np.random.randint(k)
e2 = np.random.randint(k)
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
# rewiring condition
if not (R[a, d] or R[c, b]):
# connectedness condition
if not (np.any((R[a, c], R[d, b], R[d, c])) and
np.any((R[c, a], R[b, d], R[b, a]))):
P = R[(a, c), :].copy()
P[0, b] = 0
P[0, d] = 1
P[1, d] = 0
P[1, b] = 1
PN = P.copy()
PN[0, a] = 1
PN[1, c] = 1
while True:
P[0, :] = np.any(R[P[0, :] != 0, :], axis=0)
P[1, :] = np.any(R[P[1, :] != 0, :], axis=0)
P *= np.logical_not(PN)
PN += P
if not np.all(np.any(P, axis=1)):
rewire = False
break
elif np.any(PN[0, (b, c)]) and np.any(PN[1, (d, a)]):
break
# end connectedness testing
if rewire: # reassign edges
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
j.setflags(write=True)
j[e1] = d # reassign edge indices
j[e2] = b
eff += 1
break
att += 1
return R, eff
def randmio_dir(R, itr):
'''
This function randomizes a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions.
Parameters
----------
W : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
R = R.copy()
n = len(R)
i, j = np.where(R)
k = len(i)
itr *= k
max_attempts = np.round(n * k / (n * (n - 1)))
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1 = np.random.randint(k)
e2 = np.random.randint(k)
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
# rewiring condition
if not (R[a, d] or R[c, b]):
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
i.setflags(write=True)
j.setflags(write=True)
i[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
return R, eff
def randmio_und_connected(R, itr):
'''
This function randomizes an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks. The function also ensures that the
randomized network maintains connectedness, the ability for every node
to reach every other node in the network. The input network for this
function must be connected.
NOTE the changes to the BCT matlab function of the same name
made in the Jan 2016 release
have not been propagated to this function because of substantially
decreased time efficiency in the implementation. Expect these changes
to be merged eventually.
Parameters
----------
W : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
if not np.all(R == R.T):
raise BCTParamError("Input must be undirected")
if number_of_components(R) > 1:
raise BCTParamError("Input is not connected")
R = R.copy()
n = len(R)
i, j = np.where(np.tril(R))
k = len(i)
itr *= k
# maximum number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
rewire = True
while True:
e1 = np.random.randint(k)
e2 = np.random.randint(k)
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
if np.random.random() > .5:
i.setflags(write=True)
j.setflags(write=True)
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (R[a, d] or R[c, b]):
# connectedness condition
if not (R[a, c] or R[b, d]):
P = R[(a, d), :].copy()
P[0, b] = 0
P[1, c] = 0
PN = P.copy()
PN[:, d] = 1
PN[:, a] = 1
while True:
P[0, :] = np.any(R[P[0, :] != 0, :], axis=0)
P[1, :] = np.any(R[P[1, :] != 0, :], axis=0)
P *= np.logical_not(PN)
if not np.all(np.any(P, axis=1)):
rewire = False
break
elif np.any(P[:, (b, c)]):
break
PN += P
# end connectedness testing
if rewire:
R[a, d] = R[a, b]
R[a, b] = 0
R[d, a] = R[b, a]
R[b, a] = 0
R[c, b] = R[c, d]
R[c, d] = 0
R[b, c] = R[d, c]
R[d, c] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
return R, eff
def randmio_dir_signed(R, itr):
'''
This function randomizes a directed weighted network with positively
and negatively signed connections, while preserving the positive and
negative degree distributions. In weighted networks by default the
    function preserves the out-strength but not the in-strength
    distributions.
Parameters
---------
W : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
R = R.copy()
n = len(R)
itr *= n * (n - 1)
#maximal number of rewiring attempts per iter
max_attempts = n
#actual number of successful rewirings
eff = 0
#print(itr)
for it in range(itr):
#print(it)
att = 0
while att <= max_attempts:
#select four distinct vertices
a, b, c, d = pick_four_unique_nodes_quickly(n)
#a, b, c, d = np.random.choice(n, 4)
#a, b, c, d = np.random.permutation(4)
r0_ab = R[a, b]
r0_cd = R[c, d]
r0_ad = R[a, d]
r0_cb = R[c, b]
#print(np.sign(r0_ab), np.sign(r0_ad))
#rewiring condition
if ( np.sign(r0_ab) == np.sign(r0_cd) and
np.sign(r0_ad) == np.sign(r0_cb) and
np.sign(r0_ab) != np.sign(r0_ad)):
R[a, d] = r0_ab
R[a, b] = r0_ad
R[c, b] = r0_cd
R[c, d] = r0_cb
eff += 1
break
att += 1
#print(eff)
return R, eff
def randmio_und(R, itr):
'''
This function randomizes an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks.
Parameters
----------
W : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
if not np.all(R == R.T):
raise BCTParamError("Input must be undirected")
R = R.copy()
n = len(R)
i, j = np.where(np.tril(R))
k = len(i)
itr *= k
# maximum number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1, e2 = np.random.randint(k, size=(2,))
while e1 == e2:
e2 = np.random.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
if np.random.random() > .5:
i.setflags(write=True)
j.setflags(write=True)
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (R[a, d] or R[c, b]):
R[a, d] = R[a, b]
R[a, b] = 0
R[d, a] = R[b, a]
R[b, a] = 0
R[c, b] = R[c, d]
R[c, d] = 0
R[b, c] = R[d, c]
R[d, c] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
return R, eff
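# Added illustration (not part of the original BCT port): a hedged sketch of
# randmio_und; the rewiring preserves each node's degree while scrambling the
# topology.
def _example_randmio_und(n=30, k=120, itr=10):
    cij = makerandCIJ_und(n, k)
    cij = cij + cij.T                  # symmetrize the upper-triangular output
    r, eff = randmio_und(cij, itr)
    assert np.all(np.sum(r, axis=0) == np.sum(cij, axis=0))
    return r, eff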
def randmio_und_signed(R, itr):
'''
This function randomizes an undirected weighted network with positive
and negative weights, while simultaneously preserving the degree
distribution of positive and negative weights. The function does not
preserve the strength distribution in weighted networks.
Parameters
----------
W : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
Returns
-------
R : NxN np.ndarray
randomized network
'''
R = R.copy()
n = len(R)
itr *= int(n * (n -1) / 2)
max_attempts = int(np.round(n / 2))
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts:
a, b, c, d = pick_four_unique_nodes_quickly(n)
r0_ab = R[a, b]
r0_cd = R[c, d]
r0_ad = R[a, d]
r0_cb = R[c, b]
#rewiring condition
if ( np.sign(r0_ab) == np.sign(r0_cd) and
np.sign(r0_ad) == np.sign(r0_cb) and
np.sign(r0_ab) != np.sign(r0_ad)):
R[a, d] = R[d, a] = r0_ab
R[a, b] = R[b, a] = r0_ad
R[c, b] = R[b, c] = r0_cd
R[c, d] = R[d, c] = r0_cb
eff += 1
break
att += 1
return R, eff
def randomize_graph_partial_und(A, B, maxswap):
'''
A = RANDOMIZE_GRAPH_PARTIAL_UND(A,B,MAXSWAP) takes adjacency matrices A
and B and attempts to randomize matrix A by performing MAXSWAP
rewirings. The rewirings will avoid any spots where matrix B is
nonzero.
Parameters
----------
A : NxN np.ndarray
undirected adjacency matrix to randomize
B : NxN np.ndarray
mask; edges to avoid
maxswap : int
number of rewirings
Returns
-------
A : NxN np.ndarray
randomized matrix
Notes
-----
1. Graph may become disconnected as a result of rewiring. Always
important to check.
2. A can be weighted, though the weighted degree sequence will not be
preserved.
3. A must be undirected.
'''
A = A.copy()
i, j = np.where(np.triu(A, 1))
i.setflags(write=True)
j.setflags(write=True)
m = len(i)
nswap = 0
while nswap < maxswap:
while True:
e1, e2 = np.random.randint(m, size=(2,))
while e1 == e2:
e2 = np.random.randint(m)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
if np.random.random() > .5:
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (A[a, d] or A[c, b] or B[a, d] or B[c, b]): # avoid specified ixes
A[a, d] = A[a, b]
A[a, b] = 0
A[d, a] = A[b, a]
A[b, a] = 0
A[c, b] = A[c, d]
A[c, d] = 0
A[b, c] = A[d, c]
A[d, c] = 0
j[e1] = d
j[e2] = b # reassign edge indices
nswap += 1
return A
def randomizer_bin_und(R, alpha):
'''
This function randomizes a binary undirected network, while preserving
the degree distribution. The function directly searches for rewirable
edge pairs (rather than trying to rewire edge pairs at random), and
hence avoids long loops and works especially well in dense matrices.
Parameters
----------
A : NxN np.ndarray
binary undirected connection matrix
alpha : float
fraction of edges to rewire
Returns
-------
R : NxN np.ndarray
randomized network
'''
R = binarize(R, copy=True) # binarize
if not np.all(R == R.T):
raise BCTParamError(
'randomizer_bin_und only takes undirected matrices')
ax = len(R)
nr_poss_edges = (np.dot(ax, ax) - ax) / 2 # find maximum possible edges
savediag = np.diag(R)
np.fill_diagonal(R, np.inf) # replace diagonal with high value
# if there are more edges than non-edges, invert the matrix to reduce
# computation time. "invert" means swap meaning of 0 and 1, not matrix
# inversion
i, j = np.where(np.triu(R, 1))
k = len(i)
if k > nr_poss_edges / 2:
swap = True
R = np.logical_not(R)
np.fill_diagonal(R, np.inf)
i, j = np.where(np.triu(R, 1))
k = len(i)
else:
swap = False
# exclude fully connected nodes
fullnodes = np.where((np.sum(np.triu(R, 1), axis=0) +
np.sum(np.triu(R, 1), axis=1).T) == (ax - 1))
if np.size(fullnodes):
R[fullnodes, :] = 0
R[:, fullnodes] = 0
np.fill_diagonal(R, np.inf)
i, j = np.where(np.triu(R, 1))
k = len(i)
if k == 0 or k >= (nr_poss_edges - 1):
raise BCTParamError("No possible randomization")
for it in range(k):
if np.random.random() > alpha:
continue # rewire alpha% of edges
a = i[it]
b = j[it] # it is the chosen edge from a<->b
alliholes, = np.where(R[:, a] == 0) # find where each end can connect
alljholes, = np.where(R[:, b] == 0)
# we can only use edges with connection to neither node
i_intersect = np.intersect1d(alliholes, alljholes)
# find which of these nodes are connected
ii, jj = np.where(R[np.ix_(i_intersect, i_intersect)])
# if there is an edge to switch
if np.size(ii):
# choose one randomly
nummates = np.size(ii)
mate = np.random.randint(nummates)
# randomly orient the second edge
if np.random.random() > .5:
c = i_intersect[ii[mate]]
d = i_intersect[jj[mate]]
else:
d = i_intersect[ii[mate]]
c = i_intersect[jj[mate]]
# swap the edges
R[a, b] = 0
R[c, d] = 0
R[b, a] = 0
R[d, c] = 0
R[a, c] = 1
R[b, d] = 1
R[c, a] = 1
R[d, b] = 1
# update the edge index (this is inefficient)
for m in range(k):
if i[m] == d and j[m] == c:
i.setflags(write=True)
j.setflags(write=True)
i[it] = c
j[m] = b
elif i[m] == c and j[m] == d:
i.setflags(write=True)
j.setflags(write=True)
j[it] = c
i[m] = b
# restore fullnodes
if np.size(fullnodes):
R[fullnodes, :] = 1
R[:, fullnodes] = 1
# restore inversion
if swap:
R = np.logical_not(R)
# restore diagonal
np.fill_diagonal(R, 0)
R += savediag
return np.array(R, dtype=int)
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/bct/algorithms/reference.py
|
Python
|
mit
| 53,864
|
[
"Gaussian"
] |
32d6bb764e4ad46676386fc3767bdda7400eba449b99fd45ba374cb9fea8bb08
|
from collections import defaultdict
import matplotlib.pyplot as plt
import re
import scipy.stats
import subprocess
import os
import cPickle
import tps_utils
import numpy as np
from collections import Counter
import pysam
import bzUtils
class TPS_Lib:
def __init__(self, experiment_settings, lib_settings):
"""
Constructor for Library class
"""
self.experiment_settings = experiment_settings
self.lib_settings = lib_settings
self.get_property = self.experiment_settings.get_property
self.get_rdir = experiment_settings.get_rdir
self.get_wdir = experiment_settings.get_wdir
self.pool_sequence_mappings = {}
self.initialize_pool_sequence_mappings(mapq_cutoff=0)
self.enrichment_sorted_mappings = None
def initialize_pool_sequence_mappings(self, mapq_cutoff = 30):
if self.get_property('force_recount') or not self.lib_settings.sequence_counts_exist():
gene_names = []
trimmed_sequences = bzUtils.convertFastaToDict(self.experiment_settings.get_trimmed_pool_fasta())
for sequence_name in trimmed_sequences:
gene_name = sequence_name.split('_')[0] #TL names are assumed to be of type:YLR350W_-68_651_116
gene_names.append(gene_name)
self.pool_sequence_mappings[sequence_name] = pool_sequence_mapping(sequence_name, trimmed_sequences[sequence_name])
samfile = pysam.Samfile(self.lib_settings.get_mapped_reads(), "rb" )
ra = read_assigner(self.pool_sequence_mappings, samfile, mapq_cutoff)
for aligned_read in samfile.fetch():
ra.assign_read(aligned_read)
samfile.close()
self.compute_lib_fractions()
gene_counts = Counter(gene_names)
for mapping_name in self.pool_sequence_mappings:
if gene_counts[mapping_name.split('_')[0]]==1:
self.pool_sequence_mappings[mapping_name].is_only_tl = True
else:
assert gene_counts[mapping_name.split('_')[0]] != 0
self.pool_sequence_mappings[mapping_name].is_only_tl = False
bzUtils.makePickle(self.pool_sequence_mappings, self.lib_settings.get_sequence_counts())
else:
self.pool_sequence_mappings = bzUtils.unPickle(self.lib_settings.get_sequence_counts())
def get_single_TL_mappings(self, names_only = False):
single_TL_mappings = set()
single_TL_names = set()
for mapping_name in self.pool_sequence_mappings:
if self.pool_sequence_mappings[mapping_name].is_only_tl:
single_TL_mappings.add(self.pool_sequence_mappings[mapping_name])
single_TL_names.add(mapping_name)
if names_only:
return single_TL_names
else:
return single_TL_mappings
def compute_lib_fractions(self):
total_passing_library_reads = float(sum([pool_sequence_mapping.total_passing_reads for pool_sequence_mapping in self.pool_sequence_mappings.values()]))
for pool_sequence_mapping in self.pool_sequence_mappings.values():
pool_sequence_mapping.lib_fraction = pool_sequence_mapping.total_passing_reads/total_passing_library_reads
def calculate_enrichments(self, input_lib):
for pool_sequence_mapping in self.pool_sequence_mappings.values():
input_pool_sequence_mapping = input_lib.get_pool_sequence_mapping(pool_sequence_mapping.sequence_name)
assert input_pool_sequence_mapping != None
try:
pool_sequence_mapping.enrichment = pool_sequence_mapping.lib_fraction/input_pool_sequence_mapping.lib_fraction
except:
pool_sequence_mapping.enrichment = 0
def get_pool_sequence_mapping(self, sequence_name):
if sequence_name in self.pool_sequence_mappings:
return self.pool_sequence_mappings[sequence_name]
else:
return None
def get_conc(self):
return self.lib_settings.get_conc()
def get_washes(self):
return self.lib_settings.washes
def get_poly_ic(self):
return self.lib_settings.poly_ic_conc
def get_temperature(self):
return self.lib_settings.temperature
def get_rna_conc(self):
return self.lib_settings.input_rna
def get_sample_name(self):
return self.lib_settings.sample_name
def plot_pcr_bias(self):
collapsed_reads_file = self.lib_settings.get_collapsed_reads()
        read_counts = []  # collected as a plain list, converted to an array below
f = open(collapsed_reads_file)
for line in f:
if not line.strip() == '' and not line.startswith('#'):#ignore empty lines and commented out lines
if line.startswith('>'):#> marks the start of a new sequence
num_reads = int(line[1:].strip().split('-')[1])
read_counts.append(num_reads)
else:
continue
f.close()
        read_fractions = np.array(read_counts, dtype=float) / float(sum(read_counts))
        return read_fractions
def get_counts(self, sequence_name):
return self.pool_sequence_mappings[sequence_name].total_passing_reads
def get_mappings_with_minimum_reads(self, minimum_reads, names_only = False):
passing_mappings = set()
for mapping in self.pool_sequence_mappings.values():
if mapping.get_number_rt_stops() >= minimum_reads:
passing_mappings.add(mapping)
if names_only:
return set([passing_mapping.sequence_name for passing_mapping in passing_mappings])
else:
return passing_mappings
class pool_sequence_mapping:
"""
Represents a single sequence from the input pool
Stores
The original RNA sequence used in the pool (No adaptor)
The Trimmed sequence used for mapping
The positions of all reads mapping to this sequence
Total number of reads mapping to this sequence
Fraction of library reads mapping here
Enrichment relative to input library
"""
def __init__(self, sequence_name, full_sequence):
self.sequence_name = sequence_name
self.full_sequence = full_sequence
self.poor_quality_reads = 0
self.reverse_reads = 0
self.total_passing_reads = 0
self.reads_at_position = defaultdict(int) #will map position to # of reads there
self.enrichment = None
self.is_only_tl = None
def contains_subsequence(self, subsequence):
if subsequence in self.full_sequence:
return True
else:
return False
def positions_of_subsequence(self, subsequence):
#this regex will NOT return overlapping sequences
return [m.start() for m in re.finditer(subsequence, self.full_sequence)]
def get_number_rt_stops(self):
return float(sum([self.reads_at_position[i] for i in range(3,len(self.full_sequence))]))
def fraction_at_position(self, position):
if position < 0 or position > len(self.full_sequence)-1:
return None
else:
#return self.reads_at_position[position]/float(self.total_passing_reads)
if self.get_number_rt_stops() == 0:
return 0
else:
return self.reads_at_position[position]/self.get_number_rt_stops()
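# Added illustration (not part of the original file): a minimal, hedged sketch
# of pool_sequence_mapping on its own; the sequence name and read counts below
# are made up.
def _example_pool_sequence_mapping():
    m = pool_sequence_mapping('YAL001C_-50_10_5', 'ACGTACGTACGT')
    m.reads_at_position[5] = 8
    m.reads_at_position[7] = 2
    # RT stops are only counted from position 3 onward, so this returns 8/10
    return m.fraction_at_position(5)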
class read_assigner:
def __init__(self, pool_sequence_mappings, samfile, mapq_cutoff):
self.mapq_cutoff = mapq_cutoff
self.pool_sequence_mappings = pool_sequence_mappings
self.samfile = samfile
def assign_read(self, aligned_read):
sequence_id = aligned_read.tid
sequence_name = self.samfile.getrname(sequence_id)
pool_sequence_mapping = self.pool_sequence_mappings[sequence_name]
alignment_start = aligned_read.pos
is_reverse = aligned_read.is_reverse
mapq_score = aligned_read.mapq
if mapq_score >= self.mapq_cutoff and not is_reverse:
pool_sequence_mapping.reads_at_position[alignment_start] += 1
pool_sequence_mapping.total_passing_reads += 1
else:
if mapq_score < self.mapq_cutoff:
pool_sequence_mapping.poor_quality_reads += 1
if is_reverse:
pool_sequence_mapping.reverse_reads += 1
|
borisz264/toeprint_seq
|
tps_lib.py
|
Python
|
mit
| 8,362
|
[
"pysam"
] |
ef11ee84102b5d823fadd6cffa7381997c9b9c393068d835d0e745c9cff9f8c3
|
import collections
import itertools as it
import mrf
import ve
from operator import mul
from operator import add
import numpy as np
def tree_sum_product(tree_network, root):
'''
Perform sum-product belief propagation for trees computing the marginal
distribution of each variable using two passes.
Parameters
----------
tree_network : mrf.Network
Network of factors over a tree graph structure.
root : name
    Name of the variable to treat as the tree root. Can be any of the network's
variables.
Returns
-------
marginals : dict of functions
Map of functions keyed by variable name representing the marginal
probability of each variable.
'''
assert not tree_network.is_energy_funcs
def generate_message(cj, cij, incoming):
        # Build a network from these factors and sum out (eliminate) the variable in cj.
factors = reduce(add, incoming, []) + [cj, cij]
psi = mrf.Network(factors)
tau = ve.eliminate(psi, cj.names)
# Make messages sum to one.
xi_nstates = tau.nstates[0]
alpha = np.sum(tau((xi,)) for xi in xrange(xi_nstates))
tau = tau.partition(alpha)
return tau.factors
def generate_marginal(ci, messages):
# Multiply out factors (all for the same variable).
factors = reduce(add, messages, []) + [ci]
marginal_table = reduce(mul, [fac.table for fac in factors])
n = mrf.Network((mrf.Factor.fromTable(ci.names, marginal_table),))
alpha = sum(n.query(s)
for s in it.product(*[range(i) for i in n.nstates]))
return mrf.Network(n.factors, alpha)
return _tree_belief_propagation(
tree_network, root, generate_message, generate_marginal)
def tree_max_product(tree_network, root):
'''
Perform max-product belief propagation for trees computing the MAP
assignment to all of the tree variables.
Parameters
----------
tree_network : mrf.Network
Network of factors over a tree graph structure.
root : name
    Name of the variable to treat as the tree root. Can be any of the network's
variables.
Returns
-------
max_marginal : dict of functions
Map of functions keyed by variable name whose argmax represents the MAP
assignment of the corresponding variable.
'''
assert not tree_network.is_energy_funcs
def generate_message(cj, cij, incoming):
''' Generate a normalized message. '''
# Compute max over xj for the message to xi.
if cj.names[0] == cij.names[0]:
edge = lambda xi, xj: (xj, xi)
xi_nstates = cij.table.shape[1]
else:
edge = lambda xi, xj: (xi, xj)
xi_nstates = cij.table.shape[0]
# Compute the total response for the states of xi.
incoming_tab = np.array([
mul(cj((xj,)), reduce(mul, (m(xj) for m in incoming), 1.))
for xj in range(cj.nstates[0])])
message = lambda xi: max(mul(cij(edge(xi, xj)), incoming_tab[xj])
for xj in range(cj.nstates[0]))
# Combine into a single table over states of xi.
table = np.fromiter((message((xi,))
for xi in xrange(xi_nstates)), dtype=float)
np.divide(table, np.sum(table), table)
return lambda xi: table[xi]
def generate_marginal(ci, messages):
''' Return normalized product over this variable and its messages. '''
# Compute the total response for the states of xi.
xi_nstates = ci.nstates[0]
table = np.fromiter(
(mul(ci((xi,)), reduce(mul, (m(xi) for m in messages), 1.))
for xi in xrange(xi_nstates)), dtype=float)
np.divide(table, np.sum(table), table)
return lambda xi: table[xi]
return _tree_belief_propagation(
tree_network, root, generate_message, generate_marginal)
def tree_max_sum(tree_network, root):
'''
Perform max-sum belief propagation for trees computing the MAP assignment
to all of the tree variables.
Parameters
----------
tree_network : mrf.Network
Network of factors over a tree graph structure.
root : name
    Name of the variable to treat as the tree root. Can be any of the network's
variables.
Returns
-------
max_marginal : dict of functions
Map of functions keyed by variable name whose argmax represents the MAP
assignment of the corresponding variable.
'''
assert tree_network.is_energy_funcs
def generate_message(cj, cij, incoming):
''' Compute max-sum message from xi to xj. '''
# Compute max over xj for the message to xi.
if cj.names[0] == cij.names[0]:
edge = lambda xi, xj: (xj, xi)
else:
edge = lambda xi, xj: (xi, xj)
incoming_tab = np.array([
add(cj((xj,)), reduce(add, (m(xj) for m in incoming), 0.))
for xj in range(cj.nstates[0])])
# Memoize message.
cache = dict()
def message(xi):
if xi not in cache:
cache[xi] = max(
add(cij(edge(xi, xj)), incoming_tab[xj])
for xj in range(cj.nstates[0]))
return cache[xi]
return message
def generate_marginal(ci, messages):
''' Return the product over this variable and its messages. '''
return lambda xi: \
add(ci((xi,)), reduce(add, (m(xi) for m in messages), 0.))
return _tree_belief_propagation(
tree_network, root, generate_message, generate_marginal)
def tree_network_map_assignment(tree_network):
'''
Compute the MAP assignment for a tree mrf.
Parameters
----------
tree_network : mrf.Network
Network of factors over a tree graph structure.
Returns
-------
map_query : dict
Map keyed by variable name giving a tuple of (potential, state) for
each variable in the network.
'''
if tree_network.is_energy_funcs:
max_marginals = tree_max_sum(tree_network, tree_network.names[0])
else:
max_marginals = tree_max_product(tree_network, tree_network.names[0])
map_query = dict()
# Iterate over single node potentials.
for fac in tree_network.factors:
if len(fac.names) > 1:
continue
name, nstates = fac.names[0], fac.nstates[0]
max_marginal = max_marginals[name]
p_map = [max_marginal(i) for i in range(nstates)]
map_argmax = np.argmax(p_map)
map_query[name] = (p_map[map_argmax], map_argmax)
return map_query
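# Added illustration (not part of the original module): a hedged sketch of how
# the MAP result might be consumed. 'tree_net' stands for an mrf.Network built
# elsewhere over a tree structure; it is assumed, not constructed here.
#
#   map_q = tree_network_map_assignment(tree_net)
#   for name, (potential, state) in sorted(map_q.items()):
#       print('%s -> state %d (max-marginal %g)' % (name, state, potential))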
def _tree_belief_propagation(
tree_network, root, generate_message, generate_marginal):
'''
Internal method for message passing in two passes.
'''
# For a tree network, all cliques are over pairs (edges) or single nodes.
t = mrf.network_to_mrf(tree_network)
# Initialize cliques.
cliques = dict(
((tuple(sorted(fac.names)), fac) for fac in tree_network.factors))
# Find message passing order up to the root using DFS.
pass_up = {}
to_visit = [(root, t.edge[root])]
while len(to_visit) > 0:
# Get next set to visit
xj, xis = to_visit.pop()
# Visit over each edge.
for xi in xis:
if not xi in pass_up and xi != root:
pass_up[xi] = xj
to_visit.append((xi, t.edge[xi]))
# Create dict of messages remaining and list of ready cliques.
messages_remain = dict(((n, d-1) for n,d in t.degree().iteritems()))
messages_remain[root] = t.degree()[root]
# While there is a ready node.
ready = [n for n,c in messages_remain.iteritems() if c is 0]
messages = collections.defaultdict(lambda: {})
while len(ready) > 0:
# Pop the next clique to process; break on root.
xi = ready.pop()
# Get parent node (must be unique for a tree).
xj = pass_up[xi]
# Store the message xi -> xj.
edge = tuple(sorted((xi,xj)))
assert xj not in messages[xi]
assert xi not in messages[xj]
messages[xj][xi] = generate_message(
cliques[(xi,)], cliques[edge], messages[xi].values())
# Account for xj's new message.
messages_remain[xj] -= 1
assert messages_remain[xj] >= 0
if 0 == messages_remain[xj] and xj != root:
ready.append(xj)
# No messages remain.
assert sum(messages_remain.values()) == 0
# Compute pass down adjacency list.
pass_down = collections.defaultdict(lambda: [])
for k,v in pass_up.iteritems():
pass_down[v].append(k)
# Pass down.
marginals = {}
ready = [root]
while len(ready) > 0:
xi = ready.pop()
# Gather marginal.
marginals[xi] = generate_marginal(
cliques[(xi,)], messages[xi].values())
for xj in pass_down[xi]:
edge = tuple(sorted((xi,xj)))
messages_sub_xj = [
messages[xi][xk] for xk in messages[xi].keys() if xk != xj]
# Store the message xi -> xj.
assert xi not in messages[xj]
messages[xj][xi] = generate_message(
cliques[(xi,)], cliques[edge], messages_sub_xj)
ready.append(xj)
# Return marginal distributions for all variables.
return marginals, messages
|
blr246/mrf
|
mrf/belief_propagation.py
|
Python
|
mit
| 9,461
|
[
"VisIt"
] |
15f2e79308a3fcfb51e584edd5c778296d5dd0319704ca4b775d623ae654b721
|
#
# mainTab
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'castep'
tab.settings['Output file name'] = 'phonon.castep'
#
# SettingsTab
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = False
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 5
tab.settings['Mass definition'] = 'program'
#
# 0th Scenario tabs
#
tab = self.notebook.scenarios[0]
tab.settings['Matrix'] = 'ptfe'
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['Volume fraction'] = 0.1
tab.settings['Ellipsoid a/b'] = 0.5
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Effective medium method'] = 'Mie'
tab.settings['Particle shape'] = 'Sphere'
tab.settings['Particle size(mu)'] = 1.0
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Legend'] = 'Mie single particle size 1.0mu'
# Add new scenarios
methods = ['Mie']
shapes = ['Sphere']
hkls = [[0,0,0]]
vfs = [0.1]
sizes = [1.0, 1.0, ]
sigmas = [0.1, 0.5, ]
for method in methods:
for shape,hkl in zip(shapes,hkls):
for vf in vfs:
for size,sigma in zip(sizes,sigmas):
self.notebook.addScenario()
tab = self.notebook.scenarios[-1]
tab.settings['Volume fraction'] = vf
tab.settings['Particle shape'] = shape
tab.settings['Particle size(mu)'] = size
tab.settings['Effective medium method'] = method
tab.settings['Particle size distribution sigma(mu)'] = sigma
tab.settings['Unique direction - h'] = hkl[0]
tab.settings['Unique direction - k'] = hkl[1]
tab.settings['Unique direction - l'] = hkl[2]
#tab.settings['Legend'] = method + ' ' + shape + ' vf='+str(vf)+' size='+str(size)+' sigma=',str(sigma)
tab.settings['Legend'] = method + ' vf='+str(vf)+' size='+str(size)+' sigma='+str(sigma)
#
# Plotting Tab
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 300.0
tab.settings['Maximum frequency'] = 800.0
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Plot title'] = 'Mie method - Castep MgO - LogNormal Distribution'
#
# Analysis Tab
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 800
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
|
JohnKendrick/PDielec
|
Examples/Mie/MgO_lognormal/script.py
|
Python
|
mit
| 2,598
|
[
"CASTEP"
] |
5856af81fb58a6778eabe0ce6a17ece9bd5d1d1859ea8bedd06baca68a3d0f75
|
from __future__ import print_function
import os, string, tempfile, shutil
from subprocess import Popen
from ase.io import write
from ase.units import Bohr
class Bader:
'''class for running bader analysis and extracting data from it.
The class runs bader, extracts the charge density and outputs it
to a cube file. Then you call different functions of the class to
extract the charges, volumes, etc...
ACF.dat contains the coordinates of each atom, the charge
associated with it according to Bader partitioning, percentage of
the whole according to Bader partitioning and the minimum distance
to the surface. This distance should be compared to maximum
cut-off radius for the core region if pseudo potentials have been
used.
BCF.dat contains the coordinates of each Bader maxima, the charge
within that volume, the nearest atom and the distance to that
atom.
AtomVolumes.dat contains the number of each volume that has been
assigned to each atom. These numbers correspond to the number of
the BvAtxxxx.dat files.
The options for the executable are::
bader [ -c bader | voronoi ]
[ -n bader | voronoi ]
[ -b neargrid | ongrid ]
[ -r refine_edge_iterations ]
[ -ref reference_charge ]
[ -p all_atom | all_bader ]
[ -p sel_atom | sel_bader ] [volume list]
[ -p atom_index | bader_index ]
[ -i cube | chgcar ]
[ -h ] [ -v ]
chargefile
References:
G. Henkelman, A. Arnaldsson, and H. Jonsson, A fast and robust
algorithm for Bader decomposition of charge density,
Comput. Mater. Sci. 36 254-360 (2006).
E. Sanville, S. D. Kenny, R. Smith, and G. Henkelman An improved
grid-based algorithm for Bader charge allocation,
J. Comp. Chem. 28 899-908 (2007).
W. Tang, E. Sanville, and G. Henkelman A grid-based Bader analysis
algorithm without lattice bias, J. Phys.: Condens. Matter 21
084204 (2009).
'''
def __init__(self, atoms):
'''
'''
self.atoms = atoms
#get density and write cube file
calc = atoms.get_calculator()
ncfile = calc.get_nc()
base, ext = os.path.splitext(ncfile)
x, y, z, density = calc.get_charge_density()
cubefile = base + '_charge_density.cube'
self.densityfile = cubefile
if not os.path.exists(cubefile):
write(cubefile, atoms, data=density * Bohr ** 3)
#cmd to run for bader analysis. check if output exists so we
#don't run this too often.
acf_file = base + '_ACF.dat'
if not os.path.exists(acf_file):
#mk tempdir
tempdir = tempfile.mkdtemp()
cwd = os.getcwd()
abscubefile = os.path.abspath(cubefile)
os.chdir(tempdir)
cmd = 'bader %s' % abscubefile
            process = Popen(cmd.split())
            status = process.wait()
if status != 0:
print(process)
shutil.copy2('ACF.dat', os.path.join(cwd, acf_file))
os.chdir(cwd)
shutil.rmtree(tempdir)
self.charges = []
self.volumes = []
#now parse the output
f = open(acf_file, 'r')
#skip 2 lines
f.readline()
f.readline()
for i, atom in enumerate(self.atoms):
line = f.readline()
fields = line.split()
n = int(fields[0])
x = float(fields[1])
y = float(fields[2])
z = float(fields[3])
chg = float(fields[4])
mindist = float(fields[5])
vol = float(fields[6])
self.charges.append(chg)
self.volumes.append(vol)
f.close()
def get_bader_charges(self):
return self.charges
def get_bader_volumes(self):
'return volumes in Ang**3'
return [x * Bohr ** 3 for x in self.volumes]
def write_atom_volume(self, atomlist):
'''write bader atom volumes to cube files.
atomlist = [0,2] #for example
-p sel_atom Write the selected atomic volumes, read from the
subsequent list of volumes.
'''
alist = string.join([str(x) for x in atomlist], ' ')
cmd = 'bader -p sel_atom %s %s' % (alist, self.densityfile)
print(cmd)
os.system(cmd)
def write_bader_volume(self, atomlist):
"""write bader atom volumes to cube files.
::
atomlist = [0,2] # for example
-p sel_bader Write the selected Bader volumes, read from the
subsequent list of volumes.
"""
alist = string.join([str(x) for x in atomlist], ' ')
cmd = 'bader -p sel_bader %s %s' % (alist, self.densityfile)
print(cmd)
os.system(cmd)
def write_atom_index(self):
''' -p atom_index Write the atomic volume index to a charge
density file.
'''
cmd = 'bader -p atom_index %s' % (self.densityfile)
print(cmd)
os.system(cmd)
def write_bader_index(self):
'''
-p bader_index Write the Bader volume index to a charge
density file.
'''
cmd = 'bader -p bader_index %s' % (self.densityfile)
print(cmd)
os.system(cmd)
def write_all_atom(self):
'''
-p all_atom Combine all volumes associated with an atom and
write to file. This is done for all atoms and written to files
named BvAtxxxx.dat. The volumes associated with atoms are
those for which the maximum in charge density within the
volume is closest to the atom.
'''
cmd = 'bader -p all_atom %s' % (self.densityfile)
print(cmd)
os.system(cmd)
def write_all_bader(self):
'''
-p all_bader Write all Bader volumes (containing charge above
threshold of 0.0001) to a file. The charge distribution in
each volume is written to a separate file, named
Bvolxxxx.dat. It will either be of a CHGCAR format or a CUBE
file format, depending on the format of the initial charge
density file. These files can be quite large, so this option
should be used with caution.
'''
cmd = 'bader -p all_bader %s' % (self.densityfile)
print(cmd)
os.system(cmd)
if __name__ == '__main__':
from ase.calculators.jacapo import Jacapo
atoms = Jacapo.read_atoms('ethylene.nc')
b = Bader(atoms)
print(b.get_bader_charges())
print(b.get_bader_volumes())
b.write_atom_volume([3, 4])
|
suttond/MODOI
|
ase/calculators/jacapo/utils/bader.py
|
Python
|
lgpl-3.0
| 6,745
|
[
"ASE"
] |
2e2de13fe07a47f907ae3140d8cecca93a9c3008d653b6980ca9f53b7bad402c
|
#!/usr/bin/env python
import gd_util
import sys
from Population import Population
################################################################################
def convert_percent(string_value):
if string_value.endswith('%'):
val = convert_non_negative_int(string_value[:-1])
if val > 100:
print >> sys.stderr, 'percentage: "%d" > 100' % val
sys.exit(1)
val = val * -1
else:
val = convert_non_negative_int(string_value)
return str(val)
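# Added illustration (not part of the original tool): percentages are encoded
# as negative numbers so the downstream filter_snps program can tell them
# apart from absolute counts (see the "< 0 means interpret as percentage"
# comments further down), e.g.
#
#   convert_percent('25%')  ->  '-25'
#   convert_percent('12')   ->  '12'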
def convert_non_negative_int(string_value):
try:
val = int(string_value)
except:
print >> sys.stderr, '"%s" is not an integer' % string_value
sys.exit(1)
if val < 0:
print >> sys.stderr, '"%d" is negative' % val
sys.exit(1)
return val
################################################################################
if len(sys.argv) != 13:
gd_util.die('Usage')
input, output, ref_chrom_col, min_spacing, lo_genotypes, p1_input, input_type, lo_coverage, hi_coverage, low_ind_cov, low_quality, ind_arg = sys.argv[1:]
p_total = Population()
p_total.from_wrapped_dict(ind_arg)
p1 = Population()
p1.from_population_file(p1_input)
if not p_total.is_superset(p1):
gd_util.die('There is an individual in the population that is not in the SNP table')
lo_coverage = convert_percent(lo_coverage)
hi_coverage = convert_percent(hi_coverage)
if input_type == 'gd_snp':
type_arg = 1
elif input_type == 'gd_genotype':
type_arg = 0
else:
gd_util.die('unknown input_type: {0}'.format(input_type))
################################################################################
prog = 'filter_snps'
args = [ prog ]
args.append(input) # file containing a Galaxy table
args.append(type_arg) # 1 for a gd_snp file, 0 for gd_genotype
args.append(lo_coverage) # lower bound on total coverage (< 0 means interpret as percentage)
args.append(hi_coverage) # upper bound on total coveraae (< 0 means interpret as percentage)
args.append(low_ind_cov) # lower bound on individual coverage
args.append(low_quality) # lower bound on individual quality value
args.append(lo_genotypes) # lower bound on the number of defined genotypes
args.append(min_spacing) # lower bound on the spacing between SNPs
args.append(ref_chrom_col) # reference-chromosome column (base-1); ref position in next column
columns = p1.column_list()
for column in sorted(columns):
args.append(column) # the starting columns (base-1) for the chosen individuals
with open(output, 'w') as fh:
gd_util.run_program(prog, args, stdout=fh)
sys.exit(0)
|
gigascience/galaxy-genome-diversity
|
tools/filter_gd_snp/filter_gd_snp.py
|
Python
|
gpl-3.0
| 2,636
|
[
"Galaxy"
] |
2c10698d2c11297ca4c0f6202643cac6f105a4096c6de1afdff4bc1a04032834
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2018 (ita)
"""
Runner.py: Task scheduling and execution
"""
import heapq, traceback
try:
from queue import Queue, PriorityQueue
except ImportError:
from Queue import Queue
try:
from Queue import PriorityQueue
except ImportError:
class PriorityQueue(Queue):
def _init(self, maxsize):
self.maxsize = maxsize
self.queue = []
def _put(self, item):
heapq.heappush(self.queue, item)
def _get(self):
return heapq.heappop(self.queue)
from waflib import Utils, Task, Errors, Logs
GAP = 5
"""
Wait for at least ``GAP * njobs`` before trying to enqueue more tasks to run
"""
class PriorityTasks(object):
def __init__(self):
self.lst = []
def __len__(self):
return len(self.lst)
def __iter__(self):
return iter(self.lst)
def clear(self):
self.lst = []
def append(self, task):
heapq.heappush(self.lst, task)
def appendleft(self, task):
"Deprecated, do not use"
heapq.heappush(self.lst, task)
def pop(self):
return heapq.heappop(self.lst)
def extend(self, lst):
if self.lst:
for x in lst:
self.append(x)
else:
if isinstance(lst, list):
self.lst = lst
heapq.heapify(lst)
else:
self.lst = lst.lst
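# Added illustration (not part of waf): a minimal, hedged sketch of the
# PriorityTasks heap; plain integers stand in here for Task instances, which
# define their own ordering in a real build.
def _example_priority_tasks():
    pt = PriorityTasks()
    pt.extend([3, 1, 2])      # a plain list is heapified in place
    first = pt.pop()          # -> 1, the smallest item comes out first
    pt.append(0)
    return first, len(pt)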
class Consumer(Utils.threading.Thread):
"""
Daemon thread object that executes a task. It shares a semaphore with
the coordinator :py:class:`waflib.Runner.Spawner`. There is one
instance per task to consume.
"""
def __init__(self, spawner, task):
Utils.threading.Thread.__init__(self)
self.task = task
"""Task to execute"""
self.spawner = spawner
"""Coordinator object"""
self.setDaemon(1)
self.start()
def run(self):
"""
Processes a single task
"""
try:
if not self.spawner.master.stop:
self.spawner.master.process_task(self.task)
finally:
self.spawner.sem.release()
self.spawner.master.out.put(self.task)
self.task = None
self.spawner = None
class Spawner(Utils.threading.Thread):
"""
Daemon thread that consumes tasks from :py:class:`waflib.Runner.Parallel` producer and
spawns a consuming thread :py:class:`waflib.Runner.Consumer` for each
:py:class:`waflib.Task.Task` instance.
"""
def __init__(self, master):
Utils.threading.Thread.__init__(self)
self.master = master
""":py:class:`waflib.Runner.Parallel` producer instance"""
self.sem = Utils.threading.Semaphore(master.numjobs)
"""Bounded semaphore that prevents spawning more than *n* concurrent consumers"""
self.setDaemon(1)
self.start()
def run(self):
"""
Spawns new consumers to execute tasks by delegating to :py:meth:`waflib.Runner.Spawner.loop`
"""
try:
self.loop()
except Exception:
# Python 2 prints unnecessary messages when shutting down
# we also want to stop the thread properly
pass
def loop(self):
"""
Consumes task objects from the producer; ends when the producer has no more
		tasks to provide.
"""
master = self.master
while 1:
task = master.ready.get()
self.sem.acquire()
if not master.stop:
task.log_display(task.generator.bld)
Consumer(self, task)
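# A minimal, self-contained sketch of the same producer/consumer handshake
# (illustrative only; the names below are not part of waf): a producer feeds a
# queue, a bounded semaphore caps concurrency, and each worker releases the
# semaphore and reports completion on a second queue.
#
#   import threading, queue
#
#   def run_all(jobs, numjobs=2):
#       ready, out = queue.Queue(), queue.Queue()
#       sem = threading.Semaphore(numjobs)
#       def worker(job):
#           try:
#               job()                                 # execute one task
#           finally:
#               sem.release()                         # free a slot
#               out.put(job)                          # report back
#       def spawner():
#           while True:
#               job = ready.get()
#               if job is None:                       # poison pill, cf. ready.put(None)
#                   break
#               sem.acquire()
#               threading.Thread(target=worker, args=(job,), daemon=True).start()
#       threading.Thread(target=spawner, daemon=True).start()
#       for job in jobs:
#           ready.put(job)
#       for _ in jobs:
#           out.get()                                 # wait for completions, cf. get_out()
#       ready.put(None)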
class Parallel(object):
"""
Schedule the tasks obtained from the build context for execution.
"""
def __init__(self, bld, j=2):
"""
The initialization requires a build context reference
for computing the total number of jobs.
"""
self.numjobs = j
"""
Amount of parallel consumers to use
"""
self.bld = bld
"""
Instance of :py:class:`waflib.Build.BuildContext`
"""
self.outstanding = PriorityTasks()
"""Heap of :py:class:`waflib.Task.Task` that may be ready to be executed"""
self.postponed = PriorityTasks()
"""Heap of :py:class:`waflib.Task.Task` which are not ready to run for non-DAG reasons"""
self.incomplete = set()
"""List of :py:class:`waflib.Task.Task` waiting for dependent tasks to complete (DAG)"""
self.ready = PriorityQueue(0)
"""List of :py:class:`waflib.Task.Task` ready to be executed by consumers"""
self.out = Queue(0)
"""List of :py:class:`waflib.Task.Task` returned by the task consumers"""
self.count = 0
"""Amount of tasks that may be processed by :py:class:`waflib.Runner.TaskConsumer`"""
self.processed = 0
"""Amount of tasks processed"""
self.stop = False
"""Error flag to stop the build"""
self.error = []
"""Tasks that could not be executed"""
self.biter = None
"""Task iterator which must give groups of parallelizable tasks when calling ``next()``"""
self.dirty = False
"""
Flag that indicates that the build cache must be saved when a task was executed
(calls :py:meth:`waflib.Build.BuildContext.store`)"""
self.revdeps = Utils.defaultdict(set)
"""
The reverse dependency graph of dependencies obtained from Task.run_after
"""
self.spawner = Spawner(self)
"""
Coordinating daemon thread that spawns thread consumers
"""
def get_next_task(self):
"""
Obtains the next Task instance to run
:rtype: :py:class:`waflib.Task.Task`
"""
if not self.outstanding:
return None
return self.outstanding.pop()
def postpone(self, tsk):
"""
Adds the task to the list :py:attr:`waflib.Runner.Parallel.postponed`.
The order is scrambled so as to consume as many tasks in parallel as possible.
:param tsk: task instance
:type tsk: :py:class:`waflib.Task.Task`
"""
self.postponed.append(tsk)
def refill_task_list(self):
"""
Pulls a next group of tasks to execute in :py:attr:`waflib.Runner.Parallel.outstanding`.
Ensures that all tasks in the current build group are complete before processing the next one.
"""
while self.count > self.numjobs * GAP:
self.get_out()
while not self.outstanding:
if self.count:
self.get_out()
if self.outstanding:
break
elif self.postponed:
try:
cond = self.deadlock == self.processed
except AttributeError:
pass
else:
if cond:
# The most common reason is conflicting build order declaration
# for example: "X run_after Y" and "Y run_after X"
# Another can be changing "run_after" dependencies while the build is running
# for example: updating "tsk.run_after" in the "runnable_status" method
lst = []
for tsk in self.postponed:
deps = [id(x) for x in tsk.run_after if not x.hasrun]
lst.append('%s\t-> %r' % (repr(tsk), deps))
if not deps:
lst.append('\n task %r dependencies are done, check its *runnable_status*?' % id(tsk))
raise Errors.WafError('Deadlock detected: check the task build order%s' % ''.join(lst))
self.deadlock = self.processed
if self.postponed:
self.outstanding.extend(self.postponed)
self.postponed.clear()
elif not self.count:
if self.incomplete:
for x in self.incomplete:
for k in x.run_after:
if not k.hasrun:
break
else:
# dependency added after the build started without updating revdeps
self.incomplete.remove(x)
self.outstanding.append(x)
break
else:
raise Errors.WafError('Broken revdeps detected on %r' % self.incomplete)
else:
tasks = next(self.biter)
ready, waiting = self.prio_and_split(tasks)
self.outstanding.extend(ready)
self.incomplete.update(waiting)
self.total = self.bld.total()
break
def add_more_tasks(self, tsk):
"""
If a task provides :py:attr:`waflib.Task.Task.more_tasks`, then the tasks contained
in that list are added to the current build and will be processed before the next build group.
The priorities for dependent tasks are not re-calculated globally
:param tsk: task instance
:type tsk: :py:attr:`waflib.Task.Task`
"""
if getattr(tsk, 'more_tasks', None):
more = set(tsk.more_tasks)
groups_done = set()
def iteri(a, b):
for x in a:
yield x
for x in b:
yield x
# Update the dependency tree
# this assumes that task.run_after values were updated
for x in iteri(self.outstanding, self.incomplete):
for k in x.run_after:
if isinstance(k, Task.TaskGroup):
if k not in groups_done:
groups_done.add(k)
for j in k.prev & more:
self.revdeps[j].add(k)
elif k in more:
self.revdeps[k].add(x)
ready, waiting = self.prio_and_split(tsk.more_tasks)
self.outstanding.extend(ready)
self.incomplete.update(waiting)
self.total += len(tsk.more_tasks)
def mark_finished(self, tsk):
def try_unfreeze(x):
# DAG ancestors are likely to be in the incomplete set
# This assumes that the run_after contents have not changed
# after the build starts, else a deadlock may occur
if x in self.incomplete:
# TODO remove dependencies to free some memory?
# x.run_after.remove(tsk)
for k in x.run_after:
if not k.hasrun:
break
else:
self.incomplete.remove(x)
self.outstanding.append(x)
if tsk in self.revdeps:
for x in self.revdeps[tsk]:
if isinstance(x, Task.TaskGroup):
x.prev.remove(tsk)
if not x.prev:
for k in x.next:
# TODO necessary optimization?
k.run_after.remove(x)
try_unfreeze(k)
# TODO necessary optimization?
x.next = []
else:
try_unfreeze(x)
del self.revdeps[tsk]
if hasattr(tsk, 'semaphore'):
sem = tsk.semaphore
sem.release(tsk)
while sem.waiting and not sem.is_locked():
# take a frozen task, make it ready to run
x = sem.waiting.pop()
self._add_task(x)
def get_out(self):
"""
Waits for a Task that task consumers add to :py:attr:`waflib.Runner.Parallel.out` after execution.
Adds more Tasks if necessary through :py:attr:`waflib.Runner.Parallel.add_more_tasks`.
:rtype: :py:attr:`waflib.Task.Task`
"""
tsk = self.out.get()
if not self.stop:
self.add_more_tasks(tsk)
self.mark_finished(tsk)
self.count -= 1
self.dirty = True
return tsk
def add_task(self, tsk):
"""
Enqueue a Task to :py:attr:`waflib.Runner.Parallel.ready` so that consumers can run them.
:param tsk: task instance
:type tsk: :py:attr:`waflib.Task.Task`
"""
# TODO change in waf 2.1
self.ready.put(tsk)
def _add_task(self, tsk):
if hasattr(tsk, 'semaphore'):
sem = tsk.semaphore
try:
sem.acquire(tsk)
except IndexError:
sem.waiting.add(tsk)
return
self.count += 1
self.processed += 1
if self.numjobs == 1:
tsk.log_display(tsk.generator.bld)
try:
self.process_task(tsk)
finally:
self.out.put(tsk)
else:
self.add_task(tsk)
def process_task(self, tsk):
"""
Processes a task and attempts to stop the build in case of errors
"""
tsk.process()
if tsk.hasrun != Task.SUCCESS:
self.error_handler(tsk)
def skip(self, tsk):
"""
Mark a task as skipped/up-to-date
"""
tsk.hasrun = Task.SKIPPED
self.mark_finished(tsk)
def cancel(self, tsk):
"""
Mark a task as failed because of unsatisfiable dependencies
"""
tsk.hasrun = Task.CANCELED
self.mark_finished(tsk)
def error_handler(self, tsk):
"""
Called when a task cannot be executed. The flag :py:attr:`waflib.Runner.Parallel.stop` is set,
unless the build is executed with::
$ waf build -k
:param tsk: task instance
:type tsk: :py:attr:`waflib.Task.Task`
"""
if not self.bld.keep:
self.stop = True
self.error.append(tsk)
def task_status(self, tsk):
"""
Obtains the task status to decide whether to run it immediately or not.
:return: the exit status, for example :py:attr:`waflib.Task.ASK_LATER`
:rtype: integer
"""
try:
return tsk.runnable_status()
except Exception:
self.processed += 1
tsk.err_msg = traceback.format_exc()
if not self.stop and self.bld.keep:
self.skip(tsk)
if self.bld.keep == 1:
# if -k stop on the first exception, if -kk try to go as far as possible
if Logs.verbose > 1 or not self.error:
self.error.append(tsk)
self.stop = True
else:
if Logs.verbose > 1:
self.error.append(tsk)
return Task.EXCEPTION
tsk.hasrun = Task.EXCEPTION
self.error_handler(tsk)
return Task.EXCEPTION
def start(self):
"""
Obtains Task instances from the BuildContext instance and adds the ones that need to be executed to
:py:class:`waflib.Runner.Parallel.ready` so that the :py:class:`waflib.Runner.Spawner` consumer thread
has them executed. Obtains the executed Tasks back from :py:class:`waflib.Runner.Parallel.out`
and marks the build as failed by setting the ``stop`` flag.
If only one job is used, then executes the tasks one by one, without consumers.
"""
self.total = self.bld.total()
while not self.stop:
self.refill_task_list()
# consider the next task
tsk = self.get_next_task()
if not tsk:
if self.count:
# tasks may add new ones after they are run
continue
else:
# no tasks to run, no tasks running, time to exit
break
if tsk.hasrun:
# if the task is marked as "run", just skip it
self.processed += 1
continue
if self.stop: # stop immediately after a failure is detected
break
st = self.task_status(tsk)
if st == Task.RUN_ME:
self._add_task(tsk)
elif st == Task.ASK_LATER:
self.postpone(tsk)
elif st == Task.SKIP_ME:
self.processed += 1
self.skip(tsk)
self.add_more_tasks(tsk)
elif st == Task.CANCEL_ME:
# A dependency problem has occurred, and the
# build is most likely run with `waf -k`
if Logs.verbose > 1:
self.error.append(tsk)
self.processed += 1
self.cancel(tsk)
# self.count represents the tasks that have been made available to the consumer threads
# collect all the tasks after an error else the message may be incomplete
while self.error and self.count:
self.get_out()
self.ready.put(None)
if not self.stop:
assert not self.count
assert not self.postponed
assert not self.incomplete
def prio_and_split(self, tasks):
"""
Label input tasks with priority values, and return a pair containing
the tasks that are ready to run and the tasks that are necessarily
waiting for other tasks to complete.
The priority system is really meant as an optional layer for optimization:
dependency cycles are found quickly, and builds should be more efficient.
A high priority number means that a task is processed first.
This method can be overridden to disable the priority system::
def prio_and_split(self, tasks):
return tasks, []
:return: A pair of task lists
:rtype: tuple
"""
# to disable:
#return tasks, []
for x in tasks:
x.visited = 0
reverse = self.revdeps
groups_done = set()
for x in tasks:
for k in x.run_after:
if isinstance(k, Task.TaskGroup):
if k not in groups_done:
groups_done.add(k)
for j in k.prev:
reverse[j].add(k)
else:
reverse[k].add(x)
# the priority number is not the tree depth
def visit(n):
if isinstance(n, Task.TaskGroup):
return sum(visit(k) for k in n.next)
if n.visited == 0:
n.visited = 1
if n in reverse:
rev = reverse[n]
n.prio_order = n.tree_weight + len(rev) + sum(visit(k) for k in rev)
else:
n.prio_order = n.tree_weight
n.visited = 2
elif n.visited == 1:
raise Errors.WafError('Dependency cycle found!')
return n.prio_order
for x in tasks:
if x.visited != 0:
# must visit all to detect cycles
continue
try:
visit(x)
except Errors.WafError:
self.debug_cycles(tasks, reverse)
ready = []
waiting = []
for x in tasks:
for k in x.run_after:
if not k.hasrun:
waiting.append(x)
break
else:
ready.append(x)
return (ready, waiting)
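	# Worked example, assuming the default tree_weight of 0: for a chain where
	# b.run_after == {a} and c.run_after == {b}, the reverse map built above is
	# {a: {b}, b: {c}}, so visit() assigns prio_order(c) = 0,
	# prio_order(b) = 0 + 1 + 0 = 1 and prio_order(a) = 0 + 1 + 1 = 2;
	# a is therefore scheduled first. The final split then returns a as ready
	# and b, c as waiting until their predecessors have run.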
def debug_cycles(self, tasks, reverse):
tmp = {}
for x in tasks:
tmp[x] = 0
def visit(n, acc):
if isinstance(n, Task.TaskGroup):
for k in n.next:
visit(k, acc)
return
if tmp[n] == 0:
tmp[n] = 1
for k in reverse.get(n, []):
visit(k, [n] + acc)
tmp[n] = 2
elif tmp[n] == 1:
lst = []
for tsk in acc:
lst.append(repr(tsk))
if tsk is n:
# exclude prior nodes, we want the minimum cycle
break
raise Errors.WafError('Task dependency cycle in "run_after" constraints: %s' % ''.join(lst))
for x in tasks:
visit(x, [])
|
jackaudio/jack2
|
waflib/Runner.py
|
Python
|
gpl-2.0
| 16,146
|
[
"VisIt"
] |
8ed650123a345e09f2445cd8e89a664c3b69bf53d87dd2d714542e34cad80bc4
|
import numpy as np
import os
import theano
import theano.tensor as T
from deepmonster.adlf.activations import Rectifier
from deepmonster.adlf.initializations import Initialization, Gaussian, Orthogonal
from deepmonster.adlf.network import Feedforward, find_nets, find_attributes
from deepmonster.adlf.rnn import ConvLSTM, LSTM
from deepmonster.adlf.utils import (getftensor5, collapse_time_on_batch,getnumpyf32,
expand_time_from_batch, stack_time)
image_size = (120,120)
batch_size = 32
channels = 1
config = {
'batch_norm' : True,
'activation' : Rectifier(),
'initialization' : Initialization({'W' : Gaussian(std=0.0333),
'U' : Orthogonal(0.0333)}),
}
# REMINDER: If a config is set directly on a layer, it will have priority!
# REMINDER2: CONVLSTM always have Tanh() as non-linearity
layers = [
#ConvLSTM(5, 64, strides=(2,2), padding='half',
# num_channels=channels, image_size=image_size), # 60x60
#ConvLayer(5, 64, strides=(2,2), padding='half',
# num_channels=channels, image_size=image_size), # 60x60
#ConvLayer(3, 64, strides=(1,1), padding='half'),
#ConvLayer(3, 128, strides=(2,2), padding='half'), # 30x30
#ConvLSTM(3, 128, strides=(1,1), padding='half'),
LSTM(input_dims=50, output_dims=200),
]
net = Feedforward(layers, 'net', **config)
#x = getftensor5()('x')
x = T.ftensor3('x')
y = net.fprop(x**2 / 2.)
cost = y.mean()
parameters = net.params
from blocks.algorithms import Scale
from blocks.algorithms import GradientDescent
optimizer = Scale(0.)
print "Calling Algorithm"
algorithm = GradientDescent(
#gradients=grads, parameters=parameters,
cost=cost,
parameters=parameters,
step_rule=optimizer)
from theano.compile.nanguardmode import NanGuardMode
fun = theano.function(
inputs=[x],outputs=[cost],updates=algorithm.updates,
mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True))
#npx = getnumpyf32((5, batch_size, channels,)+image_size)
npx = np.random.random((5,32,50)).astype(np.float32)
out = fun(npx)
#for i,v in enumerate(parameters):
# if 'U' in v.name:
# theano.printing.debugprint(algorithm.updates[i][1])
# break
|
olimastro/DeepMonster
|
deepmonster/testing/conlstm_bn.py
|
Python
|
mit
| 2,248
|
[
"Gaussian"
] |
fcf8868eed7b35d08f180434a62417ad7905218d13b4795d37923af3b8a5eef2
|
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008      Peter G. Landgren
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by Alex Roitman, largely based on relationship.py by Don Allingham
# and on valuable input from Jens Arvidsson
# Updated to 3.0 by Peter Landgren 2007-12-30.
#
"""
Swedish-specific definitions of relationships
"""
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import Person
import gramps.gen.relationship
#-------------------------------------------------------------------------
_cousin_level = [ "", "kusin",
"tremänning", "fyrmänning", "femmänning",
"sexmänning", "sjumänning", "åttamänning",
"niomänning", "tiomänning", "elvammänning",
"tolvmänning", "trettonmänning", "fjortonmänning",
"femtonmänning", "sextonmänning", "sjuttonmänning",
"artonmänning", "nittonmänning", "tjugomänning",
"tjugoettmänning", "tjugotvåmänning", "tjugotremänning",
"tjugofyramänning","tjugofemmänning","tjugoexmänning",
"tjugosjumänning","tjugoåttamänning","tjugoniomänning",
"trettiomänning" ]
_children_level = 20
_level_name = [ "", "första",
"andra", "tredje", "fjärde", "femte",
"sjätte", "sjunde", "åttonde", "nionde",
"tionde", "elfte", "tolfte", "trettonde",
"fjortonde", "femtonde", "sextonde", "sjuttonde",
"artonde", "nittonde", "tjugonde" ]
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class RelationshipCalculator(gramps.gen.relationship.RelationshipCalculator):
"""
RelationshipCalculator Class
"""
#sibling strings
STEP = 'styv'
HALF = 'halv'
#in-law string
INLAW = 'ingift '
def __init__(self):
gramps.gen.relationship.RelationshipCalculator.__init__(self)
def _get_cousin(self, level, step, inlaw):
if level > len(_cousin_level)-1:
return "avlägset släkt"
else:
result = inlaw + _cousin_level[level]
            # Indicate step relations by adding ' [styv]'
if step:
result = result + ' [styv]'
return result
def pair_up(self, rel_list, step):
result = []
item = ""
for word in rel_list[:]:
if not word:
continue
if word.replace(' [styv]', '') in _cousin_level:
if item:
result.append(item)
item = ""
result.append(word)
continue
if item:
if word == 'syster':
item = item[0:-1]
word = 'ster'
elif word == 'dotter' and item == 'bror':
item = 'brors'
result.append(item + word)
item = ""
else:
item = word
if item:
result.append(item)
gen_result = [ item + 's' for item in result[0:-1] ]
gen_result = ' '.join(gen_result+result[-1:])
        # Indicate step relations by adding ' [styv]' if not already added.
if len(rel_list)>1 and step != '' and not gen_result.rfind(' [styv]'):
gen_result = gen_result + ' [styv]'
return gen_result
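    # Worked examples of the pairing logic above (illustrative):
    #   pair_up(['far', 'far'], '')          -> 'farfar'
    #   pair_up(['far', 'far', 'bror'], '')  -> 'farfars bror'
    # i.e. consecutive words are glued pairwise and every pair except the last
    # receives a genitive 's'.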
def _get_direct_ancestor(self, person_gender, rel_string, step, inlaw):
result = []
for rel in rel_string:
if rel == 'f':
result.append('far')
else:
result.append('mor')
if person_gender == Person.MALE:
result[-1] = 'far'
if person_gender == Person.FEMALE:
result[-1] = 'mor'
if person_gender == Person.UNKNOWN:
result[-1] = 'förälder'
if step != '' and len(result)==1:
            #Precede with step prefix of father/mother
result[0] = self.STEP + result[0]
if inlaw != '':
            #Precede with inlaw prefix
result[-1] = 'svär' + result[-1]
if len(result)>1 and len(result) % 2 == 0 and \
(person_gender == Person.UNKNOWN or inlaw != ''):
# Correct string "-2" with genitive s and add a space to get
# correct Swedish, if even number in result
result[-2] = result[-2] + 's '
return self.pair_up(result, step)
def _get_direct_descendant(self, person_gender, rel_string, step, inlaw):
result = []
for ix in range(len(rel_string)-2, -1, -1):
if rel_string[ix] == 'f':
result.append('son')
else:
result.append('dotter')
if person_gender == Person.MALE:
result.append('son')
elif person_gender == Person.FEMALE:
result.append('dotter')
else:
if person_gender == Person.UNKNOWN and inlaw == '':
result.append('barn')
if person_gender == Person.UNKNOWN and inlaw != '':
result.append('-son/dotter')
if step != '' and len(result)==1:
result[0] = self.STEP + result[0]
if inlaw != '':
            #Precede with inlaw prefix
result[-1] = 'svär' + result[-1]
if len(result)>1 and len(result) % 2 == 0 and \
(person_gender == Person.UNKNOWN or inlaw != ''):
# Correct string "-2" with genitive s and add a space to get
# correct Swedish, if even number in result
result[-2] = result[-2] + 's '
return self.pair_up(result, step)
def _get_ancestors_cousin(self, rel_string_long, rel_string_short, step, inlaw):
result = []
removed = len(rel_string_long)-len(rel_string_short)
level = len(rel_string_short)-1
for ix in range(removed):
if rel_string_long[ix] == 'f':
result.append('far')
else:
result.append('mor')
if inlaw != '' :
inlaw = 's ingifta '
result.append(self._get_cousin(level, step, inlaw))
if step != '' and len(result)==1:
result[0] = self.STEP + result[0]
return self.pair_up(result, step)
def _get_cousins_descendant(self, person_gender, rel_string_long, rel_string_short, step, inlaw):
result = []
removed = len(rel_string_long)-len(rel_string_short)-1
level = len(rel_string_short)-1
if level:
result.append(self._get_cousin(level, step, inlaw))
elif rel_string_long[removed] == 'f':
result.append('bror')
else:
result.append('syster')
for ix in range(removed-1, -1, -1):
if rel_string_long[ix] == 'f':
result.append('son')
else:
result.append('dotter')
if person_gender == Person.MALE:
result.append('son')
elif person_gender == Person.FEMALE:
result.append('dotter')
else:
if person_gender == Person.UNKNOWN and inlaw == '':
result.append('barn')
if person_gender == Person.UNKNOWN and inlaw != '':
result.append('-son/dotter')
if step != '' and len(result) == 1:
result[0] = self.STEP + result[0]
if inlaw != '':
            #Precede with inlaw prefix
result[-1] = 'svär' + result[-1]
if len(result)>1 and len(result) % 2 == 0 and \
(person_gender == Person.UNKNOWN or inlaw != ''):
# Correct string "-2" with genitive s and add a space to get
# correct Swedish, if even number in result
result[-2] = result[-2] + 's '
return self.pair_up(result, step)
def _get_ancestors_brother(self, rel_string, person_gender, step, inlaw):
result = []
for ix in range(len(rel_string)-1):
if rel_string[ix] == 'f':
result.append('far')
else:
result.append('mor')
result.append('bror')
if person_gender == Person.UNKNOWN: result[-1] = 'syskon'
if step != '' and len(result)==1:
result[0] = self.STEP + result[0]
if inlaw != '':
            #Precede with inlaw prefix
result[-1] = 'svåger'
if inlaw != '' and person_gender == Person.UNKNOWN:
            #Precede with inlaw prefix
result[-1] = 'svåger/svägerska'
if len(result)>1 and len(result) % 2 == 0 and \
(person_gender == Person.UNKNOWN or inlaw != ''):
# Correct string "-2" with genitive s and add a space to get
# correct Swedish, if even number in result
result[-2] = result[-2] + 's '
return self.pair_up(result, step)
def _get_ancestors_sister(self, rel_string, step, inlaw):
result = []
for ix in range(len(rel_string)-1):
if rel_string[ix] == 'f':
result.append('far')
else:
result.append('mor')
result.append('syster')
if step != '' and len(result)==1:
result[0] = self.STEP + result[0]
if inlaw != '' :
            #Precede with inlaw prefix
result[-1] = 'svägerska'
if len(result)>1 and len(result) % 2 == 0 and inlaw != '':
# Correct string "-2" with genitive s and add a space to get
# correct Swedish, if even number in result
result[-2] = result[-2] + 's '
return self.pair_up(result, step)
def get_sibling_relationship_string(self, sib_type, gender_a, gender_b,
in_law_a=False, in_law_b=False):
"""
Determine the string giving the relation between two siblings of
type sib_type.
Eg: b is the brother of a
Here 'brother' is the string we need to determine
This method gives more details about siblings than
get_single_relationship_string can do.
.. warning:: DON'T TRANSLATE THIS PROCEDURE IF LOGIC IS EQUAL IN YOUR
LANGUAGE, AND SAME METHODS EXIST (get_uncle, get_aunt,
get_sibling)
"""
if sib_type == self.NORM_SIB or sib_type == self.UNKNOWN_SIB:
typestr = ''
elif sib_type == self.HALF_SIB_MOTHER \
or sib_type == self.HALF_SIB_FATHER:
typestr = self.HALF
elif sib_type == self.STEP_SIB:
typestr = self.STEP
if gender_b == Person.MALE:
rel_str = "bror"
elif gender_b == Person.FEMALE:
rel_str = "syster"
else:
rel_str = "syskon"
return typestr + rel_str
# kinship report
def _get_cousin_kinship(self, Ga):
rel_str = self._get_cousin(Ga-1, False, '')
if Ga == 2 :
rel_str = rel_str + "er"
else:
rel_str = rel_str + "ar"
return rel_str
def get_plural_relationship_string(self, Ga, Gb,
reltocommon_a='', reltocommon_b='',
only_birth=True,
in_law_a=False, in_law_b=False):
"""
        Provide a string that describes the relationship between a person, and
a group of people with the same relationship. E.g. "grandparents" or
"children".
Ga and Gb can be used to mathematically calculate the relationship.
.. seealso::
http://en.wikipedia.org/wiki/Cousin#Mathematical_definitions
:param Ga: The number of generations between the main person and the
common ancestor.
:type Ga: int
:param Gb: The number of generations between the group of people and the
common ancestor
:type Gb: int
:param reltocommon_a: relation path to common ancestor or common
Family for person a.
Note that length = Ga
:type reltocommon_a: str
:param reltocommon_b: relation path to common ancestor or common
Family for person b.
Note that length = Gb
:type reltocommon_b: str
:param only_birth: True if relation between a and b is by birth only
False otherwise
:type only_birth: bool
:param in_law_a: True if path to common ancestors is via the partner
of person a
:type in_law_a: bool
:param in_law_b: True if path to common ancestors is via the partner
of person b
:type in_law_b: bool
:returns: A string describing the relationship between the person and
the group.
:rtype: str
"""
rel_str = "avlägsna släktingar"
if Ga == 0:
result = []
# These are descendants
if Gb < _children_level:
for AntBarn in range(Gb):
result.append("barn")
rel_str = self.pair_up(result,'')
else:
rel_str = "avlägsna ättlingar"
elif Gb == 0:
# These are parents/grand parents
if Ga < len(_level_name):
if Ga == 1:
rel_str = "föräldrar"
else:
rel_str = "far- och morföräldrar i %s generationen" % _level_name[Ga]
else:
rel_str = "avlägsna förfäder"
elif Gb == 1:
# These are siblings/aunts/uncles
if Ga < len(_level_name):
if Ga == 1:
rel_str = "syskon"
else:
rel_str = "förfäders syskon i %s generationen" % _level_name[Ga-1]
else:
rel_str = "avlägsna farbröder/morbröder/fastrar/mostrar"
elif Ga == 1:
# These are nieces/nephews
if Gb < len(_level_name):
result = []
result.append("syskonbarn")
for AntBarn in range(Gb-2):
result.append("barn")
rel_str = self.pair_up(result,'')
else:
rel_str = "avlägsna brorsöner/systersöner/brorsdöttrar/systerdöttrar"
elif Ga > 1 and Ga == Gb:
# These are cousins in the same generation
rel_str = self._get_cousin_kinship(Ga)
elif Ga > 1 and Ga > Gb:
# These are cousins in different generations with the second person
# being in a higher generation from the common ancestor than the
# first person.
if Gb <= len(_level_name):
rel_str = "förfäders " + self._get_cousin_kinship(Ga) + \
" i "+ _level_name[Gb] + " generationen"
else:
rel_str = "avlägsna kusiner"
elif Gb > 1 and Gb > Ga:
# These are cousins in different generations with the second person
# being in a lower generation from the common ancestor than the
# first person.
if Ga <= len(_level_name):
result = []
result.append(self._get_cousin(Ga-1, False, ''))
for AntBarn in range(Gb-Ga):
result.append("barn")
rel_str = self.pair_up(result,'')
else:
rel_str = "avlägsna kusiner"
if in_law_b == True:
rel_str = "makar till %s" % rel_str
return rel_str
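    # Illustrative call: get_plural_relationship_string(2, 2) falls into the
    # Ga == Gb branch above and returns 'kusiner' (first cousins).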
def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b,
reltocommon_a, reltocommon_b,
only_birth=True,
in_law_a=False, in_law_b=False):
"""
        Provide a string that describes the relationship between a person, and
another person. E.g. "grandparent" or "child".
To be used as: 'person b is the grandparent of a', this will be in
translation string: 'person b is the %(relation)s of a'
Note that languages with gender should add 'the' inside the
translation, so eg in french: 'person b est %(relation)s de a'
where relation will be here: le grandparent
Ga and Gb can be used to mathematically calculate the relationship.
.. seealso::
http://en.wikipedia.org/wiki/Cousin#Mathematical_definitions
Some languages need to know the specific path to the common ancestor.
Those languages should use reltocommon_a and reltocommon_b which is
a string like 'mfmf'.
The possible string codes are:
======================= ===========================================
Code Description
======================= ===========================================
REL_MOTHER # going up to mother
REL_FATHER # going up to father
REL_MOTHER_NOTBIRTH # going up to mother, not birth relation
REL_FATHER_NOTBIRTH # going up to father, not birth relation
REL_FAM_BIRTH # going up to family (mother and father)
REL_FAM_NONBIRTH # going up to family, not birth relation
REL_FAM_BIRTH_MOTH_ONLY # going up to fam, only birth rel to mother
REL_FAM_BIRTH_FATH_ONLY # going up to fam, only birth rel to father
======================= ===========================================
Prefix codes are stripped, so REL_FAM_INLAW_PREFIX is not present.
If the relation starts with the inlaw of the person a, then 'in_law_a'
is True, if it starts with the inlaw of person b, then 'in_law_b' is
True.
Also REL_SIBLING (# going sideways to sibling (no parents)) is not
passed to this routine. The collapse_relations changes this to a
family relation.
Hence, calling routines should always strip REL_SIBLING and
REL_FAM_INLAW_PREFIX before calling get_single_relationship_string()
Note that only_birth=False, means that in the reltocommon one of the
NOTBIRTH specifiers is present.
The REL_FAM identifiers mean that the relation is not via a common
ancestor, but via a common family (note that that is not possible for
direct descendants or direct ancestors!). If the relation to one of the
parents in that common family is by birth, then 'only_birth' is not
set to False. The only_birth() method is normally used for this.
:param Ga: The number of generations between the main person and the
common ancestor.
:type Ga: int
:param Gb: The number of generations between the other person and the
common ancestor.
:type Gb: int
:param gender_a: gender of person a
:type gender_a: int gender
:param gender_b: gender of person b
:type gender_b: int gender
:param reltocommon_a: relation path to common ancestor or common
Family for person a.
Note that length = Ga
:type reltocommon_a: str
:param reltocommon_b: relation path to common ancestor or common
Family for person b.
Note that length = Gb
:type reltocommon_b: str
:param in_law_a: True if path to common ancestors is via the partner
of person a
:type in_law_a: bool
:param in_law_b: True if path to common ancestors is via the partner
of person b
:type in_law_b: bool
:param only_birth: True if relation between a and b is by birth only
False otherwise
:type only_birth: bool
:returns: A string describing the relationship between the two people
:rtype: str
.. note:: 1. the self.REL_SIBLING should not be passed to this routine,
so we should not check on it. All other self.
2. for better determination of siblings, use if Ga=1=Gb
get_sibling_relationship_string
"""
if only_birth:
step = ''
else:
step = self.STEP
if in_law_a or in_law_b :
inlaw = self.INLAW
else:
inlaw = ''
rel_str = "avlägsen %s-släkting eller %s släkting" % (step, inlaw)
if Ga == 0:
# b is descendant of a
if Gb == 0 :
rel_str = 'samma person'
else:
rel_str = self._get_direct_descendant(gender_b, reltocommon_b, step, inlaw)
elif Gb == 0:
# b is parents/grand parent of a
rel_str = self._get_direct_ancestor(gender_b, reltocommon_a, step, inlaw)
elif Gb == 1:
# b is sibling/aunt/uncle of a
# handles brother and unknown gender as second person,
# shows up in "testing unknown cousins same generation"
if gender_b == Person.MALE or gender_b == Person.UNKNOWN:
rel_str = self._get_ancestors_brother(reltocommon_a, gender_b, step, inlaw)
elif gender_b == Person.FEMALE:
rel_str = self._get_ancestors_sister(reltocommon_a, step, inlaw)
elif Ga == Gb:
# a and b cousins in the same generation
rel_str = self._get_cousin(Ga-1, step, inlaw)
elif Ga > Gb:
# These are cousins in different generations with the second person
# being in a higher generation from the common ancestor than the
# first person.
rel_str = self._get_ancestors_cousin(reltocommon_a, reltocommon_b, step, inlaw)
elif Gb > Ga:
# These are cousins in different generations with the second person
# being in a lower generation from the common ancestor than the
# first person.
rel_str = self._get_cousins_descendant(gender_b, reltocommon_b, reltocommon_a, step, inlaw)
return rel_str
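    # Illustrative call, using the defaults only_birth=True and no in-law
    # relation:
    #   RelationshipCalculator().get_single_relationship_string(
    #       0, 2, Person.MALE, Person.MALE, '', 'ff')
    # takes the Ga == 0 branch above and returns 'sonson' (son's son).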
if __name__ == "__main__":
# Test function. Call it as follows from the command line (so as to find
# imported modules):
# export PYTHONPATH=/path/to/gramps/src
# python src/plugins/rel/rel_sv.py
# (Above not needed here)
"""TRANSLATORS, copy this if statement at the bottom of your
rel_xx.py module, and test your work with:
python src/plugins/rel/rel_xx.py
"""
from gramps.gen.relationship import test
RC = RelationshipCalculator()
test(RC, True)
|
gramps-project/gramps
|
gramps/plugins/rel/rel_sv.py
|
Python
|
gpl-2.0
| 23,593
|
[
"Brian"
] |
6c5bf91959b18d99845124e7ab03d61f25914fed03e852b47ffd9899948de3cd
|
from __future__ import absolute_import
import sys, os, yaml, glob
import subprocess
import pandas as pd
import argparse
import re
import string
import shutil
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from nougat import common
from nougat import pdf
from nougat.pdf.theme import colors, DefaultTheme
def main(args):
workingDir = os.getcwd()
validation_dirs = args.validation_dirs
assemblies_dirs = args.assemblies_dirs
min_contig_length = args.min_contig_length
# store all samples folders
assemblies_samples_dirs = [sample for sample in \
os.listdir(assemblies_dirs) \
if os.path.isdir(os.path.join(assemblies_dirs,sample))]
# store all samples folders
validation_samples_dirs = [sample for sample in \
os.listdir(validation_dirs) \
if os.path.isdir(os.path.join(validation_dirs,sample))]
if args.no_uppmax:
collect_results_and_report(validation_dirs, assemblies_dirs, "",
args.sample_name, min_contig_length, args.no_uppmax)
else:
for sample in assemblies_samples_dirs:
if sample not in validation_samples_dirs:
                sys.exit("ATTENTION: sample {} is present in dir {} but \
absent in dir {}".format(sample, assemblies_dirs,
validation_dirs))
else: #otherwise I have one or more sample folders
#It means that I have assembled with a set of assemblers and
#all of them have gone through validation... I can proceed
os.chdir(workingDir)
validation_sample_dir = os.path.join(validation_dirs, sample)
assemblies_sample_dir = os.path.join(assemblies_dirs, sample)
sample_folder = os.path.join(workingDir, sample)
if not os.path.exists(sample_folder):
os.makedirs(sample_folder)
os.chdir(sample_folder)
collect_results_and_report(validation_sample_dir,
assemblies_sample_dir, sample_folder, sample,
min_contig_length, args.no_uppmax)
def collect_results_and_report(validation_sample_dir, assemblies_sample_dir,
sample_folder, sample, min_contig_length, no_uppmax):
assemblers_validation = [assembler for assembler in \
os.listdir(validation_sample_dir) \
if os.path.isdir(os.path.join(validation_sample_dir,assembler))]
assemblers_assemblies = [assembler for assembler in \
os.listdir(assemblies_sample_dir) \
if os.path.isdir(os.path.join(assemblies_sample_dir,assembler))]
if set(assemblers_validation) != set(assemblers_assemblies):
sys.exit("Error: different assemblies in assemblies and validation "
"folder: {} {}".format(
assemblies_sample_dir,validation_sample_dir))
assemblers_assemblies = sorted(assemblers_assemblies)
assemblers_validation = sorted(assemblers_validation)
#now I am in the folder that will contain all the results
if not os.path.exists("assemblies"):
os.makedirs("assemblies")
for assembler in assemblers_assemblies:
shutil.copytree(os.path.join(assemblies_sample_dir, assembler),
os.path.join("assemblies", assembler))
##copied original assemblies
if not os.path.exists("evaluation"):
os.makedirs("evaluation")
# let us start with QAcompute results
QA_pictures = os.path.join(sample_folder, "evaluation", "QA_pictures")
if not os.path.exists(QA_pictures):
os.makedirs(QA_pictures)
picturesQA = {}
for assembler in assemblers_assemblies:
original_QA_dir = os.path.join(validation_sample_dir,
assembler, "QAstats")
if not os.path.exists(original_QA_dir):
continue
cur_ass_dir = os.path.join(QA_pictures, assembler)
if not os.path.exists(cur_ass_dir):
os.makedirs(cur_ass_dir)
##QA pictures
Original_CoverageDistribution200 = os.path.join(validation_sample_dir,
assembler, "QAstats", "Coverage_distribution_noOutliers.png")
Original_GC_vs_Coverage = os.path.join(validation_sample_dir,
assembler, "QAstats", "GC_vs_Coverage_noOutliers.png")
Original_GC_vs_CtgLength = os.path.join(validation_sample_dir,
assembler, "QAstats", "GC_vs_CtgLength.png")
Original_MedianCov_vs_CtgLength = os.path.join(validation_sample_dir,
assembler, "QAstats", "MedianCov_vs_CtgLength_noOutliers.png")
Original_QAstats_gc_result = ""
QAstats_gc_result_file_name = ""
if no_uppmax:
QAstats_gc_result_file_name = [name for name in \
os.listdir(os.path.join(validation_sample_dir,
assembler, "QAstats")) \
if name.endswith(".bam.cov.gc")][0]
Original_QAstats_gc_result = os.path.join(validation_sample_dir,
assembler, "QAstats",
"{}".format(QAstats_gc_result_file_name))
else:
Original_QAstats_gc_result = os.path.join(validation_sample_dir,
assembler, "QAstats", "{}.bam.cov.gc".format(sample))
Copied_CoverageDistribution200 = os.path.join(cur_ass_dir,
"Coverage_distribution_noOutliers.png")
Copied_GC_vs_Coverage = os.path.join(cur_ass_dir,
"GC_vs_Coverage_noOutliers.png")
Copied_GC_vs_CtgLength = os.path.join(cur_ass_dir,
"GC_vs_CtgLength.png")
Copied_MedianCov_vs_CtgLength = os.path.join(cur_ass_dir,
"MedianCov_vs_CtgLength_noOutliers.png")
if no_uppmax:
Copied_QAstats_gc_result = os.path.join(cur_ass_dir,
QAstats_gc_result_file_name)
else:
Copied_QAstats_gc_result = os.path.join(cur_ass_dir,
"{}.bam.cov.gc".format(sample))
shutil.copy(Original_CoverageDistribution200,
Copied_CoverageDistribution200)
shutil.copy(Original_GC_vs_Coverage,
Copied_GC_vs_Coverage)
shutil.copy(Original_GC_vs_CtgLength,
Copied_GC_vs_CtgLength)
shutil.copy(Original_MedianCov_vs_CtgLength,
Copied_MedianCov_vs_CtgLength)
shutil.copy(Original_QAstats_gc_result,
Copied_QAstats_gc_result)
picturesQA[assembler] = [[Copied_CoverageDistribution200,
"Contig coverage distribtion" ],
[Copied_GC_vs_Coverage, "GC-content versus contig-coverage"],
[Copied_GC_vs_CtgLength, "GC-content versus contig-Length"],
[Copied_MedianCov_vs_CtgLength,
"Median-coverage vs Contig-Length"]]
# now FRCurve results
FRC_folder = os.path.join(sample_folder, "evaluation", "FRCurves")
if not os.path.exists(FRC_folder):
os.makedirs(FRC_folder)
Features = ["_FRC", "COMPR_MP_FRC", "COMPR_PE_FRC", "HIGH_COV_PE_FRC",
"HIGH_NORM_COV_PE_FRC", "HIGH_OUTIE_MP_FRC", "HIGH_OUTIE_PE_FRC",
"HIGH_SINGLE_MP_FRC", "HIGH_SINGLE_PE_FRC", "HIGH_SPAN_MP_FRC",
"HIGH_SPAN_PE_FRC", "LOW_COV_PE_FRC", "LOW_NORM_COV_PE_FRC",
"STRECH_MP_FRC", "STRECH_PE_FRC"]
FRC_to_print = ""
for feature in Features:
FRCurves = []
for assembler in assemblers_assemblies:
FRCurve_Orig_name = os.path.join(validation_sample_dir, assembler,
"FRCurve", "{}{}.txt".format(sample, feature))
FRCurves.append([assembler, FRCurve_Orig_name])
FRCname = _plotFRCurve(os.path.join(FRC_folder, "{}_{}".format(sample,
feature)), FRCurves)
if feature == "_FRC":
FRC_to_print = FRCname
FRCurves = []
#Contiguity stats
contig_stats = []
source_stats = []
for assembler in assemblers_assemblies:
asm_stats = [assembler]
stat_file = os.path.join(validation_sample_dir, assembler,
"contig_stats", "contiguity.out")
source_stats.append((stat_file, assembler))
with open(stat_file, "r") as sf:
for line in sf:
if line.startswith("scaffolds"):
sl = line.strip().split("\t")
asm_stats.extend([sl[1], sl[4], sl[8], sl[9], sl[10], sl[2], sl[5]])
contig_stats.append(asm_stats)
contig_stats = sorted(contig_stats, key=lambda x: x[0])
# Copy the contig stats
stat_folder = os.path.join(sample_folder, "evaluation", "contig_stats")
if not os.path.exists(stat_folder):
os.makedirs(stat_folder)
for src_stat in source_stats:
shutil.copy(src_stat[0], os.path.join(stat_folder, "{}.contiguity.out".format(src_stat[1])))
# Building the BUSCO results table
BUSCO = [] # Assembly, BUSCO dataset, Complete, Duplicates, Fragmented, Missing, Total
BUSCO_dirs = []
BUSCO_lineage = []
for assembler in assemblers_assemblies:
#Find which BUSCO data set was used from the evaluation config file
sample_config_g = os.path.join(validation_sample_dir,
assembler, "*_evaluete.yaml")
sample_config_f = glob.glob(sample_config_g)[0]
with open(sample_config_f, "r") as f:
sample_config = yaml.load(f)
try:
data_path = sample_config["BUSCODataPath"]
except KeyError:
print("No BUSCO data found. Assuming BUSCO was not run.")
BUSCO_lineage.append(None)
continue
BUSCO_lineage.append(data_path)
#Find the BUSCO metrics from the result file
summary_g = os.path.join(validation_sample_dir, assembler, "BUSCO", "run_*", "full_table_*")
if len(glob.glob(summary_g)) > 0:
summary_f = glob.glob(summary_g)[0]
BUSCO_dirs.append([os.path.dirname(summary_f), assembler])
summary_b = {"Complete":[], "Duplicated":[], "Fragmented":[], "Missing":[], "Total":[]}
with open(summary_f, "r") as f:
for line in f:
# File may or may not contain header
if line.startswith("#"):
continue
b_status = line.split()[1]
b_group = line.split()[0]
if b_status in summary_b.keys():
summary_b[b_status].append(b_group)
summary_b["Total"].append(b_group)
reduced_summary = {k: len(set(summary_b[k])) for k in summary_b.keys()}
BUSCO.append([assembler] + [reduced_summary[i] for i in ["Complete", "Duplicated", "Fragmented", "Missing", "Total"]])
# We have samples run with differing BUSCO data sets?!
if len(set(BUSCO_lineage)) != 1:
        raise RuntimeError("There are samples run with differing BUSCO data sets. Check the (*.yaml) sample configuration files!")
BUSCO_target = os.path.join(sample_folder, "evaluation", "BUSCO")
if not os.path.exists(BUSCO_target):
os.makedirs(BUSCO_target)
for bdir in BUSCO_dirs:
shutil.copytree(bdir[0], os.path.join(BUSCO_target, bdir[1]))
#### now I can produce the report
write_report(sample_folder, sample, assemblers_assemblies, picturesQA,
FRC_to_print, min_contig_length, contig_stats, BUSCO, BUSCO_lineage[0])
return
def write_report(sample_folder, sample, assemblers,
picturesQA, FRCname, min_contig_length, contig_stats, BUSCO, BUSCO_lineage):
"""This function produces a pdf report """
# TODO: Build my own report generation function, with blackjack
# and markdown. In fact, forget the blackjack.
reportDir = os.path.join(sample_folder, "report")
if not os.path.exists(reportDir):
os.makedirs(reportDir)
PDFtitle = os.path.join(sample_folder, "report",
"{}_assembly_report.pdf".format(sample))
# this you cannot do in rLab which is why I wrote the helper initially
TABLE_WIDTH = 540
class MyTheme(DefaultTheme):
doc = {
'leftMargin': 25,
'rightMargin': 25,
'topMargin': 20,
'bottomMargin': 25,
'allowSplitting': False
}
# let's create the doc and specify title and author
doc = pdf.Pdf('{}'.format(sample),
'NGI-Stockholm, Science for Life Laboratory')
# now we apply our theme
doc.set_theme(MyTheme)
# give me some space
doc.add_spacer()
# this header defaults to H1
scriptDirectory = os.path.split(os.path.abspath(__file__))[0]
logo_path = os.path.join(scriptDirectory,
'../pictures/ngi_scilife.png')
doc.add_image(logo_path, 540, 50, pdf.CENTER)
# give me some space
doc.add_spacer()
doc.add_header('NGI-Stockholm -- Science For Life Laboratory')
doc.add_header('De Novo Assembly Best-Practice Analysis Report')
doc.add_header('{}'.format(sample))
doc.add_spacer()
doc.add_paragraph("For sample {} NGI-Stockholm best-practice analysis "
"for de novo assembly and assembly evaluation have been "
"performed.".format(sample))
doc.add_paragraph("Sample has been assembled using the following "
"assemblers:")
bollet_list = []
for assembler in assemblers:
bollet_list.append("{}".format(assembler)) # TODO: add version
doc.add_list(bollet_list)
doc.add_spacer()
doc.add_paragraph("Each assembler has been evaluated by aligning a subset "
"of Illumina reads back to the assembled sequence. Consequently "
"statistics on coverage, GC-content, contig length distribution "
"have been computed. A global ranking with FRCurve is also "
"performed.")
doc.add_spacer()
doc.add_paragraph("De novo assembly and de novo assembly evaluation are "
"two difficult computational exercises. Currently there is no tool "
"(i.e., de novo assembler) that is guaranteed to always outperform "
"the others. Many recent publications (e.g., GAGE, GAGE-B, "
"Assemblathon 1 and 2) showed how the same assembler can have "
"totally different performances on slightly different datasets. "
"For these reasons, at NGI-Stockholm we do not limit our de novo "
"analysis to a single tool, instead we employ several assemblers "
"and we provide our users with a semi-automated evaluation in "
"order to allow them to choose the best assembler based on their "
"specific needs. The assembly or assemblies judged to be the best "
"can be directly employed to answer important biological "
"questions, or they can be used as a backbone for a specific user "
"defined assembly pipeline (i.e., use of extra data, use of non "
"supported tools, variation of parameters).")
doc.add_spacer()
doc.add_paragraph("For each assembly the following information is "
"provided:")
doc.add_list(["Table with Standard Assembly Statistics: number of "
"scaffolds, number of scaffold longer than {}bp, N50, N80, length "
"of the longest scaffold, total assembly length, and sum of "
"scaffolds scaffolds longer than "
"{}bp.".format(min_contig_length,min_contig_length),
"For each individual assembler four plots are automatically "
"generated: Contig-coverage distribution, GC-content versus "
"Contig-Coverage, GC-content vs Contig-Length, and Contig "
"Coverage vs Contig Length.",
"FRCurve plot: ROC-curve inspired method for assembly "
"validation."])
doc.add_spacer()
doc.add_paragraph("Only contigs longer than {}bp are used during "
"validation. This is done in order to avoid problems with outlier "
"points and to partially circumvent the fact that some assemblers "
"output small contigs while others perform automatic trimming. "
"Statistics like N50, N80, etc. are computed on the expected "
"genome length in order to normalise the numbers and allow a "
"fair comparison among various tools.".format(min_contig_length))
doc.add_paragraph("Coverage information and FRCurve features are obtained "
"by aligning the same reads used in the assembling phase against "
"the assembled sequences using BWA-MEM algorithm.")
doc.add_paragraph("This report is delivered both via e-mail and via "
"Uppmax. In particular on Uppmax the following files are available "
"for further result inspection:")
doc.add_list([
"The report saved in the folder report.",
"All the assemblies (ie., contigs and scaffolds).",
"All the evaluations. For QA pictures, the same pictures included "
"in this report plus the original cov.gc table are present. For "
"FRC, the FRCurve for each invidual feature is plotted"])
doc.add_paragraph("Please note that the pipeline generates all the plots "
"automatically. The pipeline tries to eliminate outliers in order "
"to visualize data in a meaningful and useful way (e.g., a single "
"contig with extremely high coverage can jeopardize the "
"visualization of all the other contigs). However, there might be "
"situations where interesting points are discarded. We recommend "
"to always inspect the original tables that are delivered on "
"Uppmax altogether with this report.")
doc.add_pagebreak()
doc.add_header("Standard Contiguity Metrics", pdf.H2)
doc.add_paragraph("Contiguity measures give an idea of the connectivity "
"of the assembly. For all the assemblies that have been generated "
"we report:")
doc.add_list([
"n. scaff: number of scaffolds produced by the assembler.",
"n. scaff>{}: number of scaffolds produced by the assembler "
"longer or equal to {}bp.".format(
min_contig_length, min_contig_length),
"NG50: length of the longest scaffold such that the sum of all the "
"scaffolds longer than it is at least 50% of the estimated genome "
"length.",
"NG80: length of the longest scaffold such that the sum of all the "
"scaffolds longer than it is at least 80% of the estimated genome "
"length.",
"max_scf_lgth: maximum scaffold length.",
"Ass_length: total assembly length.",
"Ass_lgth_scfs>{}: total assembly length considering only "
"scaffolds longer or equal to {}bp.".format(
min_contig_length,min_contig_length)])
doc.add_spacer()
assemblyStats = [['assembler', 'n. scaff',
'n. scaff>{}'.format(min_contig_length), 'NG50', 'NG80', 'max_scf_lgth',
'Ass_lgth', 'Ass_lgth_scfs>{}'.format(min_contig_length)]]
assemblyStats.extend(contig_stats)
doc.add_spacer()
doc.add_table(assemblyStats, TABLE_WIDTH)
doc.add_spacer()
doc.add_paragraph("The table can be used to give an idea of the "
"conectivity of the assemblies. N.B. a high connectivity "
"(i.e., a long N50) does not necessarily implies a high quality "
"assembly as this assembly might be the result of a too "
"aggressive strategy.")
#Now QC pictures
doc.add_header("QC plots", pdf.H2)
doc.add_paragraph("QC plots try to represent the relations between "
"coverage, GC content, and scaffolds length in a visual way. "
"The idea is to use the four plots to see if the assembly "
"coincides with the expected results and to check the presence of "
"biases. For each assembler we aligned the same reads used for de "
"novo assembly back to the assembly itself and we compute for each "
"scaffold its coverage, its GC content, and its length. In this "
"way the following plots can be generated:")
doc.add_list(["Contig Coverage Distribution: this plot shows the scaffold "
"coverage distribution. Ideally the picture should look like a "
"gaussian distribution with the maximum around the expected "
"coverage. If the assembly is highly connected (i.e., formed by "
"only tens of scaffolds) this shape might be not visible. ",
"GC-Content versus Contig-Coverage: this plot shows for each "
"scaffold its GCs content on the x-axis and its coverage on the "
"y-axis. Typically scaffolds should cluster forming a cloud. "
"The presence of two distict clouds might suggest the presence of "
"contamination.",
"GC-Content versus Contig-Length: this plot shows for each "
"scaffold its GCs content on the x-axis and its length on the "
"y-axis. The main purpose is to identify biases towards long or "
"short scaffolds.",
"Median-Coverage vs Contig-Length: this plot shows for each "
"scaffold its coverage on the x-axis and its length on the y-axis. "
"The main purpose is to identify biases towards long or short "
"scaffolds."])
doc.add_paragraph("N.B.: pictures are produced automatically thus it "
"might be possible that some outliers are not printed. The "
"original cov.gc table is present under the "
"evaluation/QA_pictures folder")
for assembler, assembler_QC_pictures in picturesQA.items():
doc.add_pagebreak()
doc.add_header("QC plots for {}".format(assembler) , pdf.H3)
doc.add_image(assembler_QC_pictures[0][0], 280, 200, pdf.CENTER,
"Contig Coverage Distribution")
doc.add_image(assembler_QC_pictures[1][0], 280, 200, pdf.CENTER,
"GC-Content versus Contig-Coverage")
doc.add_image(assembler_QC_pictures[2][0], 280, 200, pdf.CENTER,
"GC-Content versus Contig-Length")
doc.add_image(assembler_QC_pictures[3][0], 280, 200, pdf.CENTER,
"Median-Coverage vs Contig-Length")
#FRCurves
doc.add_pagebreak()
doc.add_header("FRCurves", pdf.H2)
doc.add_paragraph("Inspired by the standard receiver operating "
"characteristic (ROC) curve, the Feature-Response curve (FRCurve) "
"characterizes the sensitivity (coverage) of the sequence "
"assembler output (contigs) as a function of its discrimination "
"threshold (number of features/errors). Generally speaking, "
"FRCurve can be used to rank different assemblies: the sharper "
"the curve is the better the assembly is (i.e., given a certain "
"feature threshold, we prefer the assembler that reconstructs a "
"higher portion of the genome with fewer features). FRCurve is one "
"of the few tools able to evaluate de novo assemblies in absence "
"of a reference sequence. Results are not always straightforward "
"to interpret and must be always used in conjunction with other "
"sources (e.g., quality plots and standard assembly statistics).")
doc.add_image(FRCname, 336, 220, pdf.CENTER, "Feature-Response curve. "
"On x-axis is the number of features in total, on y-axis is "
"coverage (based on estimated genome size).")
#BUSCO
BUSCO_table = [["Assembly", "Complete", "Duplicates",
"Fragmented", "Missing", "Total"]]
BUSCO_table.extend(BUSCO)
doc.add_pagebreak()
doc.add_header("BUSCO", pdf.H2)
doc.add_paragraph("BUSCO evaluates the completeness de novo assemblies in "
"terms of gene content. It uses a lineage specific dataset (eg. "
"eykaryotes, vertebrates, bacteria) of single-copy orthologous "
"genes to query the assembly. Genes that are recovered from the "
"de novo assembled sequence are classified as 'Complete', "
"'Duplicate' , or 'Fragmented'. A complete gene falls within "
"two standard deviations of the expected gene length of the "
"particular dataset, otherwise it's classified as fragmented. "
"If two complete copies of a gene is recovered, it is classified "
"as duplicate.")
doc.add_spacer()
doc.add_paragraph("BUSCO lineage: {}".format(BUSCO_lineage))
doc.add_table(BUSCO_table, TABLE_WIDTH)
doc.render(PDFtitle)
return 0
def _plotFRCurve(outputName, FRCurves):
marks = [None,'o', 's', 'v', '>', 'd', 'x', '<', '^', '+']
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
plt.rcParams['lines.linewidth'] = 2.0
FRCurveName = "{}_FRCurve.png".format(outputName)
maxXvalues = []
for i, FRCurveData in enumerate(FRCurves):
assembler = FRCurveData[0]
FRCurve = FRCurveData[1]
FRC_data = pd.io.parsers.read_csv(FRCurve, sep=' ', header=None)
FRC_features = FRC_data[FRC_data.columns[0]].tolist()
FRC_coverage = FRC_data[FRC_data.columns[1]].tolist()
plt.plot(FRC_features, FRC_coverage, label="{}".format(assembler),
color=tableau20[i], marker=marks[i], markersize=5, markeredgecolor='k', markevery=2)
maxXvalues.append(max(FRC_features))
maxXvalues.sort()
maxXvalues.reverse()
maxXvalue = maxXvalues[0]
for i in range(1, len(maxXvalues)-1):
if maxXvalue > maxXvalues[i]*2 \
and (maxXvalues[i-1] - maxXvalues[i] > 1000):
maxXvalue = maxXvalues[i] + int(maxXvalues[i]*0.10)
plt.ylim((-5,140))
plt.xlim((-1,maxXvalue))
plt.legend(loc=4, ncol=1, borderaxespad=0.)
plt.savefig(FRCurveName)
plt.clf()
return FRCurveName
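# Note on the expected input (illustrative): FRCurves is a list of
# [assembler_name, path] pairs, and each file is parsed above as two
# space-separated, header-less columns -- the feature count (x-axis) and the
# coverage value (y-axis) plotted in the report.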
if __name__ == '__main__':
    parser = argparse.ArgumentParser("This utility script will generate the "
        "report files for each assembled sample. It is assumed that the "
        "assembly and evaluation parts have been run with this pipeline; "
        "otherwise the assumptions made on the file names and on the "
        "results will not hold.")
parser.add_argument('--validation-dirs', type=str, required=True,
help=("Directory where validation are stored for each sample "
"(one assembler per folder)"))
parser.add_argument('--assemblies-dirs', type=str, required=True,
help=("Directory where assemblies are stored for each sample "
"(one assembler per folder)"))
parser.add_argument('--min-contig-length', type=int, default=1000,
help=("minimum length that a contig must have to be considered "
"long"))
parser.add_argument('--global-config', type=str,
help="global configuration file")
parser.add_argument('--no-uppmax', action='store_true',
default = False, help=("if specified the validation-dir and the "
"assemblies-dir is assumed to contain the assemblies "
"(and not samples) -- this is useful for large multi-library "
"projects"))
parser.add_argument('--sample-name', type=str,
help=("It must be specifed when --no-uppmax is present, in this "
"case you need to tell the porgram under which iouput name the "
"validation has been saved (in the validation yaml file)"))
args = parser.parse_args()
main(args)
|
senthil10/NouGAT
|
sciLifeLab_utils/run_assembly_report.py
|
Python
|
mit
| 28,372
|
[
"BWA",
"Gaussian"
] |
8584bab1c7fc973c741df03fb469373efcb1e2e7e0d3cc76e11a1748bb9af220
|
from io import BytesIO
import numpy as np
import warnings
from .. import Variable
from ..conventions import cf_encoder
from ..core.pycompat import iteritems, basestring, unicode_type, OrderedDict
from ..core.utils import Frozen, FrozenOrderedDict
from ..core.indexing import NumpyIndexingAdapter
from .common import AbstractWritableDataStore
from .netcdf3 import (is_valid_nc3_name, coerce_nc3_dtype,
encode_nc3_attr_value, encode_nc3_variable)
from xray.conventions import cf_decoder
def _decode_string(s):
if isinstance(s, bytes):
return s.decode('utf-8', 'replace')
return s
def _decode_attrs(d):
# don't decode _FillValue from bytes -> unicode, because we want to ensure
# that its type matches the data exactly
return OrderedDict((k, v if k == '_FillValue' else _decode_string(v))
for (k, v) in iteritems(d))
class ScipyArrayWrapper(NumpyIndexingAdapter):
def __init__(self, netcdf_file, variable_name):
self.netcdf_file = netcdf_file
self.variable_name = variable_name
@property
def array(self):
# We can't store the actual netcdf_variable object or its data array,
# because otherwise scipy complains about variables or files still
# referencing mmapped arrays when we try to close datasets without
# having read all data in the file.
return self.netcdf_file.variables[self.variable_name].data
@property
def dtype(self):
# always use native endianness
return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize))
def __getitem__(self, key):
data = super(ScipyArrayWrapper, self).__getitem__(key)
# Copy data if the source file is mmapped. This makes things consistent
# with the netCDF4 library by ensuring we can safely read arrays even
# after closing associated files.
copy = self.netcdf_file.use_mmap
data = np.array(data, dtype=self.dtype, copy=copy)
return data
class ScipyDataStore(AbstractWritableDataStore):
"""Store for reading and writing data via scipy.io.netcdf.
This store has the advantage of being able to be initialized with a
StringIO object, allowing for serialization without writing to disk.
It only supports the NetCDF3 file-format.
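A hedged usage sketch (illustrative only, not part of the original
documentation; assumes Python 2, where a NetCDF3 bytestring counts as a
``basestring``)::
    import scipy.io
    from io import BytesIO
    buf = BytesIO()
    f = scipy.io.netcdf_file(buf, mode='w', version=2)
    f.createDimension('x', 3)
    v = f.createVariable('x', 'f8', ('x',))
    v[:] = [1.0, 2.0, 3.0]
    f.flush()
    store = ScipyDataStore(buf.getvalue())  # read back from memory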
"""
def __init__(self, filename_or_obj, mode='r', mmap=None, version=2):
import scipy
if mode != 'r' and scipy.__version__ < '0.13': # pragma: no cover
warnings.warn('scipy %s detected; '
'the minimal recommended version is 0.13. '
'Older version of this library do not reliably '
'read and write files.'
% scipy.__version__, ImportWarning)
import scipy.io
# if filename is a NetCDF3 bytestring we store it in a StringIO
if (isinstance(filename_or_obj, basestring)
and filename_or_obj.startswith('CDF')):
# TODO: this check has the unfortunate side-effect that
# paths to files cannot start with 'CDF'.
filename_or_obj = BytesIO(filename_or_obj)
self.ds = scipy.io.netcdf_file(
filename_or_obj, mode=mode, mmap=mmap, version=version)
def store(self, variables, attributes):
# All Scipy objects get CF encoded by default, without this attempting
# to write times, for example, would fail.
cf_variables, cf_attrs = cf_encoder(variables, attributes)
AbstractWritableDataStore.store(self, cf_variables, cf_attrs)
def open_store_variable(self, name, var):
return Variable(var.dimensions, ScipyArrayWrapper(self.ds, name),
_decode_attrs(var._attributes))
def get_variables(self):
return FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in iteritems(self.ds.variables))
def get_attrs(self):
return Frozen(_decode_attrs(self.ds._attributes))
def get_dimensions(self):
return Frozen(self.ds.dimensions)
def set_dimension(self, name, length):
if name in self.dimensions:
raise ValueError('%s does not support modifying dimensions'
% type(self).__name__)
self.ds.createDimension(name, length)
def _validate_attr_key(self, key):
if not is_valid_nc3_name(key):
raise ValueError("Not a valid attribute name")
def set_attribute(self, key, value):
self._validate_attr_key(key)
value = encode_nc3_attr_value(value)
setattr(self.ds, key, value)
def prepare_variable(self, name, variable):
# TODO, create a netCDF3 encoder
variable = encode_nc3_variable(variable)
self.set_necessary_dimensions(variable)
data = variable.data
# nb. this still creates a numpy array in all memory, even though we
# don't write the data yet; scipy.io.netcdf does not support
# incremental writes.
self.ds.createVariable(name, data.dtype, variable.dims)
scipy_var = self.ds.variables[name]
for k, v in iteritems(variable.attrs):
self._validate_attr_key(k)
setattr(scipy_var, k, v)
return scipy_var, data
def sync(self):
self.ds.flush()
def close(self):
self.ds.close()
def __exit__(self, type, value, tb):
self.close()
|
clarkfitzg/xray
|
xray/backends/scipy_.py
|
Python
|
apache-2.0
| 5,480
|
[
"NetCDF"
] |
3de8897a47d65015dff01927affa2fe6b27f0ea130f5e14a1f0a292206dd8909
|
#
# Copyright 2010 - 2012 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
import pythics.libinstrument
import visa
class Synthesizer(visa.GpibInstrument):
#initialization
def __init__(self, *args, **kwargs):
visa.GpibInstrument.__init__(self,*args, **kwargs)
self._frequency = None
self._amplitude = None
self._offset = None # initialise so the offset property can be read before it is set
# frequency property
def __get_frequency(self):
return self._frequency
def __set_frequency(self, value):
self._frequency = value
self.write("FREQ "+str(value))
frequency = property(__get_frequency, __set_frequency)
# amplitude property
def __get_amplitude(self):
return self._amplitude
def __set_amplitude(self, value):
self._amplitude = value
self.write("AMPL "+str(value)+"DB")
amplitude = property(__get_amplitude, __set_amplitude)
# offset property
def __get_offset(self):
return self._offset
def __set_offset(self, value):
self._offset = value
self.write("OFFS "+str(value))
offset = property(__get_offset, __set_offset)
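# Hedged usage sketch (the GPIB resource name is an assumption; the command
# strings are exactly those built by the property setters above):
#     synth = Synthesizer("GPIB::19")
#     synth.frequency = 1.0e6    # writes "FREQ 1000000.0"
#     synth.amplitude = -10.0    # writes "AMPL -10.0DB"
#     synth.offset = 0.0         # writes "OFFS 0.0"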
|
dursobr/Pythics
|
pythics/instruments/SRS_DS345.py
|
Python
|
gpl-3.0
| 1,805
|
[
"Brian"
] |
7520ca86ad2b11c4247ced769ca0adc264d1ed17ced7e0644a1bb5f790d9122c
|
"""Fast PSF modelling and centroiding in python
Calculates an accurate one-dimensional undersampled Gaussian profile by
integrating the profile over each pixel analytically.
Contains
--------
gaussian1d - 1D Gaussian profile
gaussian1dmt - 1D Gaussian profile (multithreaded)
gaussians1d - Multiple 1D Gaussian profiles
Notes
-----
- The routines use the center of the first pixel as the origin. Use
an appropriate offset when calculating over a subarray.
"""
import numpy as np
from scipy.optimize import fmin
from numpy.random import multivariate_normal
from .gaussianf import gaussian
from .lorentzianf import lorentzian
try:
from emcee import EnsembleSampler
with_emcee = True
except ImportError:
with_emcee = False
__all__ = ['psf_g1d', 'logl_g1d', 'centroid', 'gaussian1d', 'gaussian1dmt', 'gaussians1d',
'lorentzian1d', 'lorentzian1dmt', 'lorentzians1d']
gaussian1d = g1d = gaussian.gaussian1d
gaussian1dmt = g1dmt = gaussian.gaussian1dmt
gaussians1d = gs1d = gaussian.gaussians1d
lorentzian1d = l1d = lorentzian.lorentzian1d
lorentzian1dmt = l1dmt = lorentzian.lorentzian1dmt
lorentzians1d = ls1d = lorentzian.lorentzians1d
lnlike_gaussian1d = gaussian.lnlike_gaussian1d
lnlike_gaussians1d = gaussian.lnlike_gaussians1d
psf_g1d = gaussian.psf_g1d
logl_g1d = gaussian.logl_g1d
lnlike_lorentzian1d = logl_l1d = lorentzian.lnlike_lorentzian1d
def centroid(img, x0, y0, fwhm_x=8., fwhm_y=8., verbose=False, **kwargs):
def prior_bounds(pv):
return -1e18 if not ((0 < pv[0] < img.shape[1]) | (0 < pv[1] < img.shape[0])) else 0
estimate_errors = kwargs.get('estimate_errors', True)
return_chains = kwargs.get('return_chains', False)
operation = kwargs.get('operation', 'mean')
maxiter = kwargs.get('maxiter', 5000)
maxfun = kwargs.get('maxfun', 5000)
mc_threads = kwargs.get('mc_threads', 1)
mc_nwalkers = kwargs.get('mc_nwalkers', 50)
mc_niter = kwargs.get('mc_niter', 300)
mc_thinning = kwargs.get('mc_thinning', 300)
mc_burn = kwargs.get('mc_burn', 100)
if operation == 'mean':
x, y = img.mean(axis=0), img.mean(axis=1)
elif operation == 'max':
x, y = img.max(axis=0), img.max(axis=1)
else:
raise TypeError
vmin, vmax = 0.5*(x.min()+y.min()), 0.5*(x.max()+y.max())
pv0 = np.array([x0, y0, fwhm_x, fwhm_y, vmax-vmin, 1e-2*(vmax-vmin), vmin])
lpfun = lambda pv: ( logl_g1d(pv[0], pv[4], pv[2], pv[5], pv[6], x)
+ logl_g1d(pv[1], pv[4], pv[3], pv[5], pv[6], y)
+ prior_bounds(pv))
pv = fmin(lambda pv:-lpfun(pv), pv0, disp=verbose, maxfun=maxfun, maxiter=maxiter)
if not (with_emcee and estimate_errors):
return pv, -np.ones(pv.size)
else:
sampler = EnsembleSampler(mc_nwalkers, pv.size, lpfun, threads=1)
sampler.run_mcmc(multivariate_normal(pv, 5e-3*np.eye(pv.size), size=mc_nwalkers), mc_niter);
fc = sampler.chain[:,mc_burn::mc_thinning,:].reshape([-1,pv.size])
pc = np.array(np.percentile(fc, [50,16,84], axis=0))
if return_chains:
return pc[0,:], np.mean(np.abs(pc[1:,:]-pc[0,:]), axis=0), fc
else:
return pc[0,:], np.mean(np.abs(pc[1:,:]-pc[0,:]), axis=0)
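# Hedged usage sketch (the image and the initial guess are illustrative;
# assumes the compiled gaussianf extension is importable):
#     pv, err = centroid(img, x0=32., y0=32., estimate_errors=False)
#     x_centre, y_centre = pv[0], pv[1]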
|
hpparvi/psf-centroid
|
src/__init__.py
|
Python
|
gpl-3.0
| 3,362
|
[
"Gaussian"
] |
5a0e11e31ff8fd62ea81ecb8106d436e446bd9e878aea5dd3e6ea9972cffd180
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import mvn_linear_operator as mvn_linop
from tensorflow.python.framework import ops
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"MultivariateNormalDiagPlusLowRank",
]
class MultivariateNormalDiagPlusLowRank(
mvn_linop.MultivariateNormalLinearOperator):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`scale` matrix; `covariance = scale @ scale.T` where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
y = inv(scale) @ (x - loc),
Z = (2 pi)**(0.5 k) |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||**2` denotes the squared Euclidean norm of `y`.
A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier ones(k)) +
scale_perturb_factor @ diag(scale_perturb_diag) @ scale_perturb_factor.T
```
where:
* `scale_diag.shape = [k]`,
* `scale_identity_multiplier.shape = []`,
* `scale_perturb_factor.shape = [k, r]`, typically `k >> r`, and,
* `scale_perturb_diag.shape = [r]`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
```
#### Examples
```python
tfd = tf.contrib.distributions
# Initialize a single 3-variate Gaussian with covariance `cov = S @ S.T`,
# `S = diag(d) + U @ diag(m) @ U.T`. The perturbation, `U @ diag(m) @ U.T`, is
# a rank-2 update.
mu = [-0.5, 0, 0.5] # shape: [3]
d = [1.5, 0.5, 2] # shape: [3]
U = [[1., 2],
[-1, 1],
[2, -0.5]] # shape: [3, 2]
m = [4., 5] # shape: [2]
mvn = tfd.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_diag=d,
scale_perturb_factor=U,
scale_perturb_diag=m)
# Evaluate this on an observation in `R^3`, returning a scalar.
mvn.prob([-1, 0, 1]).eval() # shape: []
# Initialize a 2-batch of 3-variate Gaussians; `S = diag(d) + U @ U.T`.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [b, k] = [2, 3]
U = [[[1., 2],
[3, 4],
[5, 6]],
[[0.5, 0.75],
[1.0, 0.25],
[1.5, 1.25]]] # shape: [b, k, r] = [2, 3, 2]
m = [[0.1, 0.2],
[0.4, 0.5]] # shape: [b, r] = [2, 2]
mvn = tfd.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_perturb_factor=U,
scale_perturb_diag=m)
mvn.covariance().eval() # shape: [2, 3, 3]
# ==> [[[ 15.63 31.57 48.51]
# [ 31.57 69.31 105.05]
# [ 48.51 105.05 162.59]]
#
# [[ 2.59 1.41 3.35]
# [ 1.41 2.71 3.34]
# [ 3.35 3.34 8.35]]]
# Compute the pdf of two `R^3` observations (one from each batch);
# return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
mvn.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
scale_perturb_factor=None,
scale_perturb_diag=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagPlusLowRank"):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`. A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier ones(k)) +
scale_perturb_factor @ diag(scale_perturb_diag) @ scale_perturb_factor.T
```
where:
* `scale_diag.shape = [k]`,
* `scale_identity_multiplier.shape = []`,
* `scale_perturb_factor.shape = [k, r]`, typically `k >> r`, and,
* `scale_perturb_diag.shape = [r]`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
and characterizes `b`-batches of `k x k` diagonal matrices added to
`scale`. When both `scale_identity_multiplier` and `scale_diag` are
`None` then `scale` is the `Identity`.
scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
a scaled-identity-matrix added to `scale`. May have shape
`[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scaled
`k x k` identity matrices added to `scale`. When both
`scale_identity_multiplier` and `scale_diag` are `None` then `scale` is
the `Identity`.
scale_perturb_factor: Floating-point `Tensor` representing a rank-`r`
perturbation added to `scale`. May have shape `[B1, ..., Bb, k, r]`,
`b >= 0`, and characterizes `b`-batches of rank-`r` updates to `scale`.
When `None`, no rank-`r` update is added to `scale`.
scale_perturb_diag: Floating-point `Tensor` representing a diagonal matrix
inside the rank-`r` perturbation added to `scale`. May have shape
`[B1, ..., Bb, r]`, `b >= 0`, and characterizes `b`-batches of `r x r`
diagonal matrices inside the perturbation added to `scale`. When
`None`, an identity matrix is used inside the perturbation. Can only be
specified if `scale_perturb_factor` is also specified.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if at most `scale_identity_multiplier` is specified.
"""
parameters = dict(locals())
def _convert_to_tensor(x, name):
return None if x is None else ops.convert_to_tensor(x, name=name)
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[
loc, scale_diag, scale_identity_multiplier, scale_perturb_factor,
scale_perturb_diag]):
has_low_rank = (scale_perturb_factor is not None or
scale_perturb_diag is not None)
scale = distribution_util.make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=validate_args,
assert_positive=has_low_rank)
scale_perturb_factor = _convert_to_tensor(
scale_perturb_factor,
name="scale_perturb_factor")
scale_perturb_diag = _convert_to_tensor(
scale_perturb_diag,
name="scale_perturb_diag")
if has_low_rank:
scale = linalg.LinearOperatorLowRankUpdate(
scale,
u=scale_perturb_factor,
diag_update=scale_perturb_diag,
is_diag_update_positive=scale_perturb_diag is None,
is_non_singular=True, # Implied by is_positive_definite=True.
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
super(MultivariateNormalDiagPlusLowRank, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
xodus7/tensorflow
|
tensorflow/contrib/distributions/python/ops/mvn_diag_plus_low_rank.py
|
Python
|
apache-2.0
| 10,134
|
[
"Gaussian"
] |
59060ac3957493e8813e5ce9bb7b259d7381372b58c07e12d341fa5465d34081
|
# $Id$
#
# Copyright (C) 2004-2012 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" uses pymol to interact with molecules
"""
from rdkit import Chem
import os, tempfile, sys
# Python3 compatibility
try:
from xmlrpclib import Server
except ImportError:
from xmlrpc.client import Server
_server=None
class MolViewer(object):
def __init__(self,host=None,port=9123,force=0,**kwargs):
global _server
if not force and _server is not None:
self.server=_server
else:
if not host:
host=os.environ.get('PYMOL_RPCHOST','localhost')
_server=None
serv = Server('http://%s:%d'%(host,port))
serv.ping()
_server = serv
self.server=serv
self.InitializePyMol()
def InitializePyMol(self):
""" does some initializations to set up PyMol according to our
tastes
"""
self.server.do('set valence,1')
self.server.do('set stick_rad,0.15')
self.server.do('set mouse_selection_mode,0')
self.server.do('set line_width,2')
self.server.do('set selection_width,10')
self.server.do('set auto_zoom,0')
def DeleteAll(self):
" blows out everything in the viewer "
self.server.deleteAll()
def DeleteAllExcept(self,excludes):
" deletes everything except the items in the provided list of arguments "
allNames = self.server.getNames('*',False)
for nm in allNames:
if nm not in excludes:
self.server.deleteObject(nm)
def LoadFile(self,filename,name,showOnly=False):
""" calls pymol's "load" command on the given filename; the loaded object
is assigned the name "name"
"""
if showOnly:
self.DeleteAll()
id = self.server.loadFile(filename,name)
return id
def ShowMol(self,mol,name='molecule',showOnly=True,highlightFeatures=[],
molB="",confId=-1,zoom=True,forcePDB=False, showSticks=False):
""" special case for displaying a molecule or mol block """
server = self.server
if not zoom:
self.server.do('view rdinterface,store')
if showOnly:
self.DeleteAll()
if not forcePDB and mol.GetNumAtoms()<999 :
if not molB:
molB = Chem.MolToMolBlock(mol,confId=confId)
mid = server.loadMolBlock(molB,name)
else:
if not molB:
molB = Chem.MolToPDBBlock(mol,confId=confId)
mid = server.loadPDB(molB,name)
if highlightFeatures:
nm = name+'-features'
conf = mol.GetConformer(confId)
for feat in highlightFeatures:
pt = [0.0,0.0,0.0]
for idx in feat:
loc = conf.GetAtomPosition(idx)
pt[0] += loc[0]/len(feat)
pt[1] += loc[1]/len(feat)
pt[2] += loc[2]/len(feat)
server.sphere(pt,0.2,(1,1,1),nm)
if zoom:
server.zoom('visible')
else:
self.server.do('view rdinterface,recall')
if showSticks: # show molecule in stick view
self.server.do('show sticks, {}'.format(name))
return mid
def GetSelectedAtoms(self,whichSelection=None):
" returns the selected atoms "
if not whichSelection:
sels = self.server.getNames('selections')
if sels:
whichSelection = sels[-1]
else:
whichSelection=None
if whichSelection:
items = self.server.index(whichSelection)
else:
items = []
return items
def SelectAtoms(self,itemId,atomIndices,selName='selection'):
" selects a set of atoms "
ids = '(id '
ids += ','.join(['%d'%(x+1) for x in atomIndices])
ids += ')'
cmd = 'select %s,%s and %s'%(selName,ids,itemId)
self.server.do(cmd)
def HighlightAtoms(self,indices,where,extraHighlight=False):
" highlights a set of atoms "
if extraHighlight:
idxText = ','.join(['%s and (id %d)'%(where,x) for x in indices])
self.server.do('edit %s'%idxText)
else:
idxText = ' or '.join(['id %d'%x for x in indices])
self.server.do('select selection, %s and (%s)'%(where,idxText))
def SetDisplayStyle(self,obj,style=''):
" change the display style of the specified object "
self.server.do('hide everything,%s'%(obj,))
if style:
self.server.do('show %s,%s'%(style,obj))
def SelectProteinNeighborhood(self,aroundObj,inObj,distance=5.0,
name='neighborhood',showSurface=False):
""" selects the area of a protein around a specified object/selection name;
optionally adds a surface to that """
self.server.do('select %(name)s,byres (%(aroundObj)s around %(distance)f) and %(inObj)s'%locals())
if showSurface:
self.server.do('show surface,%s'%name)
self.server.do('disable %s'%name)
def AddPharmacophore(self,locs,colors,label,sphereRad=0.5):
" adds a set of spheres "
self.server.do('view rdinterface,store')
self.server.resetCGO(label)
for i,loc in enumerate(locs):
self.server.sphere(loc,sphereRad,colors[i],label,1)
self.server.do('enable %s'%label)
self.server.do('view rdinterface,recall')
def SetDisplayUpdate(self,val):
if not val:
self.server.do('set defer_update,1')
else:
self.server.do('set defer_update,0')
def GetAtomCoords(self,sels):
" returns the coordinates of the selected atoms "
res = {}
for label,idx in sels:
coords = self.server.getAtomCoords('(%s and id %d)'%(label,idx))
res[(label,idx)] = coords
return res
def HideAll(self):
self.server.do('disable all')
def HideObject(self,objName):
self.server.do('disable %s'%objName)
def DisplayObject(self,objName):
self.server.do('enable %s'%objName)
def Redraw(self):
self.server.do('refresh')
def Zoom(self,objName):
self.server.zoom(objName)
def DisplayHBonds(self,objName,molName,proteinName,
molSelText='(%(molName)s)',
proteinSelText='(%(proteinName)s and not het)'):
" toggles display of h bonds between the protein and a specified molecule "
cmd = "delete %(objName)s;\n"
cmd += "dist %(objName)s," + molSelText+","+proteinSelText+",mode=2;\n"
cmd += "enable %(objName)s;"
cmd = cmd%locals()
self.server.do(cmd)
def DisplayCollisions(self,objName,molName,proteinName,distCutoff=3.0,
color='red',
molSelText='(%(molName)s)',
proteinSelText='(%(proteinName)s and not het)'):
" toggles display of collisions between the protein and a specified molecule "
cmd = "delete %(objName)s;\n"
cmd += "dist %(objName)s," + molSelText+","+proteinSelText+",%(distCutoff)f,mode=0;\n"
cmd += """enable %(objName)s
color %(color)s, %(objName)s"""
cmd = cmd%locals()
self.server.do(cmd)
def SaveFile(self,filename):
# PyMol will interpret the path to be relative to where it was started
# from. Remedy that.
if not filename:
raise ValueError('empty filename')
filename = os.path.abspath(filename)
self.server.save(filename)
def GetPNG(self,h=None,w=None,preDelay=0):
try:
import Image
except ImportError:
from PIL import Image
import time
if preDelay>0:
time.sleep(preDelay)
fd = tempfile.NamedTemporaryFile(suffix='.png',delete=False)
fd.close()
self.server.do('png %s'%fd.name)
time.sleep(0.2) # <- wait a short period so that PyMol can finish
for i in range(10):
try:
img = Image.open(fd.name)
break
except IOError:
time.sleep(0.1)
try:
os.unlink(fd.name)
except (OSError,PermissionError):
# happens sometimes on Windows. Not going to worry about this too deeply since
# the files are in a temp dir anyway. This was github #936
pass
fd=None
if h is not None or w is not None:
sz = img.size
if h is None:
h=sz[1]
if w is None:
w=sz[0]
if h<sz[1]:
frac = float(h)/sz[1]
w *= frac
w = int(w)
img=img.resize((w,h),True)
elif w<sz[0]:
frac = float(w)/sz[0]
h *= frac
h = int(h)
img=img.resize((w,h),True)
return img
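# Hedged usage sketch (assumes a PyMol instance is listening on the default
# XML-RPC port, e.g. one started with "pymol -R"; the file name is
# illustrative):
#     v = MolViewer() # connects to localhost:9123
#     v.LoadFile('ligand.mol', 'ligand', showOnly=True)
#     v.Zoom('ligand')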
|
adalke/rdkit
|
rdkit/Chem/PyMol.py
|
Python
|
bsd-3-clause
| 8,287
|
[
"PyMOL",
"RDKit"
] |
8525123d1d7b2b934408046a201f8fd7440a131270ae9d5c1bf627b7baf2661d
|
"""
DIRAC.Core.Utilities package
"""
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/Utilities/__init__.py
|
Python
|
gpl-3.0
| 40
|
[
"DIRAC"
] |
3b4067e7a42b22e239c04583aca7006609a6f105b01c5e9ff480403096d10943
|
"""
A simple script to analyse ground/lab flat fields.
This script has been written to analyse the wavelength dependency of the PRNU.
:author: Sami-Matias Niemi
:version: 0.4
"""
import matplotlib
#matplotlib.use('pdf')
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['font.size'] = 17
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.1)
matplotlib.rcParams['legend.fontsize'] = 11
matplotlib.rcParams['legend.handlelength'] = 3
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
matplotlib.rcParams['image.interpolation'] = 'none'
from mpl_toolkits.axes_grid1 import AxesGrid
import matplotlib.pyplot as plt
import pyfits as pf
import numpy as np
import glob as g
from scipy.ndimage.filters import gaussian_filter
from scipy import signal
from scipy.linalg import norm
from scipy import fftpack
from scipy import ndimage
from support import files as fileIO
from astropy.stats import sigma_clip
from astropy.modeling import models, fitting
from multiprocessing import Pool
import math, os
from PIL import Image
from scipy.interpolate import interp1d
from skimage.measure import structural_similarity as ssim
from sklearn import linear_model
from sklearn.gaussian_process import GaussianProcess
from matplotlib.ticker import FixedFormatter
from scipy.stats import gaussian_kde
from statsmodels.nonparametric.kde import KDEUnivariate
from statsmodels.nonparametric.kernel_density import KDEMultivariate
from sklearn.neighbors import KernelDensity
def kde_scipy(x, x_grid, bandwidth=0.2, **kwargs):
"""Kernel Density Estimation with Scipy"""
# Note that scipy weights its bandwidth by the covariance of the
# input data. To make the results comparable to the other methods,
# we divide the bandwidth by the sample standard deviation here.
kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)
return kde.evaluate(x_grid)
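# Illustrative comparison (an assumption, not part of the original script):
# with the bandwidth rescaling above, the scipy estimate should roughly agree
# with the statsmodels/scikit-learn wrappers below on the same sample, e.g.
#     x = np.random.normal(size=1000)
#     x_grid = np.linspace(-4., 4., 200)
#     pdf = kde_scipy(x, x_grid, bandwidth=0.2)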
def kde_statsmodels_u(x, x_grid, bandwidth=0.2, **kwargs):
"""Univariate Kernel Density Estimation with Statsmodels"""
kde = KDEUnivariate(x)
kde.fit(bw=bandwidth, **kwargs)
return kde.evaluate(x_grid)
def kde_statsmodels_m(x, x_grid, bandwidth=0.2, **kwargs):
"""Multivariate Kernel Density Estimation with Statsmodels"""
kde = KDEMultivariate(x, bw=bandwidth * np.ones_like(x),
var_type='c', **kwargs)
return kde.pdf(x_grid)
def kde_sklearn(x, x_grid, bandwidth=0.2, **kwargs):
"""Kernel Density Estimation with Scikit-learn"""
kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)
kde_skl.fit(x[:, np.newaxis])
# score_samples() returns the log-likelihood of the samples
log_pdf = kde_skl.score_samples(x_grid[:, np.newaxis])
return np.exp(log_pdf)
def subtractBias(data, biasfile):
"""
Subtract ADC offset using the pre- and overscan information for each quadrant.
"""
if biasfile:
b = pf.getdata('bias.fits')
data -= b
else:
prescanL = data[3:2060, 3:51].mean()
prescanH = data[2076:4125, 3:51].mean()
overscanL = data[3:2060, 4150:4192].mean()
overscanH = data[2076:4125, 4150:4192].mean()
Q0 = data[:2060, :2098]
Q2 = data[2076:, :2098]
Q1 = data[:2060, 2098:]
Q3 = data[2076:, 2098:]
#subtract the bias levels
Q0 -= prescanL
Q2 -= prescanH
Q1 -= overscanL
Q3 -= overscanH
data[:2060, :2098] = Q0
data[2076:, :2098] = Q2
data[:2060, 2098:] = Q1
data[2076:, 2098:] = Q3
return data
def makeBias(files):
"""
Generate a median combined bias frame from the input files
"""
d = np.asarray([pf.getdata(file) for file in files])
#write out FITS file
med = np.median(d, axis=0)
fileIO.writeFITS(med, 'bias.fits', int=False)
def makeFlat(files, output, gain=3.09, biasfile=True):
"""
Combine flat fields
"""
d = []
for file in files:
data = subtractBias(pf.getdata(file), biasfile) * gain
fileIO.writeFITS(data, file.replace('.fits', 'biasremoved.fits'), int=False)
d.append(data)
d = np.asarray(d)
#write out FITS file
avg = np.average(d, axis=0)
fileIO.writeFITS(avg, output+'averaged.fits', int=False)
med = np.median(d, axis=0)
fileIO.writeFITS(med, output+'median.fits', int=False)
return avg, med
def normaliseFlat(data, output, limit=8.e4, order=5):
"""
Normalise each quadrant separately. If limit is set, use it to generate a mask.
"""
#split to quadrants
Q0 = data[3:2060, 52:2098].copy()
Q2 = data[2076:, 52:2098].copy()
Q1 = data[3:2060, 2098:4145].copy()
Q3 = data[2076:, 2098:4145].copy()
Qs = [Q0, Q1, Q2, Q3]
res = []
for tmp in Qs:
if limit is not None:
print 'Using masked arrays in the fitting...'
t = np.ma.MaskedArray(tmp, mask=(tmp > limit))
#meshgrid representing data
x, y = np.mgrid[:t.shape[0], :t.shape[1]]
#fit a polynomial 2d surface to remove the illumination profile
p_init = models.Polynomial2D(degree=order)
f = fitting.NonLinearLSQFitter()
p = f(p_init, x, y, t)
#normalize data and save it to res list
tmp /= p(x, y)
res.append(tmp)
print np.mean(tmp)
#save out
out = np.zeros_like(data)
out[3:2060, 52:2098] = res[0]
out[3:2060, 2098:4145] = res[1]
out[2076:, 52:2098] = res[2]
out[2076:, 2098:4145] = res[3]
fileIO.writeFITS(out, output+'FlatField.fits', int=False)
return out
def findFiles():
"""
Find files for each wavelength
"""
#pre-process: 28th
files = g.glob('28Apr/*Euclid.fits')
f = [file.replace('28Apr/', '') for file in files]
f = [file.replace('_', '.') for file in f]
times = [float(file[:5]) for file in f]
files = np.asarray(files)
times = np.asarray(times)
#545: 15_42 _13sEuclid - 16_25 _37sEuclid
msk545 = (times >= 15.42) & (times <= 16.25)
f545 = files[msk545]
#570: 16_36 _58sEuclid - 17_00 _12sEuclid
msk570 = (times >= 16.36) & (times <= 17.00)
f570 = files[msk570]
#bias: 14_48 _49sEuclid - 15_00 _25sEuclid
mskbias = (times >= 14.48) & (times <= 15.00)
bias = files[mskbias]
#pre-process: 29th
files = g.glob('29Apr/*Euclid.fits')
f = [file.replace('29Apr/', '') for file in files]
f = [file.replace('_', '.') for file in f]
times = [float(file[:5]) for file in f]
files = np.asarray(files)
times = np.asarray(times)
#660: 13_31 _55sEuclid - 14_10 _49sEuclid
msk660 = (times >= 13.31) & (times <= 14.10)
f660 = files[msk660]
#700: 14_32 _24sEuclid - 15_08 _02sEuclid
msk700 = (times >= 14.32) & (times <= 15.08)
f700 = files[msk700]
#800: 15_22 _34sEuclid - 15_59 _06sEuclid
msk800 = (times >= 15.22) & (times <= 15.59)
f800 = files[msk800]
#850: 16_24 _37sEuclid - 17_04 _03sEuclid
msk850 = (times >= 16.24) & (times <= 17.04)
f850 = files[msk850]
#pre-process: 30th
files = g.glob('30Apr/*Euclid.fits')
f = [file.replace('30Apr/', '') for file in files]
f = [file.replace('_', '.') for file in f]
times = [float(file[:5]) for file in f]
files = np.asarray(files)
times = np.asarray(times)
#600: 16_12 _49sEuclid-16_50 _22sEuclid
msk600 = (times >= 16.12) & (times <= 16.50)
f600 = files[msk600]
#940: 17_09 _37sEuclid - 17_48 _13sEuclid
msk940 = (times >= 17.09) & (times <= 17.48)
f940 = files[msk940]
#dictionary
out = dict(f545=f545, f570=f570, f600=f600, f660=f660, f700=f700, f800=f800, f850=f850, f940=f940, bias=bias)
return out
def findFilesLimit(limit=20):
"""
Find files for each wavelength
"""
#pre-process: 28th
files = g.glob('28Apr/*Euclid.fits')
f = [file.replace('28Apr/', '') for file in files]
f = [file.replace('_', '.') for file in f]
times = [float(file[:5]) for file in f]
files = np.asarray(files)
times = np.asarray(times)
#545: 15_42 _13sEuclid - 16_25 _37sEuclid
msk545 = (times >= 15.42) & (times <= 16.25)
f545 = files[msk545]
f545 = np.random.choice(f545, size=limit)
#570: 16_36 _58sEuclid - 17_00 _12sEuclid
msk570 = (times >= 16.36) & (times <= 17.00)
f570 = files[msk570]
f570 = np.random.choice(f570, size=limit)
#bias: 14_48 _49sEuclid - 15_00 _25sEuclid
mskbias = (times >= 14.48) & (times <= 15.00)
bias = files[mskbias]
#pre-process: 29th
files = g.glob('29Apr/*Euclid.fits')
f = [file.replace('29Apr/', '') for file in files]
f = [file.replace('_', '.') for file in f]
times = [float(file[:5]) for file in f]
files = np.asarray(files)
times = np.asarray(times)
#660: 13_31 _55sEuclid - 14_10 _49sEuclid
msk660 = (times >= 13.31) & (times <= 14.10)
f660 = files[msk660]
f660 = np.random.choice(f660, size=limit)
#700: 14_32 _24sEuclid - 15_08 _02sEuclid
msk700 = (times >= 14.32) & (times <= 15.08)
f700 = files[msk700]
f700 = np.random.choice(f700, size=limit)
#800: 15_22 _34sEuclid - 15_59 _06sEuclid
msk800 = (times >= 15.22) & (times <= 15.59)
f800 = files[msk800]
f800 = np.random.choice(f800, size=limit)
#850: 16_24 _37sEuclid - 17_04 _03sEuclid
msk850 = (times >= 16.24) & (times <= 17.04)
f850 = files[msk850]
f850 = np.random.choice(f850, size=limit)
#pre-process: 30th
files = g.glob('30Apr/*Euclid.fits')
f = [file.replace('30Apr/', '') for file in files]
f = [file.replace('_', '.') for file in f]
times = [float(file[:5]) for file in f]
files = np.asarray(files)
times = np.asarray(times)
#600: 16_12 _49sEuclid-16_50 _22sEuclid
msk600 = (times >= 16.12) & (times <= 16.50)
f600 = files[msk600]
f600 = np.random.choice(f600, size=limit)
#940: 17_09 _37sEuclid - 17_48 _13sEuclid
msk940 = (times >= 17.09) & (times <= 17.48)
f940 = files[msk940]
f940 = np.random.choice(f940, size=limit)
#dictionary
out = dict(f545=f545, f570=f570, f600=f600, f660=f660, f700=f700, f800=f800, f850=f850, f940=f940, bias=bias)
return out
def _generateFlats(key, files):
"""
Actual calls to generate flat fields.
"""
print key, files, files.shape
avg, med = makeFlat(files, key)
normed = normaliseFlat(med, key)
return normed
def generateFlats(args):
"""
A wrapper to generate flat fields simultaneously at different wavelengths.
A hack required as Pool does not accept multiple arguments.
"""
return _generateFlats(*args)
def flats():
"""
Generates normalised flats at several wavelengths. Use all input files.
"""
#search for the right files
files = findFiles()
#generate bias
makeBias(files['bias'])
files.pop('bias', None)
#generate flats using multiprocessing
pool = Pool(processes=6)
pool.map(generateFlats, [(key, files[key]) for key in files.keys()])
def flatsLimit(limit=20):
"""
Generates normalised flats at several wavelengths, using input files randomly chosen up to a limiting number.
This allows matched-SNR studies.
"""
#search for the right files
files = findFilesLimit(limit=limit)
#generate bias
makeBias(files['bias'])
files.pop('bias', None)
#generate flats using multiprocessing
pool = Pool(processes=6)
pool.map(generateFlats, [(key, files[key]) for key in files.keys()])
#rename
for file in g.glob('*FlatField.fits'):
os.rename(file, file.replace('.fits', 'L%i.fits' % limit))
def plot(xmin=300, xmax=3500, ymin=200, ymax=1600, smooth=2.):
#load data
data = {}
for file in g.glob('*FlatField.fits'):
fh = pf.open(file)
wave = file[1:4]
data[wave] = fh[1].data
fh.close()
#simple plot showing some structures
number_of_subplots = math.ceil(len(data.keys())/2.)
fig = plt.figure(figsize=(13, 13))
plt.subplots_adjust(wspace=0.05, hspace=0.15, left=0.01, right=0.99, top=0.95, bottom=0.05)
#loop over data from shortest wavelength to the longest
for i, wave in enumerate(sorted(data.keys())):
tmp = data[wave][ymin:ymax, xmin:xmax].copy()
#Gaussian smooth to enhance structures for plotting
if smooth > 1:
tmp = gaussian_filter(tmp, smooth)
if 690 < int(wave) < 710:
norm = tmp
ax = plt.subplot(number_of_subplots, 2, i+1)
im = ax.imshow(tmp, interpolation='none', origin='lower', vmin=0.997, vmax=1.003)
ax.set_title(r'$\lambda =$ ' + str(int(wave)) + 'nm')
plt.axis('off')
cbar = plt.colorbar(im, cax=fig.add_axes([0.65, 0.14, 0.25, 0.03], frameon=False),
ticks=[0.997, 1, 1.003], format='%.3f', orientation='horizontal')
cbar.set_label('Normalised Pixel Values')
plt.savefig('PRNUmaps.png')
plt.close()
#normalise to 700nm
fig = plt.figure(figsize=(13, 13))
plt.subplots_adjust(wspace=0.05, hspace=0.15, left=0.01, right=0.99, top=0.95, bottom=0.05)
for i, wave in enumerate(sorted(data.keys())):
tmp = data[wave][ymin:ymax, xmin:xmax].copy()
#Gaussian smooth to enhance structures for plotting
if smooth > 1:
tmp = gaussian_filter(tmp, smooth)
tmp /= norm
ax = plt.subplot(number_of_subplots, 2, i+1)
im = ax.imshow(tmp, interpolation='none', origin='lower', vmin=0.997, vmax=1.003)
ax.set_title(r'$\lambda =$ ' + str(int(wave)) + 'nm')
plt.axis('off')
cbar = plt.colorbar(im, cax=fig.add_axes([0.65, 0.14, 0.25, 0.03], frameon=False),
ticks=[0.997, 1, 1.003], format='%.3f', orientation='horizontal')
cbar.set_label('Normalised Pixel Values')
plt.savefig('PRNUmapsNormed.png')
plt.close()
#loop over data from shortest wavelength to the longest
dat = {}
ylen = 100
xlen = 100
for i, wave in enumerate(sorted(data.keys())):
tmp = data[wave][ymin:ymax, xmin:xmax].copy()
#select sub regions to calculate the PRNU in
prnu = []
ydim, xdim = tmp.shape
samplesx = xdim / xlen
samplesy = ydim / ylen
print samplesx, samplesy
for a in range(samplesy):
for b in range(samplesx):
area = tmp[a*ylen:(a+1)*ylen, b*xlen:(b+1)*xlen]
prn = np.std(sigma_clip(area, 6.)) * 100.
prnu.append(prn)
dat[int(wave)] = prnu
#calculate the mean for each wavelength and std
w = []
mean = []
std = []
for wave in sorted(dat.keys()):
m = np.mean(dat[wave])
s = np.std(dat[wave])
w.append(wave)
mean.append(m)
std.append(s)
print wave, m, s
#polynomial fit to PRNU data
z2 = np.polyfit(w, mean, 1)
p2 = np.poly1d(z2)
z3 = np.polyfit(w, mean, 3)
p3 = np.poly1d(z3)
x = np.linspace(500, 900)
#using a gaussian process
X = np.atleast_2d(w).T
y = np.asarray(mean)
ev = np.atleast_2d(np.linspace(500, 900, 1000)).T #points at which to evaluate
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1, thetaL=1e-3, thetaU=1,
nugget=(np.asarray(std)*3/y)**2, random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(ev, eval_MSE=True)
sigma = np.sqrt(MSE)
#standard error of the mean
sigma3 = 3.*np.asarray(std)/np.sqrt(len(std))
#wavelength dependency plot
plt.title('Wavelength Dependency of the PRNU')
#plt.plot(x, p3(x), 'g--', label='3rd order fit')
plt.plot(ev, y_pred, 'b-', label='Prediction')
plt.fill(np.concatenate([ev, ev[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label=u'95\% confidence interval')
plt.errorbar(w, mean, yerr=sigma3, c='r', fmt='o', label='data, $3\sigma$ errors')
plt.plot(x, p2(x), 'g-', lw=2, label='linear fit')
plt.xlabel(r'Wavelength $\lambda$ [nm]')
plt.ylabel(r'PRNU $[\%]$')
plt.xlim(500, 900)
plt.ylim(0.5, 1.)
plt.legend(shadow=True, fancybox=True, scatterpoints=1, numpoints=1)
plt.savefig('PRNUwave.pdf')
plt.close()
#residual variance after normalization
#loop over data from shortest wavelength to the longest
for j, refwave in enumerate(sorted(data.keys())):
dat = {}
ylen = 100
xlen = 100
for i, wave in enumerate(sorted(data.keys())):
tmp = data[wave][ymin:ymax, xmin:xmax].copy()
ref = data[refwave][ymin:ymax, xmin:xmax].copy()
#select sub regions to calculate the PRNU in
prnu = []
ydim, xdim = tmp.shape
samplesx = xdim / xlen
samplesy = ydim / ylen
for a in range(samplesy):
for b in range(samplesx):
area = tmp[a*ylen:(a+1)*ylen, b*xlen:(b+1)*xlen]
arearef = ref[a*ylen:(a+1)*ylen, b*xlen:(b+1)*xlen]
area /= arearef
prn = np.std(sigma_clip(area, 6.)) * 100.
prnu.append(prn)
dat[int(wave)] = prnu
#calculate the mean for each wavelength and std
w = []
mean = []
std = []
for wave in sorted(dat.keys()):
m = np.mean(dat[wave])
s = np.std(dat[wave])
w.append(wave)
mean.append(m)
std.append(s)
print wave, m, s
#standard error of the mean
sigma3 = 3.*np.asarray(std)/np.sqrt(len(std))
#wavelength dependency plot
plt.subplots_adjust(left=0.13)
plt.title('Residual Dispersion')
plt.errorbar(w, mean, yerr=sigma3, c='r', fmt='o', label='data, $3\sigma$ errors')
plt.xlabel(r'Wavelength $\lambda$ [nm]')
plt.ylabel(r'$\sigma \left ( \frac{{M_{{i}}}}{{M_{{{0:s}}}}} \right )$ $[\%]$'.format(refwave))
plt.xlim(500, 900)
#plt.ylim(-0.05, 0.3)
plt.legend(shadow=True, fancybox=True, scatterpoints=1, numpoints=1)
plt.savefig('PRNUwaveResidual%s.pdf' % refwave)
plt.close()
def spatialAutocorrelation(interpolation='none', smooth=2):
#load data
data = {}
for file in g.glob('*FlatField.fits'):
fh = pf.open(file)
wave = file[1:4]
data[wave] = fh[1].data
fh.close()
for i, wave in enumerate(sorted(data.keys())):
tmp = data[wave][500:1524, 500:1524].copy()
tmp = gaussian_filter(tmp, smooth)
autoc = signal.fftconvolve(tmp, np.flipud(np.fliplr(tmp)), mode='full')
autoc /= np.max(autoc)
autoc *= 100.
fileIO.writeFITS(autoc, 'autocorrelationRealdata%s.fits' % wave, int=False)
#plot images
fig = plt.figure(figsize=(14.5, 6.5))
plt.suptitle(r'Autocorrelation of Flat Field Data $\lambda = %i$ \AA' % int(wave))
plt.suptitle(r'Autocorrelation Interaction $[\%]$', x=0.7, y=0.26)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
i1 = ax1.imshow(tmp, origin='lower', interpolation=interpolation, vmin=0.997, vmax=1.003)
plt.colorbar(i1, ax=ax1, orientation='horizontal', format='%.3f')
i2 = ax2.imshow(autoc, interpolation=interpolation, origin='lower', rasterized=True, vmin=0, vmax=100)
plt.colorbar(i2, ax=ax2, orientation='horizontal')
ax1.set_xlabel('X [pixel]')
ax1.set_ylabel('Y [pixel]')
plt.savefig('SpatialAutocorrelation%s.pdf' % wave)
plt.close()
def powerSpectrum(interpolation='none'):
"""
"""
x, y = np.mgrid[0:32, 0:32]
img = 100 * np.cos(x*np.pi/4.) * np.cos(y*np.pi/4.)
kernel = np.array([[0.0025, 0.01, 0.0025], [0.01, 0.95, 0.01], [0.0025, 0.01, 0.0025]])
img = ndimage.convolve(img.copy(), kernel)
fourierSpectrum2 = np.abs(fftpack.fft2(img))
print np.mean(fourierSpectrum2), np.median(fourierSpectrum2), np.std(fourierSpectrum2), np.max(fourierSpectrum2), np.min(fourierSpectrum2)
fig = plt.figure(figsize=(14.5, 6.5))
plt.suptitle('Fourier Analysis of Flat-field Data')
plt.suptitle('Original Image', x=0.32, y=0.26)
plt.suptitle(r'$\log_{10}$(2D Power Spectrum)', x=0.72, y=0.26)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
i1 = ax1.imshow(img, origin='lower', interpolation=interpolation, rasterized=True)
plt.colorbar(i1, ax=ax1, orientation='horizontal', format='%.1e')
i2 = ax2.imshow(fourierSpectrum2[0:512, 0:512], interpolation=interpolation, origin='lower',
rasterized=True)
plt.colorbar(i2, ax=ax2, orientation='horizontal')
ax1.set_xlabel('X [pixel]')
ax2.set_xlabel('$l_{x}$')
ax2.set_ylim(0, 16)
ax2.set_xlim(0, 16)
ax1.set_ylabel('Y [pixel]')
plt.savefig('FourierSin.pdf')
plt.close()
#load data
data = {}
for file in g.glob('*FlatField.fits'):
fh = pf.open(file)
wave = file[1:4]
data[wave] = fh[1].data
fh.close()
for i, wave in enumerate(sorted(data.keys())):
tmp = data[wave][500:1524, 500:1524].copy()
#tmp = gaussian_filter(tmp, 3.)
fourierSpectrum = np.abs(fftpack.fft2(tmp.astype(np.float32)))
fp = np.log10(fourierSpectrum[0:512, 0:512])
fig = plt.figure(figsize=(14.5, 6.5))
plt.suptitle('Fourier Analysis of Flat-field Data')
plt.suptitle('Original Image', x=0.32, y=0.26)
plt.suptitle(r'$\log_{10}$(2D Power Spectrum)', x=0.72, y=0.26)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
i1 = ax1.imshow(tmp, origin='lower', interpolation=interpolation, rasterized=True,
vmin=0.96, vmax=1.04)
plt.colorbar(i1, ax=ax1, orientation='horizontal', format='%.2f')
i2 = ax2.imshow(fp, interpolation=interpolation, origin='lower', rasterized=True,
vmin=-1., vmax=3.)
plt.colorbar(i2, ax=ax2, orientation='horizontal')
ax1.set_xlabel('X [pixel]')
ax2.set_xlabel('$l_{x}$')
ax2.set_ylim(0, 16)
ax2.set_xlim(0, 16)
ax1.set_ylabel('Y [pixel]')
plt.savefig('PowerSpectrum%s.pdf' % wave)
plt.close()
def normalisedCrosscorrelation(image1, image2):
dist_ncc = np.sum((image1 - np.mean(image1)) * (image2 - np.mean(image2)) ) / \
((image1.size - 1) * np.std(image1) * np.std(image2) )
return dist_ncc
def correlate(xmin=300, xmax=3500, ymin=200, ymax=1600):
#load data
data = {}
for file in g.glob('*FlatField.fits'):
fh = pf.open(file)
wave = file[1:4]
data[wave] = fh[1].data
fh.close()
cr = []
crRandom = []
for i, wave1 in enumerate(sorted(data.keys())):
for j, wave2 in enumerate(sorted(data.keys())):
tmp1 = data[wave1][ymin:ymax, xmin:xmax].copy()
tmp2 = data[wave2][ymin:ymax, xmin:xmax].copy()
# st1 = np.std(tmp1)
# av1 = np.mean(tmp1)
# msk = (tmp1 > st1*1 + av1) & (tmp1 < av1 - st1*1)
# tmp1 = tmp1[~msk]
# tmp2 = tmp2[~msk]
# calculate the difference and its norms
diff = tmp1 - tmp2 # elementwise for scipy arrays
m_norm = np.sum(np.abs(diff)) # Manhattan norm
z_norm = norm(diff.ravel(), 0) # Zero norm
#cor = np.corrcoef(tmp1, tmp2)
dist_ncc = normalisedCrosscorrelation(tmp1, tmp2)
print wave1, wave2
print "Manhattan norm:", m_norm, "/ per pixel:", m_norm/tmp1.size
print "Zero norm:", z_norm, "/ per pixel:", z_norm*1./tmp1.size
print "Normalized cross-correlation:", dist_ncc
cr.append(dist_ncc)
crRandom.append(normalisedCrosscorrelation(np.random.random(tmp1.shape), np.random.random(tmp1.shape)))
#data containers, make a 2D array of the cross-correlations
wx = [x for x in sorted(data.keys())]
wy = [y for y in sorted(data.keys())]
cr = np.asarray(cr).reshape(len(wx), len(wy))
crRandom = np.asarray(crRandom).reshape(len(wx), len(wy))
fig = plt.figure()
plt.title('Normalized Cross-Correlation')
ax = fig.add_subplot(111)
plt.pcolor(cr, cmap='Greys', vmin=0.9, vmax=1.)
plt.colorbar()
#change the labels and move ticks to centre
ticks = np.arange(len(wx)) + 0.5
plt.xticks(ticks, wx)
plt.yticks(ticks, wy)
ax.xaxis.set_ticks_position('none') #remove the tick marks
ax.yaxis.set_ticks_position('none') #remove the tick marks
ax.set_xlabel('Wavelength [nm]')
ax.set_ylabel('Wavelength [nm]')
plt.savefig('Crosscorrelation.pdf')
plt.close()
#plot the data from random arrays
fig = plt.figure()
plt.title('Normalized Cross-Correlation (Random)')
ax = fig.add_subplot(111)
plt.pcolor(crRandom, cmap='Greys')#, vmin=0.9, vmax=1.)
plt.colorbar()
#change the labels and move ticks to centre
ticks = np.arange(len(wx)) + 0.5
plt.xticks(ticks, wx)
plt.yticks(ticks, wy)
ax.xaxis.set_ticks_position('none') #remove the tick marks
ax.yaxis.set_ticks_position('none') #remove the tick marks
ax.set_xlabel('Wavelength [nm]')
ax.set_ylabel('Wavelength [nm]')
plt.savefig('CrosscorrelationRandom.pdf')
plt.close()
def morphPRNUmap():
data = _loadPRNUmaps()
for alpha in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
blended = Image.blend(data['570'], data['660'], alpha)
fig = plt.figure()
plt.title('Image Blending')
ax = fig.add_subplot(111)
i1 = ax.imshow(blended, origin='lower', interpolation='none', rasterized=True)
plt.colorbar(i1, ax=ax, orientation='horizontal', format='%.2f')
plt.savefig('Blending%f.pdf' %alpha)
plt.close()
def mse(x, y):
"""
Mean Square Error (MSE)
"""
return np.linalg.norm(x - y)
def structuralSimilarity(xmin=300, xmax=3500, ymin=200, ymax=1600, smooth=0.):
"""
Adapted from:
http://scikit-image.org/docs/0.9.x/auto_examples/plot_ssim.html#example-plot-ssim-py
"""
data = _loadPRNUmaps()
ref = data['700'][ymin:ymax, xmin:xmax].copy()
if smooth > 1:
ref = gaussian_filter(ref, smooth)
number_of_subplots = math.ceil(len(data.keys())/2.)
fig = plt.figure(figsize=(13, 13))
plt.subplots_adjust(wspace=0.05, hspace=0.15, left=0.01, right=0.99, top=0.95, bottom=0.05)
#loop over data from shortest wavelength to the longest
wavearray = []
msearray = []
ssiarray = []
for i, wave in enumerate(sorted(data.keys())):
tmp = data[wave][ymin:ymax, xmin:xmax].copy()
rows, cols = tmp.shape
#Gaussian smooth to enhance structures for plotting
if smooth > 1:
tmp = gaussian_filter(tmp, smooth)
ms = mse(tmp, ref)
#careful with the win_size, can take up to 16G of memory if set to e.g. 19
ssi = ssim(tmp, ref, dynamic_range=tmp.max() - tmp.min(), win_size=9)
ax = plt.subplot(number_of_subplots, 2, i+1)
im = ax.imshow(tmp, interpolation='none', origin='lower', vmin=0.997, vmax=1.003)
ax.set_title(r'$\lambda =$ ' + str(int(wave)) + 'nm;' + ' MSE: %.2f, SSIM: %.5f' % (ms, ssi))
plt.axis('off')
print wave, ms, ssi
wavearray.append(int(wave))
msearray.append(ms)
ssiarray.append(ssi)
cbar = plt.colorbar(im, cax=fig.add_axes([0.65, 0.14, 0.25, 0.03], frameon=False),
ticks=[0.997, 1, 1.003], format='%.3f', orientation='horizontal')
cbar.set_label('Normalised Pixel Values')
plt.savefig('StructuralSimilarities.png')
plt.close()
fig = plt.figure()
plt.title(r'Mean Squared Error Wrt. $\lambda = 700$nm')
ax = fig.add_subplot(111)
ax.plot(wavearray, msearray, 'bo')
ax.set_xlim(500, 900)
ax.set_ylim(-0.03, 6.)
ax.set_ylabel('MSE')
ax.set_xlabel('Wavelength [nm]')
plt.savefig('MSEwave.pdf')
plt.close()
def interpolateMissing(hide=600, kind='linear', three=False):
"""
Interpolates the hidden PRNU map by using the other PRNU maps and interpolating between them.
The current implementation is really slow because it relies on nested 1D interpolation.
:param hide: which wavelength is used as the target to recover
:type hide: int
:param kind: interpolation type e.g. linear or quadratic, see scipy interp1d for information
:type kind: str
:param three: whether only three wavelengths were available
:type three: bool
:return: derived PRNU map, target PRNU map
:rtype: list of ndarrays
"""
shide = str(hide)
data = _loadPRNUmaps()
#these are all the other data
rest = []
w = []
for i, wave in enumerate(sorted(data.keys())):
if shide not in wave:
rest.append(data[wave][1150:1300, 1200:1400])
#rest.append(data[wave][200:1600, 300:3500])
w.append(int(wave))
else:
#this is the one we try to recover
hidden = data[shide][1150:1300, 1200:1400]
#hidden = data[shide][200:1600, 300:3500]
if three:
#pick first and last and one from the middle
rest = [rest[0], rest[2], rest[-1]]
w = [w[0], w[2], w[-1]]
rest = np.asarray(rest)
ww, jj, ii = rest.shape
print w
out = np.zeros(hidden.shape)
#this is a really slow way of doing this...
for j in range(jj):
for i in range(ii):
f = interp1d(w, rest[:, j, i], kind=kind)
out[j, i] = f(hide)
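#Vectorised sketch (an assumption, not used by the original script): scipy's
#interp1d accepts a multi-dimensional y with an axis keyword, so the nested
#loops above could be replaced by a single call, e.g.
#    out = interp1d(w, rest, kind=kind, axis=0)(hide)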
ratio = out / hidden
print 'Mean, STD, MSE:'
print ratio.mean(), ratio.std(), mse(out, hidden)
#save FITS files
if three:
fileIO.writeFITS(out, 'recoveredThree%i.fits' % hide, int=False)
fileIO.writeFITS(hidden, 'hiddenThree%i.fits' % hide, int=False)
fileIO.writeFITS(ratio, 'residualThree%i.fits' % hide, int=False)
else:
fileIO.writeFITS(out, 'recovered%i.fits' % hide, int=False)
fileIO.writeFITS(hidden, 'hidden%i.fits' % hide, int=False)
fileIO.writeFITS(ratio, 'residual%i.fits' % hide, int=False)
return out, hidden
def predictMissingLinearRegression(hide=600, three=False):
shide = str(hide)
data = _loadPRNUmaps()
#these are all the other data
rest = []
w = []
for i, wave in enumerate(sorted(data.keys())):
if shide not in wave:
#rest.append(data[wave][1150:1300, 1200:1400])
rest.append(data[wave][200:1600, 300:3500])
w.append(int(wave))
else:
#this is the one we try to recover
#hidden = data[shide][1150:1300, 1200:1400]
hidden = data[shide][200:1600, 300:3500]
if three:
#pick first and last and one from the middle
rest = [rest[0], rest[2], rest[-1]]
w = [w[0], w[2], w[-1]]
rest = np.asarray(rest)
print w
w = np.atleast_2d(w).T #needed for the linear regression
#Create linear regression object
ww, jj, ii = rest.shape
out = np.zeros(hidden.shape)
#this is a really slow way of doing this...
for j in range(jj):
for i in range(ii):
regr = linear_model.LinearRegression()
regr.fit(w, rest[:, j, i])
out[j, i] = regr.predict(hide)
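#Vectorised sketch (an assumption, not used by the original script): np.polyfit
#accepts a 2-D y and fits every column at once, so the per-pixel regressions
#above could be replaced by, e.g.
#    slope, intercept = np.polyfit(w.ravel(), rest.reshape(ww, -1), 1)
#    out = (slope * hide + intercept).reshape(hidden.shape)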
ratio = out / hidden
print 'Mean, STD, MSE:'
print ratio.mean(), ratio.std(), mse(out, hidden)
#save FITS files
if three:
fileIO.writeFITS(out, 'recoveredThreeLR%i.fits' % hide, int=False)
fileIO.writeFITS(hidden, 'hiddenThreeLR%i.fits' % hide, int=False)
fileIO.writeFITS(ratio, 'residualThreeLR%i.fits' % hide, int=False)
else:
fileIO.writeFITS(out, 'recoveredLR%i.fits' % hide, int=False)
fileIO.writeFITS(hidden, 'hiddenLR%i.fits' % hide, int=False)
fileIO.writeFITS(ratio, 'residualLR%i.fits' % hide, int=False)
return out, hidden
def predictMissingGaussianProcessRegression(hide=600, three=False):
shide = str(hide)
data = _loadPRNUmaps()
#these are all the other data
rest = []
w = []
for i, wave in enumerate(sorted(data.keys())):
if shide not in wave:
rest.append(data[wave][1150:1300, 1200:1400])
#rest.append(data[wave][200:1600, 300:3500])
w.append(int(wave))
else:
#this is the one we try to recover
hidden = data[shide][1150:1300, 1200:1400]
#hidden = data[shide][200:1600, 300:3500]
if three:
#pick first and last and one from the middle
rest = [rest[0], rest[2], rest[-1]]
w = [w[0], w[2], w[-1]]
rest = np.asarray(rest)
print w
w = np.atleast_2d(w).T #needed for the gp
#Create linear regression object
ww, jj, ii = rest.shape
out = np.zeros(hidden.shape)
#this is a really slow way of doing this...
for j in range(jj):
for i in range(ii):
# Instantiate a Gaussian Process model
gp = GaussianProcess(regr='linear', corr='linear')
gp.fit(w, rest[:, j, i])
out[j, i] = gp.predict(hide)
ratio = out / hidden
print 'Mean, STD, MSE:'
print ratio.mean(), ratio.std(), mse(out, hidden)
#save FITS files
if three:
fileIO.writeFITS(out, 'recoveredThreeGP%i.fits' % hide, int=False)
fileIO.writeFITS(hidden, 'hiddenThreeGP%i.fits' % hide, int=False)
fileIO.writeFITS(ratio, 'residualThreeGP%i.fits' % hide, int=False)
else:
fileIO.writeFITS(out, 'recoveredGP%i.fits' % hide, int=False)
fileIO.writeFITS(hidden, 'hiddenGP%i.fits' % hide, int=False)
fileIO.writeFITS(ratio, 'residualGP%i.fits' % hide, int=False)
return out, hidden
def plotMissing(target, derived, hide, out='', smooth=2, ratio=True):
"""
Generate plots showing the target and derived PRNU maps and the residual of the two.
"""
#plot simple images
fig = plt.figure(figsize=(15, 5))
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
ax1.set_title('Target')
ax2.set_title('Derived')
if ratio:
ax3.set_title(r'Residual: (D/T)')
else:
ax3.set_title(r'Residual: $|D - T|$')
i1 = ax1.imshow(gaussian_filter(target, smooth),
origin='lower', interpolation='none', rasterized=True, vmin=0.995, vmax=1.005)
plt.colorbar(i1, ax=ax1, orientation='horizontal', format='%.3f', ticks=[0.995, 1, 1.005])
i2 = ax2.imshow(gaussian_filter(derived, smooth),
interpolation='none', origin='lower', rasterized=True, vmin=0.995, vmax=1.005)
plt.colorbar(i2, ax=ax2, orientation='horizontal', format='%.3f', ticks=[0.995, 1, 1.005])
if ratio:
i3 = ax3.imshow(derived/target,
interpolation='none', origin='lower', rasterized=True, vmin=0.995, vmax=1.005)
plt.colorbar(i3, ax=ax3, orientation='horizontal', format='%.3f', ticks=[0.995, 1, 1.005])
else:
i3 = ax3.imshow(np.absolute(derived - target),
interpolation='none', origin='lower', rasterized=True, vmin=0., vmax=1e-3)
plt.colorbar(i3, ax=ax3, orientation='horizontal', format='%.1e', ticks=[0., 5e-4, 1e-3])
plt.savefig('PRNUmapRecovery%s%i.pdf' % (out, hide))
plt.close()
def calculateAreaStatistcs(data, ylen=100, xlen=100):
"""
Calculates a standard deviation in regions of a given size.
"""
st = []
ydim, xdim = data.shape
samplesx = xdim / xlen
samplesy = ydim / ylen
#print samplesx, samplesy
for a in range(samplesy):
for b in range(samplesx):
area = data[a*ylen:(a+1)*ylen, b*xlen:(b+1)*xlen]
s = np.std(sigma_clip(area, 6.)) * 100.
st.append(s)
return st
def plotRecoveredResiduals(xmin=-0.0035, xmax=0.0035, ymax=0.065, nbins=60):
six = g.glob('residualLR*.fits')
three = g.glob('residualThreeLR*.fits')
sixdata = []
#data format: [(wave1, data1), (wave2, data2)...]
for file in six:
#sixdata.append((int(file[10:13]), pf.getdata(file))) #ratio data
sixdata.append((int(file[10:13]),
pf.getdata(file.replace('residual', 'hidden')) -
pf.getdata(file.replace('residual', 'recovered'))))
threedata = []
for file in three:
#threedata.append((int(file[15:18]), pf.getdata(file))) #ratio data
threedata.append((int(file[15:18]),
pf.getdata(file.replace('residual', 'hidden')) -
pf.getdata(file.replace('residual', 'recovered'))))
#simple plot showing some structures
number_of_subplots = math.ceil(len(threedata))
plt.figure(figsize=(13, 13))
plt.subplots_adjust(wspace=0.01, hspace=0.2, left=0.05, right=0.99, top=0.95, bottom=0.05)
bins = np.linspace(xmin, xmax, nbins)
#loop over data from shortest wavelength to the longest
i = 1
pdfs = []
for wave, data in sixdata:
d = data.ravel()
pdf = kde_statsmodels_u(d.astype(np.float64), bins, bandwidth=1.e-4) * 1.e-4 * 1.2
pdfs.append(pdf)
txt = r'Six LEDs: $\lambda =$ ' + str(wave) + 'nm'
ax = plt.subplot(number_of_subplots, 2, i)
ax.set_title(txt)
ax.hist(d, bins=bins, weights=np.ones_like(d)/len(d))
ax.plot(bins, pdf, 'r-', lw=2)
ax.set_xlim(xmin, xmax)
ax.set_ylim(0., ymax)
ax.axvline(x=0, color='g', lw=1.5)
#ax.set_xticks([xmin+2e-3, 1, xmax-2e-3])
plt.setp(ax.get_xticklabels(), visible=False)
xx, locs = plt.xticks()
ll = ['%.3f' % a for a in xx]
plt.gca().xaxis.set_major_formatter(FixedFormatter(ll))
ax.annotate(r'$\sigma(T-D) \sim %.2e$' % d.std(), xycoords='axes fraction', xy=(0.05, 0.85))
i += 2
plt.setp(ax.get_xticklabels(), visible=True)
i = 2
j = 0
for wave, data in threedata:
d = data.ravel()
txt = r'Three LEDs: $\lambda =$ ' + str(wave) + 'nm'
ax = plt.subplot(number_of_subplots, 2, i)
ax.set_title(txt)
ax.hist(d, bins=bins, weights=np.ones_like(d)/len(d))
ax.plot(bins, pdfs[j], 'r-', lw=2)
ax.set_xlim(xmin, xmax)
ax.set_ylim(0., ymax)
ax.axvline(x=0, color='g', lw=1.5)
#ax.set_xticks([xmin+2e-3, 1, xmax-2e-3])
plt.setp(ax.get_xticklabels(), visible=False)
xx, locs = plt.xticks()
ll = ['%.3f' % a for a in xx]
plt.gca().xaxis.set_major_formatter(FixedFormatter(ll))
plt.setp(ax.get_yticklabels(), visible=False)
ax.annotate(r'$\sigma(T-D) \sim %.2e$' % d.std(), xycoords='axes fraction', xy=(0.05, 0.85))
i += 2
j += 1
plt.setp(ax.get_xticklabels(), visible=True)
plt.savefig('RecoveredResidualPixelValues.pdf')
plt.close()
def _lowOrderPolynomialSurfaceRecovery(w, data, wall, degree=4, kind='linear'):
#first remove low order polynomial
coeffs = []
for i, d in enumerate(data):
#meshgrid representing data
x, y = np.mgrid[:d.shape[0], :d.shape[1]]
#fit a polynomial 2d surface to remove the illumination profile
p_init = models.Polynomial2D(degree=degree)
f = fitting.NonLinearLSQFitter()
p = f(p_init, x, y, d)
coeffs.append(p)
nparamas = len(coeffs[0].parameters)
interpolated = []
for a in range(nparamas):
yy = [c.parameters[a] for c in coeffs]
f = interp1d(w, yy, kind=kind)
interpolated.append(np.mean(f(wall))) #mean value of the interpolated
#create mapping between coefficients and the interpolated values and then generate a new model
mapping = dict(zip(p.param_names, interpolated))
model = models.Polynomial2D(degree=degree, **mapping)
#recovered is the model evaluated on the grid
recovered = model(x, y)
return recovered
def _singularValueDecompostionRecover(w, data, degree=2):
#singular value decomposition of the first, assumed to be 545
U545, s545, V545 = np.linalg.svd(data[0], full_matrices=True, compute_uv=True)
l = []
for ww, d in zip(w, data):
l.append(np.dot(U545.T, np.dot(d, V545)))
#numpy array
l = np.asarray(l)
#fit quadratic model to each l
ww, jj, ii = l.shape
lpred = np.zeros(data[0].shape)
    #this is a really slow way of doing this...
for j in range(jj):
for i in range(ii):
p_init = models.Polynomial1D(degree=degree)
#p_init = models.Legendre1D(degree=degree)
#p_init = models.Chebyshev1D(degree=degree)
f = fitting.NonLinearLSQFitter()
p = f(p_init, w, l[:, j, i])
lpred[j, i] = np.mean(p(w))
#modelled flat
recovered = np.dot(U545, np.dot(lpred, V545.T))
return recovered
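#--- Illustrative sketch, not part of the original analysis ------------------
#The SVD recovery above projects each flat onto the basis of the reference
#flat (l = U.T * d * V), models every projected coefficient as a low order
#polynomial in wavelength and transforms the averaged model back with
#U * l * V.T. The helper below only demonstrates that this projection round
#trip is lossless for a square matrix; the array size is an arbitrary
#assumption made for illustration.
def _svdRoundTripSketch():
    import numpy as np
    a = np.random.rand(8, 8)
    U, s, V = np.linalg.svd(a, full_matrices=True, compute_uv=True)
    #project onto the basis of the matrix itself...
    l = np.dot(U.T, np.dot(a, V))
    #...and transform back: this recovers the original matrix
    return np.allclose(np.dot(U, np.dot(l, V.T)), a)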
def recoverMaster(smooth=2, degree=3, fitlow=False, limits=(600, 700, 600, 700)):
"""
    Recover a master PRNU map using Singular Value Decomposition, optionally removing a low order illumination surface first.
"""
#master flat is the average of the independent PRNU maps
master = pf.getdata('MasterFlat.fits')[limits[2]:limits[3], limits[0]:limits[1]]
if fitlow:
#meshgrid representing data
x, y = np.mgrid[:master.shape[0], :master.shape[1]]
#fit a polynomial 2d surface to remove the illumination profile
p_init = models.Polynomial2D(degree=degree)
f = fitting.NonLinearLSQFitter()
p = f(p_init, x, y, master)
#normalize data and save it to res list
master /= p(x, y)
#wavelengths to use in the analysis
three = ['545', '700', '850']
four = ['545', '600', '700', '850']
five = ['545', '570', '660', '800', '850']
six = ['545', '570', '600', '660', '800', '850']
LEDsets = ((three, 28), (four, 21), (five, 17), (six, 14))
#bins
bins = np.linspace(-0.001, 0.001, 50)
#figure definitions
fig = plt.figure(figsize=(13, 13))
plt.subplots_adjust(wspace=0.01, hspace=0.2, left=0.05, right=0.99, top=0.95, bottom=0.05)
i = len(LEDsets)
run = 1
sigmas = []
for LEDs, limit in LEDsets:
deff = []
for l in LEDs:
t = pf.getdata('f%sFlatField.L%i.fits' % (l, limit))[limits[2]:limits[3], limits[0]:limits[1]]
if fitlow:
#meshgrid representing data
x, y = np.mgrid[:t.shape[0], :t.shape[1]]
#fit a polynomial 2d surface to remove the illumination profile
p_init = models.Polynomial2D(degree=degree)
f = fitting.NonLinearLSQFitter()
p = f(p_init, x, y, t)
#normalize data and save it to res list
t /= p(x, y)
deff.append(t)
print 'Trying to recover a master PRNU map with', LEDs
#build a PRNU model using SVD
recovered = _singularValueDecompostionRecover([int(w) for w in LEDs], deff, degree=len(LEDs)-1)
#difference between the model and the truth and simple statistics
ratio = (recovered - master) / float(len(LEDs)-1.)
sigma = ratio.std()
print sigma, ratio.mean(), ratio.max(), ratio.min()
sigmas.append(sigma)
#plot
ax1 = fig.add_subplot(i, 4, run)
ax2 = fig.add_subplot(i, 4, run+1)
ax3 = fig.add_subplot(i, 4, run+2)
ax4 = fig.add_subplot(i, 4, run+3)
ax1.set_title('Target')
ax2.set_title('Derived')
ax3.set_title(r'Residual: D-T')
i1 = ax1.imshow(gaussian_filter(master, smooth),
origin='lower', interpolation='none', rasterized=True, vmin=0.997, vmax=1.003)
i2 = ax2.imshow(gaussian_filter(recovered, smooth),
interpolation='none', origin='lower', rasterized=True, vmin=0.997, vmax=1.003)
i3 = ax3.imshow(gaussian_filter(ratio, smooth),
interpolation='none', origin='lower', rasterized=True, vmin=-0.0003, vmax=0.0003)
txt = r'LEDs: ' + str([int(w) for w in LEDs])
d = ratio.flatten()
ax4.hist(d, bins=bins, weights=np.ones_like(d)/len(d))
ax4.set_xlim(-0.001, 0.001)
ax4.set_ylim(.0, 0.2)
ax4.set_xticks([-0.0005, 0, 0.0005])
ax4.set_title(txt, fontsize=10)
ax4.annotate(r'$\sigma(D-T) \sim %.2e$' % sigma, xycoords='axes fraction', xy=(0.05, 0.85))
plt.setp(ax1.get_xticklabels(), visible=False)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.setp(ax3.get_xticklabels(), visible=False)
plt.setp(ax4.get_xticklabels(), visible=False)
plt.setp(ax2.get_yticklabels(), visible=False)
plt.setp(ax3.get_yticklabels(), visible=False)
plt.setp(ax4.get_yticklabels(), visible=False)
run += 4
plt.setp(ax1.get_xticklabels(), visible=True)
plt.setp(ax2.get_xticklabels(), visible=True)
plt.setp(ax3.get_xticklabels(), visible=True)
plt.setp(ax4.get_xticklabels(), visible=True)
#plt.colorbar(i1, ax=ax1, orientation='horizontal', format='%.3f', ticks=[0.997, 1, 1.003])
#plt.colorbar(i2, ax=ax2, orientation='horizontal', format='%.3f', ticks=[0.997, 1, 1.003])
#plt.colorbar(i3, ax=ax3, orientation='horizontal', format='%.4f', ticks=[-0.0005, 0, 0.0005])
plt.savefig('PRNUMasterRecovery.pdf')
plt.close()
#requirement plot
plt.figure()
plt.subplots_adjust(wspace=0.01, hspace=0.2, left=0.1, right=0.99, top=0.95, bottom=0.1)
plt.plot([3,4,5,6], sigmas, 'bo', label='Data')
plt.axhline(y=2e-4, color='r', label='Requirement')
plt.xlabel('Number of LEDs')
plt.ylabel(r'$\sigma(D-T)$')
plt.xlim(2.9, 6.1)
plt.ylim(5e-5, 5e-4)
plt.xticks([3, 4, 5, 6])
plt.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1)
plt.savefig('sigmaVsLEDs.pdf')
plt.close()
def recoverMasterOnlyLowOrder(degree=5, smooth=2):
"""
    Recover the low order component of a master PRNU map using polynomial surface fitting.
"""
datad = _loadPRNUmaps()
data = []
w = []
for i, wave in enumerate(sorted(datad.keys())):
data.append(datad[wave][1200:1250, 1200:1250])
w.append(int(wave))
data = np.asarray(data)
#master flat is the average of the independent PRNU maps
master = np.mean(data, axis=0)
three = [0, 4, 6]
four = [0, 1, 4, 6]
five = [0, 1, 3, 4, 6]
six = [0, 1, 2, 4, 5, 6]
LEDsets = (three, four, five, six)
#figure definitions
fig = plt.figure(figsize=(13, 13))
plt.subplots_adjust(wspace=0.01, hspace=0.2, left=0.05, right=0.99, top=0.95, bottom=0.05)
i = len(LEDsets)
run = 1
sigmas = []
for LEDs in LEDsets:
weff = []
deff = []
for l in LEDs:
weff.append(w[l])
deff.append(data[l])
print 'Trying to recover the low order part of a master PRNU map with', weff
recovered = _lowOrderPolynomialSurfaceRecovery(weff, deff, w, degree=degree)
x, y = np.mgrid[:master.shape[0], :master.shape[1]]
p_init = models.Polynomial2D(degree=degree)
f = fitting.NonLinearLSQFitter()
p = f(p_init, x, y, master)
master = p(x, y)
ratio = recovered / master
sigma = ratio.std()
print sigma
sigmas.append(sigma)
#plot
ax1 = fig.add_subplot(i, 4, run)
ax2 = fig.add_subplot(i, 4, run+1)
ax3 = fig.add_subplot(i, 4, run+2)
ax4 = fig.add_subplot(i, 4, run+3)
ax1.set_title('Target')
ax2.set_title('Derived')
ax3.set_title(r'Residual: (D/T)')
i1 = ax1.imshow(gaussian_filter(master, smooth),
origin='lower', interpolation='none', rasterized=True, vmin=0.999, vmax=1.001)
i2 = ax2.imshow(gaussian_filter(recovered, smooth),
interpolation='none', origin='lower', rasterized=True, vmin=0.999, vmax=1.001)
i3 = ax3.imshow(ratio,
interpolation='none', origin='lower', rasterized=True, vmin=0.99999, vmax=1.00001)
txt = r'LEDs:' + str(weff)
ax4.hist(ratio.flatten(), bins=20)
ax4.set_title(txt)
ax4.annotate(r'$\sigma(D/T) \sim %.2e$' % sigma, xycoords='axes fraction', xy=(0.05, 0.85))
run += 4
plt.colorbar(i1, ax=ax1, orientation='horizontal', format='%.4f', ticks=[0.9995, 1, 1.0005])
plt.colorbar(i2, ax=ax2, orientation='horizontal', format='%.4f', ticks=[0.9995, 1, 1.0005])
plt.colorbar(i3, ax=ax3, orientation='horizontal', format='%.3f', ticks=[0.99995, 1, 1.00005])
plt.savefig('PRNUMasterRecoveryLowOrder.pdf')
plt.close()
def simpleTestforRecovery(limits=(200, 3800, 200, 3800)):
"""
Just averaged files.
"""
#master flat is the average of the independent PRNU maps
master = pf.getdata('MasterFlat.fits')[limits[2]:limits[3], limits[0]:limits[1]]
# files = ['f545FlatField.L28.fits', 'f570FlatField.L28.fits', 'f600FlatField.L28.fits', 'f660FlatField.L28.fits',
# 'f700FlatField.L28.fits', 'f800FlatField.L28.fits', 'f850FlatField.L28.fits']
# master = np.asarray([pf.getdata(f)[limits[2]:limits[3], limits[0]:limits[1]] for f in files])
# master = np.mean(master, axis=0)
#wavelengths to use in the analysis
three = ['545', '700', '850']
four = ['545', '600', '700', '850']
five = ['545', '570', '660', '800', '850']
six = ['545', '570', '600', '660', '800', '850']
LEDsets = ((three, 28), (four, 21), (five, 17), (six, 14))
for LEDs, limit in LEDsets:
d = []
for l in LEDs:
t = pf.getdata('f%sFlatField.L%i.fits' % (l, limit))[limits[2]:limits[3], limits[0]:limits[1]]
d.append(t)
sigma1 = np.std(master - np.mean(d, axis=0))
sigma2 = np.std(master - np.median(d, axis=0))
print LEDs, sigma1, sigma2
def _loadPRNUmaps(id='*FlatField.fits'):
#load data
data = {}
for file in g.glob(id):
fh = pf.open(file)
wave = file[1:4]
data[wave] = fh[1].data
fh.close()
return data
def generateMasterFlat():
files = g.glob('nominal/*FlatField.fits')
data = [pf.getdata(file).astype(np.float64) for file in files]
avg = np.mean(np.asarray(data), axis=0).astype(np.float64)
fileIO.writeFITS(avg, 'MasterFlat.fits', int=False)
if __name__ == '__main__':
#generate flats from all available data
#flats()
#generate flats with limited inputs, needed to have matched SNR in the combined PRNU maps
# for limit in [28, 21, 17, 14]:
# flatsLimit(limit=limit)
#generate master flat, done from all input files
#generateMasterFlat()
#plot generated flats
#plot()
#spatial autocorrelations
#spatialAutocorrelation()
#power spectrum analysis
#powerSpectrum()
#normalised cross-correlation of the PRNU maps
#correlate()
#try morphing flats to other wavelengths
#morphPRNUmap()
#structural parameters
#structuralSimilarity()
# #try to recover a PRNU map using information at other wavelengths
# #using linear interpolation
# for hide in [570, 600, 660, 700, 800]:
# print 'Hiding %inm, trying to recover using linear interpolation with' % hide
# d, t = interpolateMissing(hide=hide)
# plotMissing(t, d, hide)
# d, t = interpolateMissing(hide=hide, three=True)
# plotMissing(t, d, hide, out='Three')
#try to recover a PRNU map using information at other wavelengths
#using linear regression
# for hide in [570, 600, 660, 700, 800]:
# print 'Hiding %inm, trying to recover using linear regression with' % hide
# d, t = predictMissingLinearRegression(hide=hide)
# plotMissing(t, d, hide, out='LR')
# d, t = predictMissingLinearRegression(hide=hide, three=True)
# plotMissing(t, d, hide, out='ThreeLR')
# #try to recover a PRNU map using information at other wavelengths
# #using gaussian process
# for hide in [570, 600, 660, 700, 800]:
# print 'Hiding %inm, trying to recover using gaussian process with' % hide
# d, t = predictMissingGaussianProcessRegression(hide=hide)
# plotMissing(t, d, hide, out='GP')
# d, t = predictMissingGaussianProcessRegression(hide=hide, three=True)
# plotMissing(t, d, hide, out='ThreeGP')
#plotMissing(pf.getdata('small/recovered700.fits'), pf.getdata('small/hidden700.fits'), 700, out='TEST')
#plotRecoveredResiduals()
#simple low order polynomial recovery
#recoverMasterOnlyLowOrder()
#try to recover the master PRNU map
recoverMaster()
#simpleTestforRecovery()
|
sniemi/EuclidVisibleInstrument
|
analysis/analyseGroundFlatsLambda.py
|
Python
|
bsd-2-clause
| 53,358
|
[
"Gaussian"
] |
4da4f908ef9fd935d834144def1bdc16618e6d53c933c897b67b25f31c14329c
|
#! /usr/bin/env python
########################################################################
# $HeadURL$
########################################################################
"""
Change the group of catalog files
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from COMDIRAC.Interfaces import ConfigCache
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
from DIRAC import S_OK
class Params(object):
def __init__(self):
self.recursive = False
def setRecursive(self, opt):
self.recursive = True
return S_OK()
def getRecursive(self):
return self.recursive
@Script()
def main():
params = Params()
Script.setUsageMessage(
"\n".join(
[
__doc__.split("\n")[1],
"Usage:",
" %s [options] group Path..." % Script.scriptName,
"Arguments:",
" group: new group name",
" Path: path to file",
"",
"Examples:",
" $ dchown atsareg ././some_lfn_file",
" $ dchown -R pgay ./",
]
)
)
Script.registerSwitch("R", "recursive", "recursive", params.setRecursive)
configCache = ConfigCache()
Script.parseCommandLine(ignoreErrors=True)
configCache.cacheConfig()
args = Script.getPositionalArgs()
import DIRAC
from DIRAC import gLogger
from COMDIRAC.Interfaces import DSession
from COMDIRAC.Interfaces import DCatalog
from COMDIRAC.Interfaces import pathFromArgument
session = DSession()
catalog = DCatalog()
if len(args) < 2:
print("Error: not enough arguments provided\n%s:" % Script.scriptName)
Script.showHelp()
DIRAC.exit(-1)
group = args[0]
lfns = []
for path in args[1:]:
lfns.append(pathFromArgument(session, path))
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
fc = FileCatalog()
for lfn in lfns:
try:
pathDict = {lfn: group}
result = fc.changePathGroup(pathDict, params.recursive)
if not result["OK"]:
gLogger.error("Error:", result["Message"])
break
if lfn in result["Value"]["Failed"]:
gLogger.error("Error:", result["Value"]["Failed"][lfn])
except Exception as x:
print("Exception:", str(x))
if __name__ == "__main__":
main()
|
DIRACGrid/COMDIRAC
|
src/COMDIRAC/Interfaces/scripts/dchgrp.py
|
Python
|
gpl-3.0
| 2,531
|
[
"DIRAC"
] |
e53552435ac1444cda68b4489399d04bc0d6fed85989832b823e7a264767d335
|
# -*- coding: utf-8 -*-
# Copyright 2011 Viewfinder Inc. All Rights Reserved.
"""Tests DB indexing and querying.
"""
__author__ = 'spencer@emailscrubbed.com (Spencer Kimball)'
import logging
import platform
import random
import time
import unittest
from functools import partial
from tornado import escape
from viewfinder.backend.base import util
from viewfinder.backend.base.testing import async_test
from viewfinder.backend.db.contact import Contact
from viewfinder.backend.db.db_client import DBKey
from viewfinder.backend.db.episode import Episode
from viewfinder.backend.db.follower import Follower
from viewfinder.backend.db.photo import Photo
from viewfinder.backend.db.schema import Location, Placemark
from viewfinder.backend.db.user import User
from viewfinder.backend.db.vf_schema import USER
from base_test import DBBaseTestCase
class IndexingTestCase(DBBaseTestCase):
@async_test
def testIndexing(self):
"""Tests indexing of multiple objects with overlapping field values.
Creates 100 users, then queries for specific items.
"""
given_names = ['Spencer', 'Peter', 'Brian', 'Chris']
family_names = ['Kimball', 'Mattis', 'McGinnis', 'Schoenbohm']
emails = ['spencer.kimball@emailscrubbed.com', 'spencer@goviewfinder.com',
'petermattis@emailscrubbed.com', 'peter.mattis@gmail.com', 'peter@goviewfinder.com',
'brian.mcginnis@emailscrubbed.com', 'brian@goviewfinder.com',
'chris.schoenbohm@emailscrubbed.com', 'chris@goviewfinder.com']
num_users = 100
def _QueryAndVerify(users, barrier_cb, col, value):
def _Verify(q_users):
logging.debug('querying for %s=%s yielded %d matches' % (col, value, len(q_users)))
for u in q_users:
# Exclude users created by base class.
if u.user_id not in [self._user.user_id, self._user2.user_id]:
self.assertEqual(getattr(users[u.user_id], col), value)
barrier_cb()
User.IndexQuery(self._client, ('user.%s={v}' % col, {'v': value}),
col_names=None, callback=_Verify)
def _OnCreateUsers(user_list):
users = dict([(u.user_id, u) for u in user_list])
with util.Barrier(self.stop) as b:
[_QueryAndVerify(users, b.Callback(), 'given_name', value) for value in given_names]
[_QueryAndVerify(users, b.Callback(), 'family_name', value) for value in family_names]
[_QueryAndVerify(users, b.Callback(), 'email', value) for value in emails]
with util.ArrayBarrier(_OnCreateUsers) as b:
for i in xrange(num_users):
kwargs = {'user_id': i + 10,
'given_name': random.choice(given_names),
'family_name': random.choice(family_names),
'email': random.choice(emails), }
user = User.CreateFromKeywords(**kwargs)
user.Update(self._client, partial(b.Callback(), user))
def testIndexQueryForNonExistingItem(self):
"""IndexQuery should not return a result list with any None elements."""
# Create a user:
user = User.CreateFromKeywords(user_id=1, given_name='Mike', family_name='Purtell', email='mike@time.com')
self._RunAsync(user.Update, self._client)
# Should return one non-None item.
results = self._RunAsync(User.IndexQuery, self._client, ('user.given_name={v}', {'v': 'Mike'}), col_names=None)
self.assertEqual(len(results), 1)
self.assertIsNotNone(results[0])
# Delete the item that the index references.
self._RunAsync(self._client.DeleteItem, table=USER, key=user.GetKey())
# IndexQuery again with same query to see that a zero length list is returned.
results = self._RunAsync(User.IndexQuery, self._client, ('user.given_name={v}', {'v': 'Mike'}), col_names=None)
self.assertEqual(len(results), 0)
def testStringSetIndexing(self):
"""Tests indexing of items in string set columns."""
emails = ['spencer.kimball@emailscrubbed.com', 'spencer@goviewfinder.com',
'petermattis@emailscrubbed.com', 'peter.mattis@gmail.com', 'peter@goviewfinder.com',
'brian.mcginnis@emailscrubbed.com', 'brian@goviewfinder.com',
'chris.schoenbohm@emailscrubbed.com', 'chris@goviewfinder.com']
# Create a bunch of contacts with one or two identities.
timestamp = util.GetCurrentTimestamp()
for email in emails:
for email2 in emails:
contact = Contact.CreateFromKeywords(1,
[('Email:' + email, None), ('Email:' + email2, None)],
timestamp,
Contact.GMAIL)
self._RunAsync(contact.Update, self._client)
for email in emails:
q_contacts = self._RunAsync(Contact.IndexQuery,
self._client,
('contact.identities={i}', {'i': 'Email:' + email}),
col_names=None)
logging.debug('querying for %s=%s yielded %d matches' % ('identities', 'Email:' + email, len(q_contacts)))
for contact in q_contacts:
self.assertTrue('Email:' + email in contact.identities)
self.assertEqual(len(q_contacts), len(emails) * 2 - 1)
@async_test
def testRealTimeIndexing(self):
"""Tests index updates in real-time."""
def _QueryAndVerify(p, barrier_cb, query, is_in):
def _Verify(keys):
ids = [key.hash_key for key in keys]
if is_in:
self.assertTrue(p.photo_id in ids)
else:
self.assertFalse(p.photo_id in ids)
barrier_cb()
Photo.IndexQueryKeys(self._client, query, callback=_Verify)
def _OnUpdate(p):
with util.Barrier(self.stop) as b:
_QueryAndVerify(p, b.Callback(), ('photo.caption={c}', {'c': 'Class'}), False)
_QueryAndVerify(p, b.Callback(), ('photo.caption={c}', {'c': 'reunion'}), False)
_QueryAndVerify(p, b.Callback(), ('photo.caption={c}', {'c': '1992'}), True)
_QueryAndVerify(p, b.Callback(), ('photo.caption={c}', {'c': 'culumbia'}), True)
def _Update(p):
p.caption = 'Columbia High School c.o. 1992'
p.Update(self._client, callback=partial(_OnUpdate, p))
photo_id = Photo.ConstructPhotoId(time.time(), 1, 1)
p = self.UpdateDBObject(Photo, user_id=self._user.user_id,
photo_id=photo_id, caption='Class of 1992 reunion')
with util.Barrier(partial(_Update, p)) as b:
_QueryAndVerify(p, b.Callback(), ('photo.caption={c}', {'c': 'reunion'}), True)
_QueryAndVerify(p, b.Callback(), ('photo.caption={c}', {'c': '1992'}), True)
@async_test
@unittest.skipIf(platform.python_implementation() == 'PyPy', 'metaphone queries broken on pypy')
def testMetaphoneQueries(self):
"""Tests metaphone queries."""
def _QueryAndVerify(p, barrier_cb, query_expr, match):
def _Verify(keys):
ids = [key.hash_key for key in keys]
if match:
self.assertTrue(p.photo_id in ids)
else:
self.assertFalse(ids)
barrier_cb()
Photo.IndexQueryKeys(self._client, query_expr, callback=_Verify)
photo_id = Photo.ConstructPhotoId(time.time(), 1, 1)
p = self.UpdateDBObject(Photo, user_id=self._user.user_id,
photo_id=photo_id, caption='Summer in East Hampton')
with util.Barrier(self.stop) as b:
_QueryAndVerify(p, b.Callback(), ('photo.caption={c}', {'c': 'summer'}), True)
_QueryAndVerify(p, b.Callback(), ('photo.caption={c}', {'c': 'sumer'}), True)
_QueryAndVerify(p, b.Callback(), ('photo.caption={c}', {'c': 'summa'}), False)
_QueryAndVerify(p, b.Callback(), ('photo.caption={c}', {'c': 'sum'}), False)
_QueryAndVerify(p, b.Callback(), ('photo.caption={c}', {'c': 'hamton'}), False)
_QueryAndVerify(p, b.Callback(), ('photo.caption={c}', {'c': 'hamptons'}), True)
_QueryAndVerify(p, b.Callback(), ('photo.caption={c}', {'c': 'hammpton'}), True)
# Disabled because we removed location secondary index from Episode table.
@async_test
def disabled_t_estLocationQueries(self):
"""Tests location queries."""
def _QueryAndVerify(episode_ids, barrier_cb, loc_search, matches):
def _Verify(keys):
ids = [key.hash_key for key in keys]
self.assertEqual(len(ids), len(matches))
[self.assertTrue(episode_ids[m] in ids) for m in matches]
barrier_cb()
Episode.IndexQueryKeys(self._client, 'episode.location="%f,%f,%f"' % \
(loc_search[0], loc_search[1], loc_search[2]), callback=_Verify)
def _OnCreate(locations, episodes):
with util.Barrier(self.stop) as b:
episode_ids = dict([(v.title, v.episode_id) for v in episodes])
# Exact search.
_QueryAndVerify(episode_ids, b.Callback(),
Location(40.727657, -73.994583, 30), ['kimball ph'])
_QueryAndVerify(episode_ids, b.Callback(),
Location(41.044048, -71.950622, 100), ['surf lodge'])
# A super-small search area, centered in middle of Great Jones Alley.
_QueryAndVerify(episode_ids, b.Callback(),
Location(40.727267, -73.994443, 10), [])
# Widen the search area to 50m, centered in middle of Great Jones Alley.
_QueryAndVerify(episode_ids, b.Callback(),
Location(40.727267, -73.994443, 50), ['kimball ph', 'bond st sushi'])
# Union square with a 2km radius.
_QueryAndVerify(episode_ids, b.Callback(),
Location(40.736462, -73.990517, 2000),
['kimball ph', 'bond st sushi', 'viewfinder', 'soho house', 'google'])
# The Dominican Republic.
_QueryAndVerify(episode_ids, b.Callback(),
Location(19.041349, -70.427856, 75000), ['casa kimball'])
# The Caribbean.
_QueryAndVerify(episode_ids, b.Callback(),
Location(22.593726, -76.662598, 800000), ['casa kimball', 'atlantis'])
# Long Island.
_QueryAndVerify(episode_ids, b.Callback(), Location(40.989228, -72.144470, 40000),
['kimball east', 'surf lodge'])
locations = {'kimball ph': Location(40.727657, -73.994583, 50.0),
'bond st sushi': Location(40.726901, -73.994358, 50.0),
'viewfinder': Location(40.720169, -73.998756, 200.0),
'soho house': Location(40.740616, -74.005880, 200.0),
'google': Location(40.740974, -74.002115, 500.0),
'kimball east': Location(41.034184, -72.210603, 50.0),
'surf lodge': Location(41.044048, -71.950622, 100.0),
'casa kimball': Location(19.636848, -69.896602, 100.0),
'atlantis': Location(25.086104, -77.323065, 1000.0)}
with util.ArrayBarrier(partial(_OnCreate, locations)) as b:
device_episode_id = 0
for place, location in locations.items():
device_episode_id += 1
timestamp = time.time()
episode_id = Episode.ConstructEpisodeId(timestamp, 1, device_episode_id)
episode = Episode.CreateFromKeywords(timestamp=timestamp,
episode_id=episode_id, user_id=self._user.user_id,
viewpoint_id=self._user.private_vp_id,
publish_timestamp=timestamp,
title=place, location=location)
episode.Update(self._client, b.Callback())
# Disabled because we removed placemark secondary index from Episode table.
@async_test
def disabled_t_estPlacemarkQueries(self):
"""Tests placemark queries."""
def _QueryAndVerify(episode_ids, barrier_cb, search, matches):
def _Verify(keys):
ids = [key.hash_key for key in keys]
self.assertEqual(len(ids), len(matches))
[self.assertTrue(episode_ids[m] in ids) for m in matches]
barrier_cb()
Episode.IndexQueryKeys(self._client, ('episode.placemark={s}', {'s': search}), callback=_Verify)
def _OnCreate(locations, episodes):
with util.Barrier(self.stop) as b:
episode_ids = dict([(v.title, v.episode_id) for v in episodes])
_QueryAndVerify(episode_ids, b.Callback(), 'Broadway', ['kimball ph'])
_QueryAndVerify(episode_ids, b.Callback(), '682 Broadway', ['kimball ph'])
_QueryAndVerify(episode_ids, b.Callback(), 'Broadway 682', [])
_QueryAndVerify(episode_ids, b.Callback(), 'new york, ny, united states',
['kimball ph', 'bond st sushi', 'viewfinder', 'soho house', 'google'])
_QueryAndVerify(episode_ids, b.Callback(), 'new york, ny',
['kimball ph', 'bond st sushi', 'viewfinder', 'soho house', 'google'])
_QueryAndVerify(episode_ids, b.Callback(), 'NY, United States',
['kimball ph', 'bond st sushi', 'viewfinder', 'soho house', 'google',
'kimball east', 'surf lodge'])
_QueryAndVerify(episode_ids, b.Callback(), 'United States',
['kimball ph', 'bond st sushi', 'viewfinder', 'soho house', 'google',
'kimball east', 'surf lodge'])
_QueryAndVerify(episode_ids, b.Callback(), 'Bahamas', ['atlantis'])
_QueryAndVerify(episode_ids, b.Callback(), 'Dominican', ['casa kimball'])
_QueryAndVerify(episode_ids, b.Callback(), 'Dominican Republic', ['casa kimball'])
_QueryAndVerify(episode_ids, b.Callback(), 'Cabrera', ['casa kimball'])
_QueryAndVerify(episode_ids, b.Callback(), 'DR', ['casa kimball'])
locations = {'kimball ph': Placemark('US', 'United States', 'NY', 'New York',
'NoHo', 'Broadway', '682'),
'bond st sushi': Placemark('US', 'United States', 'NY', 'New York',
'NoHo', 'Bond St', '6'),
'viewfinder': Placemark('US', 'United States', 'NY', 'New York',
'SoHo', 'Grand St', '154'),
'soho house': Placemark('US', 'United States', 'NY', 'New York',
'Meatpacking District', '9th Avenue', '29-35'),
'google': Placemark('US', 'United States', 'NY', 'New York',
'Chelsea', '8th Avenue', '111'),
'kimball east': Placemark('US', 'United States', 'NY', 'East Hampton',
'Northwest Harbor', 'Milina', '35'),
'surf lodge': Placemark('US', 'United States', 'NY', 'Montauk',
'', 'Edgemere St', '183'),
'casa kimball': Placemark('DR', 'Dominican Republic', 'Maria Trinidad Sanchez',
'Cabrera', 'Orchid Bay Estates', '', '5-6'),
'atlantis': Placemark('BS', 'Bahamas', '', 'Paradise Island', '', '', '')}
with util.ArrayBarrier(partial(_OnCreate, locations)) as b:
device_episode_id = 0
for place, placemark in locations.items():
device_episode_id += 1
timestamp = time.time()
episode_id = Episode.ConstructEpisodeId(timestamp, 1, device_episode_id)
episode = Episode.CreateFromKeywords(timestamp=timestamp,
episode_id=episode_id, user_id=self._user.user_id,
viewpoint_id=self._user.private_vp_id,
publish_timestamp=timestamp,
title=place, placemark=placemark)
episode.Update(self._client, b.Callback())
@async_test
def testQuerying(self):
"""Tests querying of User objects."""
def _QueryAndVerify(barrier_cb, query_expr, id_set):
def _Verify(keys):
ids = [key.hash_key for key in keys]
if not id_set:
self.assertFalse(ids)
else:
[self.assertTrue(i in id_set) for i in ids]
barrier_cb()
User.IndexQueryKeys(self._client, query_expr,
callback=_Verify)
# Add given & family names to users created by base class.
spencer = self.UpdateDBObject(User, user_id=self._user.user_id, given_name='Spencer', family_name='Kimball')
andrew = self.UpdateDBObject(User, user_id=self._user2.user_id, given_name='Peter', family_name='Mattis')
s_id = set([spencer.user_id])
a_id = set([andrew.user_id])
both_ids = s_id.union(a_id)
no_ids = set([])
with util.Barrier(self.stop) as b:
_QueryAndVerify(b.Callback(), ('user.given_name={sp}', {'sp': 'spencer'}), s_id)
_QueryAndVerify(b.Callback(), ('user.given_name={sp}', {'sp': '\'spencer\''}), s_id)
_QueryAndVerify(b.Callback(), ('user.given_name={sp}', {'sp': '"spencer"'}), s_id)
_QueryAndVerify(b.Callback(), ('(user.given_name={sp})', {'sp': 'spencer'}), s_id)
_QueryAndVerify(b.Callback(), ('user.family_name={k}', {'k': 'kimball'}), both_ids)
_QueryAndVerify(b.Callback(), ('user.given_name={sp} & user.given_name={pe}',
{'sp': 'spencer', 'pe': 'peter'}), no_ids)
_QueryAndVerify(b.Callback(), ('(user.given_name={sp} & user.given_name={pe})',
{'sp': 'spencer', 'pe': 'peter'}), no_ids)
_QueryAndVerify(b.Callback(), ('user.given_name={sp} - user.given_name={pe}',
{'sp': 'spencer', 'pe': 'peter'}), s_id)
_QueryAndVerify(b.Callback(), ('user.given_name={sp} | user.given_name={pe}',
{'sp': 'spencer', 'pe': 'peter'}), both_ids)
_QueryAndVerify(b.Callback(), ('user.given_name={sp} - user.family_name={k}',
{'sp': 'spencer', 'k': 'kimball'}), no_ids)
_QueryAndVerify(b.Callback(), ('user.email={sp}', {'sp': 'spencer'}), s_id)
_QueryAndVerify(b.Callback(), ('user.email={sp} & user.email={gm}', {'sp': 'spencer', 'gm': 'gmail'}), s_id)
_QueryAndVerify(b.Callback(), ('user.email={sp} & user.email={gm} & user.email=com',
{'sp': 'spencer', 'gm': 'gmail', 'c': 'com'}), s_id)
_QueryAndVerify(b.Callback(), ('user.email={gm} & user.email=com - user.email=spencer',
{'gm': 'gmail', 'c': 'com', 'sp': 'spencer'}), no_ids)
_QueryAndVerify(b.Callback(), ('user.email={c}', {'c': 'com'}), both_ids)
_QueryAndVerify(b.Callback(), ('user.email={em}', {'em': '"spencer.kimball@emailscrubbed.com"'}), s_id)
_QueryAndVerify(b.Callback(), ('user.given_name={sp} | user.given_name={pe} - user.email={gm}',
{'sp': 'spencer', 'pe': 'peter', 'gm': 'gmail'}), both_ids)
_QueryAndVerify(b.Callback(), ('(user.given_name={sp} | user.given_name={pe}) - user.email={gm}',
{'sp': 'spencer', 'pe': 'peter', 'gm': 'gmail'}), a_id)
@async_test
def testRangeSupport(self):
"""Tests start_key, end_key, and limit support in IndexQueryKeys
and IndexQuery.
"""
name = 'Rumpelstiltskin'
vp_id = 'v0'
def _QueryAndVerify(cls, barrier_cb, query_expr, start_key, end_key, limit):
def _FindIndex(list, db_key):
for i, item in enumerate(list):
if item.GetKey() == db_key:
return i
return -1
def _Verify(results):
all_items, some_items, some_item_keys = results
# Ensure that IndexQuery and IndexQueryKeys return consistent results.
assert len(some_items) == len(some_item_keys)
assert [u.GetKey() for u in some_items] == some_item_keys
# Ensure that right subset was returned.
start_index = _FindIndex(all_items, start_key) + 1 if start_key is not None else 0
end_index = _FindIndex(all_items, end_key) if end_key is not None else len(all_items)
if limit is not None and start_index + limit < end_index:
end_index = start_index + limit
assert len(some_items) == end_index - start_index, (len(some_items), start_index, end_index)
for expected_item, actual_item in zip(all_items[start_index:end_index], some_items):
expected_dict = expected_item._asdict()
actual_dict = actual_item._asdict()
self.assertEqual(expected_dict, actual_dict)
barrier_cb()
with util.ArrayBarrier(_Verify) as b:
cls.IndexQuery(self._client, query_expr, None, b.Callback(), limit=None)
cls.IndexQuery(self._client, query_expr, None, b.Callback(),
start_index_key=start_key, end_index_key=end_key, limit=limit)
cls.IndexQueryKeys(self._client, query_expr, b.Callback(),
start_index_key=start_key, end_index_key=end_key, limit=limit)
def _RunQueries(cls, query_expr, hash_key_25, hash_key_75, callback):
with util.Barrier(callback) as b:
_QueryAndVerify(cls, b.Callback(), query_expr, start_key=None, end_key=None, limit=None)
_QueryAndVerify(cls, b.Callback(), query_expr, start_key=None, end_key=None, limit=50)
_QueryAndVerify(cls, b.Callback(), query_expr, start_key=None, end_key=hash_key_75, limit=50)
_QueryAndVerify(cls, b.Callback(), query_expr, start_key=None, end_key=hash_key_25, limit=50)
_QueryAndVerify(cls, b.Callback(), query_expr, start_key=hash_key_25, end_key=None, limit=50)
_QueryAndVerify(cls, b.Callback(), query_expr, start_key=hash_key_75, end_key=None, limit=50)
_QueryAndVerify(cls, b.Callback(), query_expr, start_key=hash_key_25, end_key=hash_key_75, limit=50)
_QueryAndVerify(cls, b.Callback(), query_expr, start_key=hash_key_25, end_key=hash_key_75, limit=1)
_QueryAndVerify(cls, b.Callback(), query_expr, start_key=hash_key_25, end_key=hash_key_75, limit=100)
# Create 90 users all with the same given name, and 90 followers for the same viewpoint,
# and 90 followers with same adding_user_id.
for i in xrange(90):
user_id = i + 10
self.UpdateDBObject(User, given_name=name, user_id=user_id, signing_key={})
self.UpdateDBObject(Follower, user_id=user_id, viewpoint_id=vp_id)
with util.Barrier(self.stop) as b:
_RunQueries(User, ('user.given_name={n}', {'n': name}), DBKey(25, None), DBKey(75, None), b.Callback())
_RunQueries(Follower, ('follower.viewpoint_id={id}', {'id': vp_id}), DBKey(25, vp_id), DBKey(75, vp_id),
b.Callback())
def testUnicode(self):
"""Test various interesting Unicode characters."""
base_name = escape.utf8(u'ààà朋友你好abc123\U00010000\U00010000\x00\x01\b\n\t ')
timestamp = time.time()
contact_id_lookup = dict()
def _CreateContact(index):
name = base_name + str(index)
identity_key = 'Email:%s' % name
return Contact.CreateFromKeywords(100, [(identity_key, None)], timestamp, Contact.GMAIL, name=name)
def _VerifyContacts(query_expr, start_key, end_key, exp_indexes):
actual_contacts = self._RunAsync(Contact.IndexQuery, self._client, query_expr, None,
start_index_key=start_key, end_index_key=end_key)
self.assertEqual(len(exp_indexes), len(actual_contacts))
for expected, actual in zip([_CreateContact(i) for i in exp_indexes], actual_contacts):
self.assertEqual(expected._asdict(), actual._asdict())
# Create 3 contacts under user 100 in the db.
for i in xrange(3):
contact = _CreateContact(i)
contact_id_lookup[i] = contact.contact_id
self._RunAsync(contact.Update, self._client)
# Get contact by identity.
identity_key = 'Email:%s' % base_name
_VerifyContacts(('contact.identities={i}', {'i': identity_key + '0'}), None, None, [0])
# Get multiple contacts.
_VerifyContacts(('contact.identities={i} | contact.identities={i2}',
{'i': identity_key + '0', 'i2': identity_key + '1'}),
None, None, [1, 0])
# Get contact with start key.
sort_key = Contact.CreateSortKey(contact_id_lookup[1], timestamp)
_VerifyContacts(('contact.identities={i} | contact.identities={i2}',
{'i': identity_key + '0', 'i2': identity_key + '1'}),
DBKey(100, sort_key), None, [0])
# Get contact with end key.
sort_key = Contact.CreateSortKey(contact_id_lookup[0], timestamp)
_VerifyContacts(('contact.identities={i} | contact.identities={i2}',
{'i': identity_key + '0', 'i2': identity_key + '1'}),
None, DBKey(100, sort_key), [1])
|
treejames/viewfinder
|
backend/db/test/indexing_test.py
|
Python
|
apache-2.0
| 24,773
|
[
"Brian"
] |
aeb7f1dcb293279a075c370d6ed939a529ad850f10e232c3c207a5e96efe2261
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from skbio.util import TestRunner
# Add skbio.io to sys.modules to prevent cycles in our imports
import skbio.io # noqa
# imports included for convenience
from skbio.sequence import Sequence, DNA, RNA, Protein, GeneticCode
from skbio.stats.distance import DistanceMatrix
from skbio.alignment import local_pairwise_align_ssw, TabularMSA
from skbio.tree import TreeNode, nj
from skbio.io import read, write
from skbio.stats.ordination import OrdinationResults
__all__ = ['Sequence', 'DNA', 'RNA', 'Protein', 'GeneticCode',
'DistanceMatrix', 'local_pairwise_align_ssw', 'TabularMSA',
'TreeNode', 'nj', 'read', 'write', 'OrdinationResults']
__credits__ = "https://github.com/biocore/scikit-bio/graphs/contributors"
__version__ = "0.5.0-dev"
mottos = [
# 03/15/2014
"It's gonna get weird, bro.",
# 05/14/2014
"no cog yay",
# 03/18/2015
"bincount!",
]
motto = mottos[-1]
# Created at patorjk.com
title = r"""
* *
_ _ _ _ _ _
(_) | (_) | | | (_)
___ ___ _| | ___| |_ ______| |__ _ ___
/ __|/ __| | |/ / | __|______| '_ \| |/ _ \
\__ \ (__| | <| | |_ | |_) | | (_) |
|___/\___|_|_|\_\_|\__| |_.__/|_|\___/
* *
"""
# Created by @gregcaporaso
art = r"""
Opisthokonta
\ Amoebozoa
\ /
* Euryarchaeota
\ |_ Crenarchaeota
\ *
\ /
*
/
/
/
*
/ \
/ \
Proteobacteria \
Cyanobacteria
"""
if __doc__ is None:
__doc__ = title + art
else:
__doc__ = title + art + __doc__
test = TestRunner(__file__).test
if __name__ == '__main__':
test()
|
kdmurray91/scikit-bio
|
skbio/__init__.py
|
Python
|
bsd-3-clause
| 2,376
|
[
"scikit-bio"
] |
632ad6142f79447369a1c3a27511a686656b17a1a702be6c1e2352970edbd7de
|
"""Flowline modelling: bed shapes and model numerics.
"""
# Builtins
import logging
import copy
from collections import OrderedDict
from functools import partial
from time import gmtime, strftime
import os
import shutil
import warnings
# External libs
import numpy as np
import shapely.geometry as shpg
import xarray as xr
from scipy import interpolate
# Optional libs
try:
import salem
except ImportError:
pass
import pandas as pd
# Locals
from oggm import __version__
import oggm.cfg as cfg
from oggm import utils
from oggm import entity_task
from oggm.exceptions import InvalidParamsError, InvalidWorkflowError
from oggm.core.massbalance import (MultipleFlowlineMassBalance,
ConstantMassBalance,
PastMassBalance,
AvgClimateMassBalance,
RandomMassBalance)
from oggm.core.centerlines import Centerline, line_order
from oggm.core.inversion import find_sia_flux_from_thickness
# Constants
from oggm.cfg import SEC_IN_DAY, SEC_IN_YEAR
from oggm.cfg import G, GAUSSIAN_KERNEL
# Module logger
log = logging.getLogger(__name__)
class Flowline(Centerline):
"""A Centerline with additional properties: input to the FlowlineModel
"""
def __init__(self, line=None, dx=1, map_dx=None,
surface_h=None, bed_h=None, rgi_id=None,
water_level=None, gdir=None):
""" Initialize a Flowline
Parameters
----------
line : :py:class:`shapely.geometry.LineString`
the geometrical line of a :py:class:`oggm.Centerline`
dx : float
Grid spacing in pixel coordinates
map_dx : float
DEM grid spacing in meters
surface_h: :py:class:`numpy.ndarray`
elevation [m] of the flowline grid points
bed_h: :py:class:`numpy.ndarray`
elevation[m] of the bedrock at the flowline grid points
rgi_id : str
The glacier's RGI identifier
water_level : float
The water level (to compute volume below sea-level)
"""
        # This is to add flexibility for testing
if dx is None:
dx = 1.
if line is None:
coords = np.arange(len(surface_h)) * dx
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
super(Flowline, self).__init__(line, dx, surface_h)
self._thick = utils.clip_min(surface_h - bed_h, 0.)
self.map_dx = map_dx
self.dx_meter = map_dx * self.dx
self.bed_h = bed_h
self.rgi_id = rgi_id
self.water_level = water_level
self._point_lons = None
self._point_lats = None
self.map_trafo = None
if gdir is not None:
self.map_trafo = partial(gdir.grid.ij_to_crs, crs=salem.wgs84)
# volume not yet removed from the flowline
self.calving_bucket_m3 = 0
def has_ice(self):
return np.any(self.thick > 0)
@Centerline.widths.getter
def widths(self):
"""Compute the widths out of H and shape"""
return self.widths_m / self.map_dx
@property
def thick(self):
"""Needed for overriding later"""
return self._thick
@thick.setter
def thick(self, value):
self._thick = utils.clip_min(value, 0)
@Centerline.surface_h.getter
def surface_h(self):
return self._thick + self.bed_h
@surface_h.setter
def surface_h(self, value):
self.thick = value - self.bed_h
@property
def bin_area_m2(self):
# area of the grid point
# this takes the ice thickness into account
return np.where(self.thick > 0, self.widths_m, 0) * self.dx_meter
@property
def length_m(self):
# TODO: take calving bucket into account for fine tuned length?
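        # 'consecutive' counts grid points up to the first one whose thickness
        # drops below the threshold; the default counts all points above the
        # threshold, regardless of gaps along the flowline.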
lt = cfg.PARAMS.get('min_ice_thick_for_length', 0)
if cfg.PARAMS.get('glacier_length_method') == 'consecutive':
if (self.thick > lt).all():
nx = len(self.thick)
else:
nx = np.where(self.thick <= lt)[0][0]
else:
nx = len(np.where(self.thick > lt)[0])
return nx * self.dx_meter
@property
def terminus_index(self):
# the index of the last point with ice thickness above
# min_ice_thick_for_length and consistent with length
lt = cfg.PARAMS.get('min_ice_thick_for_length', 0)
if cfg.PARAMS.get('glacier_length_method') == 'consecutive':
if (self.thick > lt).all():
ix = len(self.thick) - 1
else:
ix = np.where(self.thick <= lt)[0][0] - 1
else:
try:
ix = np.where(self.thick > lt)[0][-1]
except IndexError:
ix = -1
return ix
def _compute_point_lls(self):
if getattr(self, '_point_lons', None) is None:
if getattr(self, 'map_trafo', None) is None:
raise AttributeError('Cannot compute lons and lats on this '
'flowline. It needs to be initialized '
'with a gdir kwarg.')
lons, lats = self.map_trafo(*self.line.xy)
self._point_lons = lons
self._point_lats = lats
@property
def point_lons(self):
self._compute_point_lls()
return self._point_lons
@property
def point_lats(self):
self._compute_point_lls()
return self._point_lats
@property
def volume_m3(self):
return utils.clip_min(np.sum(self.section * self.dx_meter) -
getattr(self, 'calving_bucket_m3', 0), 0)
@property
def volume_km3(self):
return self.volume_m3 * 1e-9
def _vol_below_level(self, water_level=0):
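        # Temporarily set the thickness to the part of the ice column lying
        # below `water_level` (only where the bed is below that level), sum
        # the cross sections, then restore the original thickness. The
        # calving bucket is removed in proportion to the below-water fraction.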
thick = np.copy(self.thick)
n_thick = np.copy(thick)
bwl = (self.bed_h < water_level) & (thick > 0)
n_thick[~bwl] = 0
self.thick = n_thick
vol_tot = np.sum(self.section * self.dx_meter)
n_thick[bwl] = utils.clip_max(self.surface_h[bwl],
water_level) - self.bed_h[bwl]
self.thick = n_thick
vol_bwl = np.sum(self.section * self.dx_meter)
self.thick = thick
fac = vol_bwl / vol_tot if vol_tot > 0 else 0
return utils.clip_min(vol_bwl -
getattr(self, 'calving_bucket_m3', 0) * fac, 0)
@property
def volume_bsl_m3(self):
return self._vol_below_level(water_level=0)
@property
def volume_bsl_km3(self):
return self.volume_bsl_m3 * 1e-9
@property
def volume_bwl_m3(self):
return self._vol_below_level(water_level=self.water_level)
@property
def volume_bwl_km3(self):
return self.volume_bwl_m3 * 1e-9
@property
def area_m2(self):
# TODO: take calving bucket into account
return np.sum(self.bin_area_m2)
@property
def area_km2(self):
return self.area_m2 * 1e-6
def _add_attrs_to_dataset(self, ds):
"""Add bed specific parameters."""
# This must be done by child classes
raise NotImplementedError()
def to_geometry_dataset(self):
"""Makes an xarray Dataset out of the flowline.
Useful only for geometry files (FileModel / restart files),
therefore a bit cryptic regarding dimensions.
"""
h = self.surface_h
nx = len(h)
ds = xr.Dataset()
ds.coords['x'] = np.arange(nx)
ds.coords['c'] = [0, 1]
try:
ds['linecoords'] = (['x', 'c'], np.asarray(self.line.coords))
except AttributeError:
# squeezed lines
pass
ds['surface_h'] = (['x'], h)
ds['bed_h'] = (['x'], self.bed_h)
ds.attrs['class'] = type(self).__name__
ds.attrs['map_dx'] = self.map_dx
ds.attrs['dx'] = self.dx
self._add_attrs_to_dataset(ds)
return ds
def to_diagnostics_dataset(self):
"""Makes an xarray Dataset out of the flowline.
Useful for run_until_and_store's flowline diagnostics data.
"""
h = self.bed_h
nx = len(h)
ds = xr.Dataset()
ds.coords['dis_along_flowline'] = np.arange(nx) * self.map_dx * self.dx
try:
            # This is a bit of bad design, but basically if some task
            # computed the lons of the flowlines before, use them;
            # we don't have access to gdir here so we can't convert the coords
ds['point_lons'] = (['dis_along_flowline'], self.point_lons)
ds['point_lons'].attrs['description'] = 'Longitude along the flowline'
ds['point_lons'].attrs['unit'] = 'deg'
ds['point_lats'] = (['dis_along_flowline'], self.point_lats)
ds['point_lats'].attrs['description'] = 'Latitude along the flowline'
ds['point_lats'].attrs['unit'] = 'deg'
except AttributeError:
# squeezed lines or we haven't computed lons and lats yet
pass
ds['bed_h'] = (['dis_along_flowline'], h)
ds['bed_h'].attrs['description'] = 'Bed elevation along the flowline'
ds['bed_h'].attrs['unit'] = 'm'
ds.attrs['class'] = type(self).__name__
ds.attrs['map_dx'] = self.map_dx
ds.attrs['dx'] = self.dx
return ds
class ParabolicBedFlowline(Flowline):
"""A parabolic shaped Flowline with one degree of freedom
"""
def __init__(self, line=None, dx=None, map_dx=None,
surface_h=None, bed_h=None, bed_shape=None, rgi_id=None,
water_level=None, gdir=None):
""" Instantiate.
Parameters
----------
line : :py:class:`shapely.geometry.LineString`
the geometrical line of a :py:class:`oggm.Centerline`
"""
super(ParabolicBedFlowline, self).__init__(line, dx, map_dx,
surface_h, bed_h,
rgi_id=rgi_id,
water_level=water_level,
gdir=gdir)
assert np.all(np.isfinite(bed_shape))
self.bed_shape = bed_shape
@property
def widths_m(self):
"""Compute the widths out of H and shape"""
return np.sqrt(4*self.thick/self.bed_shape)
@property
def section(self):
return 2./3. * self.widths_m * self.thick
@section.setter
def section(self, val):
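        # Invert S = 2/3 * w * h with w = sqrt(4 * h / shape), i.e.
        # S = 4/3 * h**1.5 / sqrt(shape)  ->  h = (0.75 * S * sqrt(shape))**(2/3)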
self.thick = (0.75 * val * np.sqrt(self.bed_shape))**(2./3.)
@utils.lazy_property
def shape_str(self):
"""The bed shape in text (for debug and other things)"""
return np.repeat('parabolic', self.nx)
def _add_attrs_to_dataset(self, ds):
"""Add bed specific parameters."""
ds['bed_shape'] = (['x'], self.bed_shape)
class RectangularBedFlowline(Flowline):
"""Simple shaped Flowline, glacier width does not change with ice thickness
"""
def __init__(self, line=None, dx=None, map_dx=None,
surface_h=None, bed_h=None, widths=None, rgi_id=None,
water_level=None, gdir=None):
""" Instantiate.
Parameters
----------
line : :py:class:`shapely.geometry.LineString`
the geometrical line of a :py:class:`oggm.Centerline`
"""
super(RectangularBedFlowline, self).__init__(line, dx, map_dx,
surface_h, bed_h,
rgi_id=rgi_id,
water_level=water_level,
gdir=gdir)
self._widths = widths
@property
def widths_m(self):
"""Compute the widths out of H and shape"""
return self._widths * self.map_dx
@property
def section(self):
return self.widths_m * self.thick
@section.setter
def section(self, val):
self.thick = val / self.widths_m
@utils.lazy_property
def shape_str(self):
"""The bed shape in text (for debug and other things)"""
return np.repeat('rectangular', self.nx)
def _add_attrs_to_dataset(self, ds):
"""Add bed specific parameters."""
ds['widths'] = (['x'], self._widths)
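# --- Illustrative sketch (not part of OGGM proper) --------------------------
# A rectangular-bed flowline only needs a bed profile, an ice surface and
# constant widths. The numbers below (domain size, slope, ice thickness and
# width) are made up purely for illustration.
def _example_rectangular_flowline(nx=100, map_dx=100.):
    bed_h = np.linspace(3000., 1000., nx)
    surface_h = bed_h + 100.  # a uniform 100 m of ice
    widths = np.zeros(nx) + 5.  # 5 grid points wide, i.e. 500 m here
    return RectangularBedFlowline(dx=1., map_dx=map_dx, surface_h=surface_h,
                                  bed_h=bed_h, widths=widths)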
class TrapezoidalBedFlowline(Flowline):
"""A Flowline with trapezoidal shape and two degrees of freedom
"""
def __init__(self, line=None, dx=None, map_dx=None, surface_h=None,
bed_h=None, widths=None, lambdas=None, rgi_id=None,
water_level=None, gdir=None):
""" Instantiate.
Parameters
----------
line : :py:class:`shapely.geometry.LineString`
the geometrical line of a :py:class:`oggm.Centerline`
"""
super(TrapezoidalBedFlowline, self).__init__(line, dx, map_dx,
surface_h, bed_h,
rgi_id=rgi_id,
water_level=water_level,
gdir=gdir)
self._w0_m = widths * self.map_dx - lambdas * self.thick
if np.any(self._w0_m <= 0):
raise ValueError('Trapezoid beds need to have origin widths > 0.')
self._prec = np.where(lambdas == 0)[0]
self._lambdas = lambdas
@property
def widths_m(self):
"""Compute the widths out of H and shape"""
return self._w0_m + self._lambdas * self.thick
@property
def section(self):
return (self.widths_m + self._w0_m) / 2 * self.thick
@section.setter
def section(self, val):
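        # Invert the trapezoid area S = w0 * h + lambda / 2 * h**2 for h:
        # with a = 2 * lambda and b = 2 * w0 this gives
        # h = (sqrt(b**2 + 4 * a * S) - b) / a. Rectangular beds (lambda = 0)
        # are handled separately below to avoid the division by zero.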
b = 2 * self._w0_m
a = 2 * self._lambdas
with np.errstate(divide='ignore', invalid='ignore'):
thick = (np.sqrt(b**2 + 4 * a * val) - b) / a
thick[self._prec] = val[self._prec] / self._w0_m[self._prec]
self.thick = thick
@utils.lazy_property
def shape_str(self):
"""The bed shape in text (for debug and other things)"""
return np.repeat('trapezoid', self.nx)
def _add_attrs_to_dataset(self, ds):
"""Add bed specific parameters."""
ds['widths'] = (['x'], self.widths)
ds['lambdas'] = (['x'], self._lambdas)
class MixedBedFlowline(Flowline):
"""A Flowline which can take a combination of different shapes (default)
The default shape is parabolic. At ice divides a rectangular shape is used.
And if the parabola gets too flat a trapezoidal shape is used.
"""
def __init__(self, *, line=None, dx=None, map_dx=None, surface_h=None,
bed_h=None, section=None, bed_shape=None,
is_trapezoid=None, lambdas=None, widths_m=None, rgi_id=None,
water_level=None, gdir=None):
""" Instantiate.
Parameters
----------
line : :py:class:`shapely.geometry.LineString`
the geometrical line of a :py:class:`oggm.Centerline`
"""
super(MixedBedFlowline, self).__init__(line=line, dx=dx, map_dx=map_dx,
surface_h=surface_h.copy(),
bed_h=bed_h.copy(),
rgi_id=rgi_id,
water_level=water_level,
gdir=gdir)
        # To speed up calculations if no trapezoid bed is present
self._do_trapeze = np.any(is_trapezoid)
# Parabolic
assert len(bed_shape) == self.nx
self.bed_shape = bed_shape.copy()
self._sqrt_bed = np.sqrt(bed_shape)
# Trapeze
assert len(lambdas) == self.nx
assert len(is_trapezoid) == self.nx
self._lambdas = lambdas.copy()
self._ptrap = np.where(is_trapezoid)[0]
self.is_trapezoid = is_trapezoid
self.is_rectangular = self.is_trapezoid & (self._lambdas == 0)
# Sanity
self.bed_shape[is_trapezoid] = np.NaN
self._lambdas[~is_trapezoid] = np.NaN
# Here we have to compute the widths out of section and lambda
thick = surface_h - bed_h
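        # Invert the trapezoid area S = (w0 + lambda * h / 2) * h for the
        # bottom width: w0 = S / h - lambda * h / 2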
with np.errstate(divide='ignore', invalid='ignore'):
self._w0_m = section / thick - lambdas * thick / 2
assert np.all(section >= 0)
need_w = (section == 0) & is_trapezoid
if np.any(need_w):
if widths_m is None:
raise ValueError('We need a non-zero section for trapezoid '
'shapes unless you provide widths_m.')
self._w0_m[need_w] = widths_m[need_w]
self._w0_m[~is_trapezoid] = np.NaN
if (np.any(self._w0_m[self._ptrap] <= 0) or
np.any(~np.isfinite(self._w0_m[self._ptrap]))):
raise ValueError('Trapezoid beds need to have origin widths > 0.')
assert np.all(self.bed_shape[~is_trapezoid] > 0)
self._prec = np.where(is_trapezoid & (lambdas == 0))[0]
assert np.allclose(section, self.section)
@property
def widths_m(self):
"""Compute the widths out of H and shape"""
out = np.sqrt(4*self.thick/self.bed_shape)
if self._do_trapeze:
out[self._ptrap] = (self._w0_m[self._ptrap] +
self._lambdas[self._ptrap] *
self.thick[self._ptrap])
return out
@property
def section(self):
out = 2./3. * self.widths_m * self.thick
if self._do_trapeze:
out[self._ptrap] = ((self.widths_m[self._ptrap] +
self._w0_m[self._ptrap]) / 2 *
self.thick[self._ptrap])
return out
@section.setter
def section(self, val):
out = (0.75 * val * self._sqrt_bed)**(2./3.)
if self._do_trapeze:
b = 2 * self._w0_m[self._ptrap]
a = 2 * self._lambdas[self._ptrap]
with np.errstate(divide='ignore', invalid='ignore'):
out[self._ptrap] = ((np.sqrt(b ** 2 + 4 * a * val[self._ptrap])
- b) / a)
out[self._prec] = val[self._prec] / self._w0_m[self._prec]
self.thick = out
@utils.lazy_property
def shape_str(self):
"""The bed shape in text (for debug and other things)"""
out = np.repeat('rectangular', self.nx)
out[~ self.is_trapezoid] = 'parabolic'
out[self.is_trapezoid & ~ self.is_rectangular] = 'trapezoid'
return out
def _add_attrs_to_dataset(self, ds):
"""Add bed specific parameters."""
ds['section'] = (['x'], self.section)
ds['bed_shape'] = (['x'], self.bed_shape)
ds['is_trapezoid'] = (['x'], self.is_trapezoid)
ds['widths_m'] = (['x'], self._w0_m)
ds['lambdas'] = (['x'], self._lambdas)
class FlowlineModel(object):
"""Interface to OGGM's flowline models"""
def __init__(self, flowlines, mb_model=None, y0=0., glen_a=None,
fs=None, inplace=False, smooth_trib_influx=True,
is_tidewater=False, is_lake_terminating=False,
mb_elev_feedback='annual', check_for_boundaries=None,
water_level=None, required_model_steps='monthly'):
"""Create a new flowline model from the flowlines and a MB model.
Parameters
----------
flowlines : list
a list of :py:class:`oggm.Flowline` instances, sorted by order
mb_model : :py:class:`oggm.core.massbalance.MassBalanceModel`
the MB model to use
y0 : int
the starting year of the simulation
glen_a : float
glen's parameter A
fs: float
sliding parameter
inplace : bool
whether or not to make a copy of the flowline objects for the run
setting to True implies that your objects will be modified at run
time by the model (can help to spare memory)
smooth_trib_influx : bool
whether to smooth the mass influx from the incoming tributary.
The default is to use a gaussian kernel on a 9 grid points
window.
is_tidewater: bool, default: False
is this a tidewater glacier?
is_lake_terminating: bool, default: False
is this a lake terminating glacier?
mb_elev_feedback : str, default: 'annual'
'never', 'always', 'annual', or 'monthly': how often the
mass-balance should be recomputed from the mass balance model.
'Never' is equivalent to 'annual' but without elevation feedback
at all (the heights are taken from the first call).
check_for_boundaries : bool
whether the model should raise an error when the glacier exceeds
the domain boundaries. The default is to follow
PARAMS['error_when_glacier_reaches_boundaries']
required_model_steps : str
some Flowline models have an adaptive time stepping scheme, which
is randomly taking steps towards the goal of a "run_until". The
default ('monthly') makes sure that the model results are
consistent whether the users want data at monthly or annual
timesteps by forcing the model to land on monthly steps even if
only annual updates are required. You may want to change this
for optimisation reasons for models that don't require adaptive
steps (for example the deltaH method).
"""
self.is_tidewater = is_tidewater
self.is_lake_terminating = is_lake_terminating
self.is_marine_terminating = is_tidewater and not is_lake_terminating
if water_level is None:
self.water_level = 0
if self.is_lake_terminating:
if not flowlines[-1].has_ice():
raise InvalidParamsError('Set `water_level` for lake '
'terminating glaciers in '
'idealized runs')
                # Arbitrary water level 1 m below the last grid point's elevation
min_h = flowlines[-1].surface_h[flowlines[-1].thick > 0][-1]
self.water_level = (min_h -
cfg.PARAMS['free_board_lake_terminating'])
else:
self.water_level = water_level
# Mass balance
self.mb_elev_feedback = mb_elev_feedback.lower()
if self.mb_elev_feedback in ['never', 'annual']:
self.mb_step = 'annual'
elif self.mb_elev_feedback in ['always', 'monthly']:
self.mb_step = 'monthly'
self.mb_model = mb_model
# Defaults
if glen_a is None:
glen_a = cfg.PARAMS['glen_a']
if fs is None:
fs = cfg.PARAMS['fs']
self.glen_a = glen_a
self.fs = fs
self.glen_n = cfg.PARAMS['glen_n']
self.rho = cfg.PARAMS['ice_density']
if check_for_boundaries is None:
check_for_boundaries = cfg.PARAMS[('error_when_glacier_reaches_'
'boundaries')]
self.check_for_boundaries = check_for_boundaries
# we keep glen_a as input, but for optimisation we stick to "fd"
self._fd = 2. / (cfg.PARAMS['glen_n']+2) * self.glen_a
# Calving shenanigans
self.calving_m3_since_y0 = 0. # total calving since time y0
self.calving_rate_myr = 0.
# Time
if required_model_steps not in ['annual', 'monthly']:
raise InvalidParamsError('required_model_steps needs to be of '
'`annual` or `monthly`.')
self.required_model_steps = required_model_steps
self.y0 = None
self.t = None
self.reset_y0(y0)
self.fls = None
self._tributary_indices = None
self.reset_flowlines(flowlines, inplace=inplace,
smooth_trib_influx=smooth_trib_influx)
@property
def mb_model(self):
return self._mb_model
@mb_model.setter
def mb_model(self, value):
# We need a setter because the MB func is stored as an attr too
_mb_call = None
if value:
if self.mb_elev_feedback in ['always', 'monthly']:
_mb_call = value.get_monthly_mb
elif self.mb_elev_feedback in ['annual', 'never']:
_mb_call = value.get_annual_mb
else:
raise ValueError('mb_elev_feedback not understood')
self._mb_model = value
self._mb_call = _mb_call
self._mb_current_date = None
self._mb_current_out = dict()
self._mb_current_heights = dict()
def reset_y0(self, y0):
"""Reset the initial model time"""
self.y0 = y0
self.t = 0
def reset_flowlines(self, flowlines, inplace=False,
smooth_trib_influx=True):
"""Reset the initial model flowlines"""
if not inplace:
flowlines = copy.deepcopy(flowlines)
try:
len(flowlines)
except TypeError:
flowlines = [flowlines]
self.fls = flowlines
# list of tributary coordinates and stuff
trib_ind = []
for fl in self.fls:
            # Important: each flowline needs to know the water level
fl.water_level = self.water_level
if fl.flows_to is None:
trib_ind.append((None, None, None, None))
continue
idl = self.fls.index(fl.flows_to)
ide = fl.flows_to_indice
if not smooth_trib_influx:
gk = 1
id0 = ide
id1 = ide+1
elif fl.flows_to.nx >= 9:
gk = GAUSSIAN_KERNEL[9]
id0 = ide-4
id1 = ide+5
elif fl.flows_to.nx >= 7:
gk = GAUSSIAN_KERNEL[7]
id0 = ide-3
id1 = ide+4
elif fl.flows_to.nx >= 5:
gk = GAUSSIAN_KERNEL[5]
id0 = ide-2
id1 = ide+3
trib_ind.append((idl, id0, id1, gk))
self._tributary_indices = trib_ind
@property
def yr(self):
return self.y0 + self.t / SEC_IN_YEAR
@property
def area_m2(self):
return np.sum([f.area_m2 for f in self.fls])
@property
def volume_m3(self):
return np.sum([f.volume_m3 for f in self.fls])
@property
def volume_km3(self):
return self.volume_m3 * 1e-9
@property
def volume_bsl_m3(self):
return np.sum([f.volume_bsl_m3 for f in self.fls])
@property
def volume_bsl_km3(self):
return self.volume_bsl_m3 * 1e-9
@property
def volume_bwl_m3(self):
return np.sum([f.volume_bwl_m3 for f in self.fls])
@property
def volume_bwl_km3(self):
return self.volume_bwl_m3 * 1e-9
@property
def area_km2(self):
return self.area_m2 * 1e-6
@property
def length_m(self):
return self.fls[-1].length_m
def get_mb(self, heights, year=None, fl_id=None, fls=None):
"""Get the mass balance at the requested height and time.
Optimized so that no mb model call is necessary at each step.
"""
# Do we even have to optimise?
if self.mb_elev_feedback == 'always':
return self._mb_call(heights, year=year, fl_id=fl_id, fls=fls)
# Ok, user asked for it
if fl_id is None:
            raise ValueError('Need fl_id')
if self.mb_elev_feedback == 'never':
# The very first call we take the heights
if fl_id not in self._mb_current_heights:
# We need to reset just this tributary
self._mb_current_heights[fl_id] = heights
# All calls we replace
heights = self._mb_current_heights[fl_id]
date = utils.floatyear_to_date(year)
if self.mb_elev_feedback in ['annual', 'never']:
# ignore month changes
date = (date[0], date[0])
if self._mb_current_date == date:
if fl_id not in self._mb_current_out:
# We need to reset just this tributary
self._mb_current_out[fl_id] = self._mb_call(heights,
year=year,
fl_id=fl_id,
fls=fls)
else:
# We need to reset all
self._mb_current_date = date
self._mb_current_out = dict()
self._mb_current_out[fl_id] = self._mb_call(heights,
year=year,
fl_id=fl_id,
fls=fls)
return self._mb_current_out[fl_id]
def to_geometry_netcdf(self, path):
"""Creates a netcdf group file storing the state of the model."""
flows_to_id = []
for trib in self._tributary_indices:
flows_to_id.append(trib[0] if trib[0] is not None else -1)
ds = xr.Dataset()
try:
ds.attrs['description'] = 'OGGM model output'
ds.attrs['oggm_version'] = __version__
ds.attrs['calendar'] = '365-day no leap'
ds.attrs['creation_date'] = strftime("%Y-%m-%d %H:%M:%S", gmtime())
ds['flowlines'] = ('flowlines', np.arange(len(flows_to_id)))
ds['flows_to_id'] = ('flowlines', flows_to_id)
ds.to_netcdf(path)
for i, fl in enumerate(self.fls):
ds = fl.to_geometry_dataset()
ds.to_netcdf(path, 'a', group='fl_{}'.format(i))
finally:
ds.close()
def check_domain_end(self):
"""Returns False if the glacier reaches the domains bound."""
return np.isclose(self.fls[-1].thick[-1], 0)
def step(self, dt):
"""Advance the numerical simulation of one single step.
Important: the step dt is a maximum boundary that is *not* guaranteed
to be met if dt is too large for the underlying numerical
implementation. However, ``step(dt)`` should never cross the desired
time step, i.e. if dt is small enough to ensure stability, step
should match it.
        The caller will know how much has actually been advanced by looking
        at the output of ``step()`` or by monitoring ``self.t`` or ``self.yr``.
Parameters
----------
dt : float
the step length in seconds
Returns
-------
the actual dt chosen by the numerical implementation. Guaranteed to
be dt or lower.
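        Examples
        --------
        Illustrative sketch of the contract (``model`` is an instance of a
        concrete subclass): a single call may advance less than requested,
        so reaching a target time is done in a loop.
        >>> t_target = model.t + 365 * 24 * 3600  # one model year ahead
        >>> while model.t < t_target:
        ...     _ = model.step(t_target - model.t)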
"""
raise NotImplementedError
def run_until(self, y1):
"""Runs the model from the current year up to a given year date y1.
        This function runs the model for the time difference y1 - self.y0.
If self.y0 has not been specified at some point, it is 0 and y1 will
be the time span in years to run the model for.
Parameters
----------
y1 : float
Upper time span for how long the model should run
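        Examples
        --------
        Illustrative only (``model`` is assumed to have been created
        elsewhere with ``y0=0``):
        >>> model.run_until(100.)
        >>> yr_now = model.yr  # now (very close to) 100.0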
"""
if self.required_model_steps == 'monthly':
# We force timesteps to monthly frequencies for consistent results
# among use cases (monthly or yearly output) and also to prevent
# "too large" steps in the adaptive scheme.
ts = utils.monthly_timeseries(self.yr, y1)
# Add the last date to be sure we end on it - implementations
# of `step()` and of the loop below should not run twice anyways
ts = np.append(ts, y1)
else:
ts = np.arange(int(self.yr), int(y1+1))
# Loop over the steps we want to meet
for y in ts:
t = (y - self.y0) * SEC_IN_YEAR
# because of CFL, step() doesn't ensure that the end date is met
# lets run the steps until we reach our desired date
while self.t < t:
self.step(t - self.t)
# Check for domain bounds
if self.check_for_boundaries:
if self.fls[-1].thick[-1] > 10:
raise RuntimeError('Glacier exceeds domain boundaries, '
'at year: {}'.format(self.yr))
# Check for NaNs
for fl in self.fls:
if np.any(~np.isfinite(fl.thick)):
raise FloatingPointError('NaN in numerical solution, '
'at year: {}'.format(self.yr))
def run_until_and_store(self, y1,
diag_path=None,
fl_diag_path=False,
geom_path=False,
store_monthly_step=None,
stop_criterion=None,
fixed_geometry_spinup_yr=None
):
"""Runs the model and returns intermediate steps in xarray datasets.
This function repeatedly calls FlowlineModel.run_until for either
monthly or yearly time steps up till the upper time boundary y1.
Parameters
----------
y1 : int
Upper time span for how long the model should run (needs to be
a full year)
diag_path : str
Path and filename where to store the glacier-wide diagnostics
dataset (length, area, volume, etc.) as controlled by
cfg.PARAMS['store_diagnostic_variables'].
The default (None) is to not store the dataset to disk but return
the dataset to the user after execution.
fl_diag_path : str, None or bool
Path and filename where to store the model diagnostics along the
flowline(s).
geom_path : str, None or bool
Path and filename where to store the model geometry dataset. This
dataset contains all necessary info to retrieve the full glacier
geometry after the run, with a FileModel. This is stored
on an annual basis and can be used to restart a run from a
past simulation's geometry ("restart file").
The default (False) prevents creating this dataset altogether
(for optimisation purposes).
Set this to None to not store the dataset to disk but return
the dataset to the user after execution.
        store_monthly_step : bool
If True (False) model diagnostics will be stored monthly (yearly).
If unspecified, we follow the update of the MB model, which
defaults to yearly (see __init__).
stop_criterion : func
a function evaluating the model state (and possibly evolution over
time), and deciding when to stop the simulation. Its signature
should look like:
stop, new_state = stop_criterion(model, previous_state)
where stop is a bool, and new_state a container (likely: dict)
initialized by the function itself on the first call (previous_state
can and should be None on the first call). See
`zero_glacier_stop_criterion` for an example.
fixed_geometry_spinup_yr : int
if set to an integer, the model will artificially prolongate
all outputs of run_until_and_store to encompass all time stamps
starting from the chosen year. The only output affected are the
glacier wide diagnostic files - all other outputs are set
to constants during "spinup"
Returns
-------
geom_ds : xarray.Dataset or None
stores the entire glacier geometry. It is useful to visualize the
glacier geometry or to restart a new run from a modelled geometry.
The glacier state is stored at the beginning of each hydrological
year (not in between in order to spare disk space).
diag_ds : xarray.Dataset
stores a few diagnostic variables such as the volume, area, length
and ELA of the glacier.
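        Examples
        --------
        Illustrative only; ``model`` is assumed to be built elsewhere and the
        file names are placeholders. The default call returns the glacier-wide
        diagnostics dataset:
        >>> diag = model.run_until_and_store(200)
        >>> volume_series = diag.volume_m3.to_series()
        Writing a geometry ("restart") file and stopping early with a custom
        criterion (``stop_when_small`` is a hypothetical helper following the
        signature described above):
        >>> def stop_when_small(model, state):
        ...     # state is None on the first call and unused here
        ...     return model.volume_km3 < 0.01, state
        >>> diag, geom = model.run_until_and_store(200,
        ...                                        geom_path='geometry.nc',
        ...                                        stop_criterion=stop_when_small)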
"""
if int(y1) != y1:
raise InvalidParamsError('run_until_and_store only accepts '
'integer year dates.')
if not self.mb_model.hemisphere:
raise InvalidParamsError('run_until_and_store needs a '
'mass-balance model with an unambiguous '
'hemisphere.')
# Do we have a spinup?
do_fixed_spinup = fixed_geometry_spinup_yr is not None
y0 = fixed_geometry_spinup_yr if do_fixed_spinup else self.yr
# Do we need to create a geometry or flowline diagnostics dataset?
do_geom = geom_path is None or geom_path
do_fl_diag = fl_diag_path is None or fl_diag_path
# time
yearly_time = np.arange(np.floor(y0), np.floor(y1)+1)
if store_monthly_step is None:
store_monthly_step = self.mb_step == 'monthly'
if store_monthly_step:
monthly_time = utils.monthly_timeseries(y0, y1)
else:
monthly_time = np.arange(np.floor(y0), np.floor(y1)+1)
sm = cfg.PARAMS['hydro_month_' + self.mb_model.hemisphere]
yrs, months = utils.floatyear_to_date(monthly_time)
cyrs, cmonths = utils.hydrodate_to_calendardate(yrs, months,
start_month=sm)
# init output
if geom_path:
self.to_geometry_netcdf(geom_path)
ny = len(yearly_time)
if ny == 1:
yrs = [yrs]
cyrs = [cyrs]
months = [months]
cmonths = [cmonths]
nm = len(monthly_time)
if do_geom or do_fl_diag:
sects = [(np.zeros((ny, fl.nx)) * np.NaN) for fl in self.fls]
widths = [(np.zeros((ny, fl.nx)) * np.NaN) for fl in self.fls]
buckets = [np.zeros(ny) for _ in self.fls]
# Diagnostics dataset
diag_ds = xr.Dataset()
# Global attributes
diag_ds.attrs['description'] = 'OGGM model output'
diag_ds.attrs['oggm_version'] = __version__
diag_ds.attrs['calendar'] = '365-day no leap'
diag_ds.attrs['creation_date'] = strftime("%Y-%m-%d %H:%M:%S",
gmtime())
diag_ds.attrs['water_level'] = self.water_level
diag_ds.attrs['glen_a'] = self.glen_a
diag_ds.attrs['fs'] = self.fs
# Add MB model attributes
diag_ds.attrs['mb_model_class'] = self.mb_model.__class__.__name__
for k, v in self.mb_model.__dict__.items():
if np.isscalar(v) and not k.startswith('_'):
diag_ds.attrs['mb_model_{}'.format(k)] = v
# Coordinates
diag_ds.coords['time'] = ('time', monthly_time)
diag_ds.coords['hydro_year'] = ('time', yrs)
diag_ds.coords['hydro_month'] = ('time', months)
diag_ds.coords['calendar_year'] = ('time', cyrs)
diag_ds.coords['calendar_month'] = ('time', cmonths)
diag_ds['time'].attrs['description'] = 'Floating hydrological year'
diag_ds['hydro_year'].attrs['description'] = 'Hydrological year'
diag_ds['hydro_month'].attrs['description'] = 'Hydrological month'
diag_ds['calendar_year'].attrs['description'] = 'Calendar year'
diag_ds['calendar_month'].attrs['description'] = 'Calendar month'
# Variables and attributes
ovars = cfg.PARAMS['store_diagnostic_variables']
if 'volume' in ovars:
diag_ds['volume_m3'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['volume_m3'].attrs['description'] = 'Total glacier volume'
diag_ds['volume_m3'].attrs['unit'] = 'm 3'
if 'volume_bsl' in ovars:
diag_ds['volume_bsl_m3'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['volume_bsl_m3'].attrs['description'] = ('Glacier volume '
'below '
'sea-level')
diag_ds['volume_bsl_m3'].attrs['unit'] = 'm 3'
if 'volume_bwl' in ovars:
diag_ds['volume_bwl_m3'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['volume_bwl_m3'].attrs['description'] = ('Glacier volume '
'below '
'water-level')
diag_ds['volume_bwl_m3'].attrs['unit'] = 'm 3'
if 'area' in ovars:
diag_ds['area_m2'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['area_m2'].attrs['description'] = 'Total glacier area'
diag_ds['area_m2'].attrs['unit'] = 'm 2'
if 'length' in ovars:
diag_ds['length_m'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['length_m'].attrs['description'] = 'Glacier length'
diag_ds['length_m'].attrs['unit'] = 'm'
if 'calving' in ovars:
diag_ds['calving_m3'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['calving_m3'].attrs['description'] = ('Total accumulated '
'calving flux')
diag_ds['calving_m3'].attrs['unit'] = 'm 3'
if 'calving_rate' in ovars:
diag_ds['calving_rate_myr'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['calving_rate_myr'].attrs['description'] = 'Calving rate'
diag_ds['calving_rate_myr'].attrs['unit'] = 'm yr-1'
for gi in range(10):
vn = f'terminus_thick_{gi}'
if vn in ovars:
diag_ds[vn] = ('time', np.zeros(nm) * np.NaN)
diag_ds[vn].attrs['description'] = ('Thickness of grid point '
f'{gi} from terminus.')
diag_ds[vn].attrs['unit'] = 'm'
if do_fixed_spinup:
is_spinup_time = monthly_time < self.yr
diag_ds['is_fixed_geometry_spinup'] = ('time', is_spinup_time)
desc = 'Part of the series which are spinup'
diag_ds['is_fixed_geometry_spinup'].attrs['description'] = desc
diag_ds['is_fixed_geometry_spinup'].attrs['unit'] = '-'
fl_diag_dss = None
if do_fl_diag:
# Time invariant datasets
fl_diag_dss = [fl.to_diagnostics_dataset() for fl in self.fls]
# Global attributes
for ds in fl_diag_dss:
ds.attrs['description'] = 'OGGM model output'
ds.attrs['oggm_version'] = __version__
ds.attrs['calendar'] = '365-day no leap'
ds.attrs['creation_date'] = strftime("%Y-%m-%d %H:%M:%S", gmtime())
ds.attrs['water_level'] = self.water_level
ds.attrs['glen_a'] = self.glen_a
ds.attrs['fs'] = self.fs
# Add MB model attributes
ds.attrs['mb_model_class'] = self.mb_model.__class__.__name__
for k, v in self.mb_model.__dict__.items():
if np.isscalar(v) and not k.startswith('_'):
ds.attrs['mb_model_{}'.format(k)] = v
# Coordinates
ds.coords['time'] = yearly_time
ds['time'].attrs['description'] = 'Floating hydrological year'
# Variables and attributes
ovars_fl = cfg.PARAMS['store_fl_diagnostic_variables']
if 'volume' not in ovars_fl or 'area' not in ovars_fl:
raise InvalidParamsError('Flowline diagnostics need at least '
'volume and area as output.')
for ds, sect, width, bucket in zip(fl_diag_dss, sects, widths, buckets):
if 'volume' in ovars_fl:
ds['volume_m3'] = (('time', 'dis_along_flowline'), sect)
ds['volume_m3'].attrs['description'] = 'Section volume'
ds['volume_m3'].attrs['unit'] = 'm 3'
if 'volume_bsl' in ovars_fl:
ds['volume_bsl_m3'] = (('time', 'dis_along_flowline'), sect * 0)
ds['volume_bsl_m3'].attrs['description'] = 'Section volume below sea level'
ds['volume_bsl_m3'].attrs['unit'] = 'm 3'
if 'volume_bwl' in ovars_fl:
ds['volume_bwl_m3'] = (('time', 'dis_along_flowline'), sect * 0)
ds['volume_bwl_m3'].attrs['description'] = 'Section volume below water level'
ds['volume_bwl_m3'].attrs['unit'] = 'm 3'
if 'area' in ovars_fl:
ds['area_m2'] = (('time', 'dis_along_flowline'), width)
ds['area_m2'].attrs['description'] = 'Section area'
ds['area_m2'].attrs['unit'] = 'm 2'
if 'thickness' in ovars_fl:
ds['thickness_m'] = (('time', 'dis_along_flowline'), width * np.NaN)
ds['thickness_m'].attrs['description'] = 'Section thickness'
ds['thickness_m'].attrs['unit'] = 'm'
if 'ice_velocity' in ovars_fl:
if not (hasattr(self, '_surf_vel_fac') or hasattr(self, 'u_stag')):
raise InvalidParamsError('This flowline model does not seem '
'to be able to provide surface '
'velocities.')
ds['ice_velocity_myr'] = (('time', 'dis_along_flowline'), width * np.NaN)
ds['ice_velocity_myr'].attrs['description'] = 'Ice velocity at the surface'
ds['ice_velocity_myr'].attrs['unit'] = 'm yr-1'
if 'calving_bucket' in ovars_fl:
ds['calving_bucket_m3'] = (('time',), bucket)
desc = 'Flowline calving bucket (volume not yet calved)'
ds['calving_bucket_m3'].attrs['description'] = desc
ds['calving_bucket_m3'].attrs['unit'] = 'm 3'
if do_fixed_spinup:
ds['is_fixed_geometry_spinup'] = ('time', is_spinup_time)
desc = 'Part of the series which are spinup'
ds['is_fixed_geometry_spinup'].attrs['description'] = desc
ds['is_fixed_geometry_spinup'].attrs['unit'] = '-'
# First deal with spinup (we compute volume change only)
if do_fixed_spinup:
spinup_vol = monthly_time * 0
for fl_id, fl in enumerate(self.fls):
h = fl.surface_h
a = fl.widths_m * fl.dx_meter
a[fl.section <= 0] = 0
for j, yr in enumerate(monthly_time[is_spinup_time]):
smb = self.get_mb(h, year=yr, fl_id=fl_id, fls=self.fls)
spinup_vol[j] -= np.sum(smb * a) # per second and minus because backwards
# per unit time
dt = (monthly_time[1:] - monthly_time[:-1]) * cfg.SEC_IN_YEAR
spinup_vol[:-1] = spinup_vol[:-1] * dt
spinup_vol = np.cumsum(spinup_vol[::-1])[::-1]
# Run
j = 0
prev_state = None # for the stopping criterion
for i, (yr, mo) in enumerate(zip(monthly_time, months)):
if yr > self.yr:
# Here we model run - otherwise (for spinup) we
# constantly store the same data
self.run_until(yr)
# Glacier geometry
if (do_geom or do_fl_diag) and mo == 1:
for s, w, b, fl in zip(sects, widths, buckets, self.fls):
s[j, :] = fl.section
w[j, :] = fl.widths_m
if self.is_tidewater:
try:
b[j] = fl.calving_bucket_m3
except AttributeError:
pass
# Flowline diagnostics
if do_fl_diag:
for fl_id, (ds, fl) in enumerate(zip(fl_diag_dss, self.fls)):
# area and volume are already being taken care of above
if 'thickness' in ovars_fl:
ds['thickness_m'].data[j, :] = fl.thick
if 'volume_bsl' in ovars_fl:
ds['volume_bsl_m3'].data[j, :] = fl.volume_bsl_m3
if 'volume_bwl' in ovars_fl:
ds['volume_bwl_m3'].data[j, :] = fl.volume_bwl_m3
if 'ice_velocity' in ovars_fl and (yr > self.y0):
# Velocity can only be computed with dynamics
var = self.u_stag[fl_id]
val = (var[1:fl.nx + 1] + var[:fl.nx]) / 2 * self._surf_vel_fac
ds['ice_velocity_myr'].data[j, :] = val * cfg.SEC_IN_YEAR
# j is the yearly index in case we have monthly output
# we have to count it ourselves
j += 1
# Diagnostics
if 'volume' in ovars:
diag_ds['volume_m3'].data[i] = self.volume_m3
if 'area' in ovars:
diag_ds['area_m2'].data[i] = self.area_m2
if 'length' in ovars:
diag_ds['length_m'].data[i] = self.length_m
if 'calving' in ovars:
diag_ds['calving_m3'].data[i] = self.calving_m3_since_y0
if 'calving_rate' in ovars:
diag_ds['calving_rate_myr'].data[i] = self.calving_rate_myr
if 'volume_bsl' in ovars:
diag_ds['volume_bsl_m3'].data[i] = self.volume_bsl_m3
if 'volume_bwl' in ovars:
diag_ds['volume_bwl_m3'].data[i] = self.volume_bwl_m3
# Terminus thick is a bit more logic
ti = None
for gi in range(10):
vn = f'terminus_thick_{gi}'
if vn in ovars:
if ti is None:
ti = self.fls[-1].terminus_index
diag_ds[vn].data[i] = self.fls[-1].thick[ti - gi]
# Decide if we continue
if stop_criterion is not None:
stop, prev_state = stop_criterion(self, prev_state)
if stop:
break
# to datasets
geom_ds = None
if do_geom:
geom_ds = []
for (s, w, b) in zip(sects, widths, buckets):
ds = xr.Dataset()
ds.attrs['description'] = 'OGGM model output'
ds.attrs['oggm_version'] = __version__
ds.attrs['calendar'] = '365-day no leap'
ds.attrs['creation_date'] = strftime("%Y-%m-%d %H:%M:%S",
gmtime())
ds.attrs['water_level'] = self.water_level
ds.attrs['glen_a'] = self.glen_a
ds.attrs['fs'] = self.fs
# Add MB model attributes
ds.attrs['mb_model_class'] = self.mb_model.__class__.__name__
for k, v in self.mb_model.__dict__.items():
if np.isscalar(v) and not k.startswith('_'):
ds.attrs['mb_model_{}'.format(k)] = v
ds.coords['time'] = yearly_time
ds['time'].attrs['description'] = 'Floating hydrological year'
varcoords = OrderedDict(time=('time', yearly_time),
year=('time', yearly_time))
ds['ts_section'] = xr.DataArray(s, dims=('time', 'x'),
coords=varcoords)
ds['ts_width_m'] = xr.DataArray(w, dims=('time', 'x'),
coords=varcoords)
ds['ts_calving_bucket_m3'] = xr.DataArray(b, dims=('time', ),
coords=varcoords)
if stop_criterion is not None:
# Remove probable NaNs
ds = ds.dropna('time')
geom_ds.append(ds)
# Add the spinup volume to the diag
if do_fixed_spinup:
# If there is calving we need to trick as well
if 'calving_m3' in diag_ds and np.any(diag_ds['calving_m3'] > 0):
raise NotImplementedError('Calving and fixed_geometry_spinup_yr '
'not implemented yet.')
diag_ds['volume_m3'].data[:] += spinup_vol
if stop_criterion is not None:
# Remove probable NaNs
diag_ds = diag_ds.dropna('time')
# write output?
if do_fl_diag:
# Unit conversions for these
for i, ds in enumerate(fl_diag_dss):
dx = ds.attrs['map_dx'] * ds.attrs['dx']
# No inplace because the other dataset uses them
# These variables are always there (see above)
ds['volume_m3'] = ds['volume_m3'] * dx
ds['area_m2'] = ds['area_m2'].where(ds['volume_m3'] > 0, 0) * dx
if stop_criterion is not None:
# Remove probable NaNs
fl_diag_dss[i] = ds.dropna('time')
# Write out?
if fl_diag_path not in [True, None]:
encode = {}
for v in fl_diag_dss[0]:
encode[v] = {'zlib': True, 'complevel': 5}
                # Root-level dataset; the per-flowline data goes into groups below
ds = xr.Dataset()
ds.attrs['description'] = ('OGGM model output on flowlines. '
'Check groups for data.')
ds.attrs['oggm_version'] = __version__
# This is useful to interpret the dataset afterwards
flows_to_id = []
for trib in self._tributary_indices:
flows_to_id.append(trib[0] if trib[0] is not None else -1)
ds['flowlines'] = ('flowlines', np.arange(len(flows_to_id)))
ds['flows_to_id'] = ('flowlines', flows_to_id)
ds.to_netcdf(fl_diag_path, 'w')
for i, ds in enumerate(fl_diag_dss):
ds.to_netcdf(fl_diag_path, 'a', group='fl_{}'.format(i),
encoding=encode)
if do_geom and geom_path not in [True, None]:
encode = {'ts_section': {'zlib': True, 'complevel': 5},
'ts_width_m': {'zlib': True, 'complevel': 5},
}
for i, ds in enumerate(geom_ds):
ds.to_netcdf(geom_path, 'a', group='fl_{}'.format(i),
encoding=encode)
# Add calving to geom file because the FileModel can't infer it
if 'calving_m3' in diag_ds:
diag_ds[['calving_m3']].to_netcdf(geom_path, 'a')
if diag_path not in [True, None]:
diag_ds.to_netcdf(diag_path)
# Decide on what to give back
out = [diag_ds]
if fl_diag_dss is not None:
out.append(fl_diag_dss)
if geom_ds is not None:
out.append(geom_ds)
if len(out) == 1:
out = out[0]
else:
out = tuple(out)
return out
def run_until_equilibrium(self, rate=0.001, ystep=5, max_ite=200):
""" Runs the model until an equilibrium state is reached.
        Be careful: This only works for CONSTANT (not time-dependent)
mass-balance models.
Otherwise the returned state will not be in equilibrium! Don't try to
calculate an equilibrium state with a RandomMassBalance model!
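        Illustrative only: ``fls`` and a constant (time independent)
        mass-balance model ``mb`` are assumed to be built elsewhere.
        >>> model = FluxBasedModel(fls, mb_model=mb, y0=0.)
        >>> model.run_until_equilibrium(rate=0.001, ystep=5)
        >>> eq_volume = model.volume_km3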
"""
ite = 0
was_close_zero = 0
t_rate = 1
while (t_rate > rate) and (ite <= max_ite) and (was_close_zero < 5):
ite += 1
v_bef = self.volume_m3
self.run_until(self.yr + ystep)
v_af = self.volume_m3
if np.isclose(v_bef, 0., atol=1):
t_rate = 1
was_close_zero += 1
else:
t_rate = np.abs(v_af - v_bef) / v_bef
if ite > max_ite:
raise RuntimeError('Did not find equilibrium.')
def flux_gate_with_build_up(year, flux_value=None, flux_gate_yr=None):
"""Default scalar flux gate with build up period"""
fac = 1 - (flux_gate_yr - year) / flux_gate_yr
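    # Illustrative numbers: with flux_gate_yr=100 and flux_value=10,
    # year=0 gives fac=0 (no flux), year=50 gives fac=0.5 (flux=5), and
    # year>=100 gives fac>=1, clipped to 1 below (full flux=10).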
return flux_value * utils.clip_scalar(fac, 0, 1)
class FluxBasedModel(FlowlineModel):
"""The flowline model used by OGGM in production.
It solves for the SIA along the flowline(s) using a staggered grid. It
computes the *ice flux* between grid points and transports the mass
accordingly (also between flowlines).
This model is numerically less stable than fancier schemes, but it
is fast and works with multiple flowlines of any bed shape (rectangular,
parabolic, trapeze, and any combination of them).
We test that it conserves mass in most cases, but not on very stiff cliffs.
"""
def __init__(self, flowlines, mb_model=None, y0=0., glen_a=None,
fs=0., inplace=False, fixed_dt=None, cfl_number=None,
min_dt=None, flux_gate_thickness=None,
flux_gate=None, flux_gate_build_up=100,
do_kcalving=None, calving_k=None, calving_use_limiter=None,
calving_limiter_frac=None, water_level=None,
**kwargs):
"""Instantiate the model.
Parameters
----------
flowlines : list
the glacier flowlines
mb_model : MassBalanceModel
the mass-balance model
y0 : int
initial year of the simulation
glen_a : float
Glen's creep parameter
fs : float
Oerlemans sliding parameter
inplace : bool
whether or not to make a copy of the flowline objects for the run
setting to True implies that your objects will be modified at run
time by the model (can help to spare memory)
fixed_dt : float
set to a value (in seconds) to prevent adaptive time-stepping.
cfl_number : float
Defaults to cfg.PARAMS['cfl_number'].
For adaptive time stepping (the default), dt is chosen from the
CFL criterion (dt = cfl_number * dx / max_u).
To choose the "best" CFL number we would need a stability
analysis - we used an empirical analysis (see blog post) and
settled on 0.02 for the default cfg.PARAMS['cfl_number'].
min_dt : float
Defaults to cfg.PARAMS['cfl_min_dt'].
At high velocities, time steps can become very small and your
model might run very slowly. In production, it might be useful to
set a limit below which the model will just error.
is_tidewater: bool, default: False
is this a tidewater glacier?
is_lake_terminating: bool, default: False
is this a lake terminating glacier?
mb_elev_feedback : str, default: 'annual'
'never', 'always', 'annual', or 'monthly': how often the
mass-balance should be recomputed from the mass balance model.
'Never' is equivalent to 'annual' but without elevation feedback
at all (the heights are taken from the first call).
check_for_boundaries: bool, default: True
raise an error when the glacier grows bigger than the domain
boundaries
flux_gate_thickness : float or array
flux of ice from the left domain boundary (and tributaries).
Units of m of ice thickness. Note that unrealistic values won't be
met by the model, so this is really just a rough guidance.
It's better to use `flux_gate` instead.
flux_gate : float or function or array of floats or array of functions
flux of ice from the left domain boundary (and tributaries)
(unit: m3 of ice per second). If set to a high value, consider
            changing the flux_gate_build_up time. You can also provide
a function (or an array of functions) returning the flux
(unit: m3 of ice per second) as a function of time.
This is overridden by `flux_gate_thickness` if provided.
        flux_gate_build_up : int
number of years used to build up the flux gate to full value
do_kcalving : bool
switch on the k-calving parameterisation. Ignored if not a
tidewater glacier. Use the option from PARAMS per default
calving_k : float
the calving proportionality constant (units: yr-1). Use the
one from PARAMS per default
calving_use_limiter : bool
            whether to switch on the calving limiter for the parameterisation.
            It makes the calving fronts thicker, but the model more stable.
calving_limiter_frac : float
limit the front slope to a fraction of the calving front.
"3" means 1/3. Setting it to 0 limits the slope to sea-level.
water_level : float
the water level. It should be zero m a.s.l, but:
- sometimes the frontal elevation is unrealistically high (or low).
- lake terminating glaciers
- other uncertainties
The default is 0. For lake terminating glaciers,
it is inferred from PARAMS['free_board_lake_terminating'].
The best way to set the water level for real glaciers is to use
the same as used for the inversion (this is what
`flowline_model_run` does for you)
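        Examples
        --------
        Illustrative only (``fls`` and ``mb`` built elsewhere). The numbers
        are a back-of-the-envelope check of the CFL criterion: with
        cfl_number=0.02, dx=100 m and a maximum velocity of 100 m yr-1
        (~3.2e-6 m s-1), the adaptive step is about 0.02 * 100 / 3.2e-6,
        i.e. roughly 6e5 s (about a week).
        >>> model = FluxBasedModel(fls, mb_model=mb, y0=0., cfl_number=0.02)
        >>> model.run_until(100.)
        An idealized run fed by a constant flux gate of 1 m3 of ice per second
        at the upper boundary, ramped up over the first 100 years:
        >>> model = FluxBasedModel(fls, mb_model=mb, y0=0., flux_gate=1.0,
        ...                        flux_gate_build_up=100)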
"""
super(FluxBasedModel, self).__init__(flowlines, mb_model=mb_model,
y0=y0, glen_a=glen_a, fs=fs,
inplace=inplace,
water_level=water_level,
**kwargs)
self.fixed_dt = fixed_dt
if min_dt is None:
min_dt = cfg.PARAMS['cfl_min_dt']
if cfl_number is None:
cfl_number = cfg.PARAMS['cfl_number']
self.min_dt = min_dt
self.cfl_number = cfl_number
# Do we want to use shape factors?
self.sf_func = None
use_sf = cfg.PARAMS.get('use_shape_factor_for_fluxbasedmodel')
if use_sf == 'Adhikari' or use_sf == 'Nye':
self.sf_func = utils.shape_factor_adhikari
elif use_sf == 'Huss':
self.sf_func = utils.shape_factor_huss
# Calving params
if do_kcalving is None:
do_kcalving = cfg.PARAMS['use_kcalving_for_run']
self.do_calving = do_kcalving and self.is_tidewater
if calving_k is None:
calving_k = cfg.PARAMS['calving_k']
self.calving_k = calving_k / cfg.SEC_IN_YEAR
if calving_use_limiter is None:
calving_use_limiter = cfg.PARAMS['calving_use_limiter']
self.calving_use_limiter = calving_use_limiter
if calving_limiter_frac is None:
calving_limiter_frac = cfg.PARAMS['calving_limiter_frac']
if calving_limiter_frac > 0:
raise NotImplementedError('calving limiter other than 0 not '
'implemented yet')
self.calving_limiter_frac = calving_limiter_frac
# Flux gate
self.flux_gate = utils.tolist(flux_gate, length=len(self.fls))
self.flux_gate_m3_since_y0 = 0.
if flux_gate_thickness is not None:
# Compute the theoretical ice flux from the slope at the top
flux_gate_thickness = utils.tolist(flux_gate_thickness,
length=len(self.fls))
self.flux_gate = []
for fl, fgt in zip(self.fls, flux_gate_thickness):
# We set the thickness to the desired value so that
# the widths work ok
fl = copy.deepcopy(fl)
fl.thick = fl.thick * 0 + fgt
slope = (fl.surface_h[0] - fl.surface_h[1]) / fl.dx_meter
if slope == 0:
raise ValueError('I need a slope to compute the flux')
flux = find_sia_flux_from_thickness(slope,
fl.widths_m[0],
fgt,
shape=fl.shape_str[0],
glen_a=self.glen_a,
fs=self.fs)
self.flux_gate.append(flux)
# convert the floats to function calls
for i, fg in enumerate(self.flux_gate):
if fg is None:
continue
try:
# Do we have a function? If yes all good
fg(self.yr)
except TypeError:
# If not, make one
self.flux_gate[i] = partial(flux_gate_with_build_up,
flux_value=fg,
flux_gate_yr=(flux_gate_build_up +
self.y0))
# Special output
self._surf_vel_fac = (self.glen_n + 2) / (self.glen_n + 1)
# Optim
self.slope_stag = []
self.thick_stag = []
self.section_stag = []
self.u_stag = []
self.shapefac_stag = []
self.flux_stag = []
self.trib_flux = []
for fl, trib in zip(self.fls, self._tributary_indices):
nx = fl.nx
# This is not staggered
self.trib_flux.append(np.zeros(nx))
# We add an additional fake grid point at the end of tributaries
if trib[0] is not None:
nx = fl.nx + 1
# +1 is for the staggered grid
self.slope_stag.append(np.zeros(nx+1))
self.thick_stag.append(np.zeros(nx+1))
self.section_stag.append(np.zeros(nx+1))
self.u_stag.append(np.zeros(nx+1))
self.shapefac_stag.append(np.ones(nx+1)) # beware the ones!
self.flux_stag.append(np.zeros(nx+1))
def step(self, dt):
"""Advance one step."""
# Just a check to avoid useless computations
if dt <= 0:
raise InvalidParamsError('dt needs to be strictly positive')
# Simple container
mbs = []
# Loop over tributaries to determine the flux rate
for fl_id, fl in enumerate(self.fls):
# This is possibly less efficient than zip() but much clearer
trib = self._tributary_indices[fl_id]
slope_stag = self.slope_stag[fl_id]
thick_stag = self.thick_stag[fl_id]
section_stag = self.section_stag[fl_id]
sf_stag = self.shapefac_stag[fl_id]
flux_stag = self.flux_stag[fl_id]
trib_flux = self.trib_flux[fl_id]
u_stag = self.u_stag[fl_id]
flux_gate = self.flux_gate[fl_id]
# Flowline state
surface_h = fl.surface_h
thick = fl.thick
section = fl.section
dx = fl.dx_meter
# If it is a tributary, we use the branch it flows into to compute
# the slope of the last grid point
is_trib = trib[0] is not None
if is_trib:
fl_to = self.fls[trib[0]]
ide = fl.flows_to_indice
surface_h = np.append(surface_h, fl_to.surface_h[ide])
thick = np.append(thick, thick[-1])
section = np.append(section, section[-1])
elif self.do_calving and self.calving_use_limiter:
# We lower the max possible ice deformation
# by clipping the surface slope here. It is completely
# arbitrary but reduces ice deformation at the calving front.
# I think that in essence, it is also partly
# a "calving process", because this ice deformation must
# be less at the calving front. The result is that calving
# front "free boards" are quite high.
# Note that 0 is arbitrary, it could be any value below SL
surface_h = utils.clip_min(surface_h, self.water_level)
# Staggered gradient
slope_stag[0] = 0
slope_stag[1:-1] = (surface_h[0:-1] - surface_h[1:]) / dx
slope_stag[-1] = slope_stag[-2]
# Staggered thick
thick_stag[1:-1] = (thick[0:-1] + thick[1:]) / 2.
thick_stag[[0, -1]] = thick[[0, -1]]
if self.sf_func is not None:
# TODO: maybe compute new shape factors only every year?
sf = self.sf_func(fl.widths_m, fl.thick, fl.is_rectangular)
if is_trib:
# for inflowing tributary, the sf makes no sense
sf = np.append(sf, 1.)
sf_stag[1:-1] = (sf[0:-1] + sf[1:]) / 2.
sf_stag[[0, -1]] = sf[[0, -1]]
# Staggered velocity (Deformation + Sliding)
# _fd = 2/(N+2) * self.glen_a
N = self.glen_n
rhogh = (self.rho*G*slope_stag)**N
u_stag[:] = (thick_stag**(N+1)) * self._fd * rhogh * sf_stag**N + \
(thick_stag**(N-1)) * self.fs * rhogh
# Staggered section
section_stag[1:-1] = (section[0:-1] + section[1:]) / 2.
section_stag[[0, -1]] = section[[0, -1]]
# Staggered flux rate
flux_stag[:] = u_stag * section_stag
# Add boundary condition
if flux_gate is not None:
flux_stag[0] = flux_gate(self.yr)
# CFL condition
if not self.fixed_dt:
maxu = np.max(np.abs(u_stag))
if maxu > cfg.FLOAT_EPS:
cfl_dt = self.cfl_number * dx / maxu
else:
cfl_dt = dt
# Update dt only if necessary
if cfl_dt < dt:
dt = cfl_dt
if cfl_dt < self.min_dt:
raise RuntimeError(
'CFL error: required time step smaller '
'than the minimum allowed: '
'{:.1f}s vs {:.1f}s. Happening at '
'simulation year {:.1f}, fl_id {}, '
'bin_id {} and max_u {:.3f} m yr-1.'
''.format(cfl_dt, self.min_dt, self.yr, fl_id,
np.argmax(np.abs(u_stag)),
maxu * cfg.SEC_IN_YEAR))
# Since we are in this loop, reset the tributary flux
trib_flux[:] = 0
# We compute MB in this loop, before mass-redistribution occurs,
# so that MB models which rely on glacier geometry to decide things
            # (like PyGEM) can do so with a clean glacier state
mbs.append(self.get_mb(fl.surface_h, self.yr,
fl_id=fl_id, fls=self.fls))
# Time step
if self.fixed_dt:
# change only if step dt is larger than the chosen dt
if self.fixed_dt < dt:
dt = self.fixed_dt
# A second loop for the mass exchange
for fl_id, fl in enumerate(self.fls):
flx_stag = self.flux_stag[fl_id]
trib_flux = self.trib_flux[fl_id]
tr = self._tributary_indices[fl_id]
dx = fl.dx_meter
is_trib = tr[0] is not None
# For these we had an additional grid point
if is_trib:
flx_stag = flx_stag[:-1]
# Mass-balance
widths = fl.widths_m
mb = mbs[fl_id]
# Allow parabolic beds to grow
mb = dt * mb * np.where((mb > 0.) & (widths == 0), 10., widths)
# Update section with ice flow and mass balance
new_section = (fl.section + (flx_stag[0:-1] - flx_stag[1:])*dt/dx +
trib_flux*dt/dx + mb)
# Keep positive values only and store
fl.section = utils.clip_min(new_section, 0)
# If we use a flux-gate, store the total volume that came in
self.flux_gate_m3_since_y0 += flx_stag[0] * dt
# Add the last flux to the tributary
# this works because the lines are sorted in order
if is_trib:
# tr tuple: line_index, start, stop, gaussian_kernel
self.trib_flux[tr[0]][tr[1]:tr[2]] += \
utils.clip_min(flx_stag[-1], 0) * tr[3]
# --- The rest is for calving only ---
self.calving_rate_myr = 0.
# If tributary, do calving only if we are not transferring mass
if is_trib and flx_stag[-1] > 0:
continue
# No need to do calving in these cases either
if not self.do_calving or not fl.has_ice():
continue
# We do calving only if the last glacier bed pixel is below water
# (this is to avoid calving elsewhere than at the front)
if fl.bed_h[fl.thick > 0][-1] > self.water_level:
continue
# We do calving only if there is some ice above wl
last_above_wl = np.nonzero((fl.surface_h > self.water_level) &
(fl.thick > 0))[0][-1]
if fl.bed_h[last_above_wl] > self.water_level:
continue
# OK, we're really calving
section = fl.section
# Calving law
h = fl.thick[last_above_wl]
d = h - (fl.surface_h[last_above_wl] - self.water_level)
k = self.calving_k
q_calving = k * d * h * fl.widths_m[last_above_wl]
# Add to the bucket and the diagnostics
fl.calving_bucket_m3 += q_calving * dt
self.calving_m3_since_y0 += q_calving * dt
self.calving_rate_myr = (q_calving / section[last_above_wl] *
cfg.SEC_IN_YEAR)
# See if we have ice below sea-water to clean out first
below_sl = (fl.surface_h < self.water_level) & (fl.thick > 0)
to_remove = np.sum(section[below_sl]) * fl.dx_meter
if 0 < to_remove < fl.calving_bucket_m3:
# This is easy, we remove everything
section[below_sl] = 0
fl.calving_bucket_m3 -= to_remove
elif to_remove > 0:
                # We can only remove part of it
section[below_sl] = 0
section[last_above_wl+1] = ((to_remove - fl.calving_bucket_m3)
/ fl.dx_meter)
fl.calving_bucket_m3 = 0
# The rest of the bucket might calve an entire grid point (or more?)
vol_last = section[last_above_wl] * fl.dx_meter
while fl.calving_bucket_m3 > vol_last:
fl.calving_bucket_m3 -= vol_last
section[last_above_wl] = 0
# OK check if we need to continue (unlikely)
last_above_wl -= 1
vol_last = section[last_above_wl] * fl.dx_meter
# We update the glacier with our changes
fl.section = section
# Next step
self.t += dt
return dt
def get_diagnostics(self, fl_id=-1):
"""Obtain model diagnostics in a pandas DataFrame.
Velocities in OGGM's FluxBasedModel are sometimes subject to
numerical instabilities. To deal with the issue, you can either
set a smaller ``PARAMS['cfl_number']`` (e.g. 0.01) or smooth the
output a bit, e.g. with ``df.rolling(5, center=True, min_periods=1).mean()``
Parameters
----------
fl_id : int
the index of the flowline of interest, from 0 to n_flowline-1.
Default is to take the last (main) one
Returns
-------
        a pandas DataFrame, whose index is the distance along the flowline
        (m). Units:
            - surface_h, bed_h, ice_thick, section_width: m
- section_area: m2
- slope: -
- ice_flux, tributary_flux: m3 of *ice* per second
- ice_velocity: m per second (depth-section integrated)
            - surface_ice_velocity: m per second (corrected for surface - simplified)
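        Examples
        --------
        Illustrative only (``model`` is a FluxBasedModel that has been run):
        >>> df = model.get_diagnostics()
        >>> # optional smoothing against numerical noise, as suggested above
        >>> df_smooth = df.rolling(5, center=True, min_periods=1).mean()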
"""
import pandas as pd
fl = self.fls[fl_id]
nx = fl.nx
df = pd.DataFrame(index=fl.dx_meter * np.arange(nx))
df.index.name = 'distance_along_flowline'
df['surface_h'] = fl.surface_h
df['bed_h'] = fl.bed_h
df['ice_thick'] = fl.thick
df['section_width'] = fl.widths_m
df['section_area'] = fl.section
# Staggered
var = self.slope_stag[fl_id]
df['slope'] = (var[1:nx+1] + var[:nx])/2
var = self.flux_stag[fl_id]
df['ice_flux'] = (var[1:nx+1] + var[:nx])/2
var = self.u_stag[fl_id]
df['ice_velocity'] = (var[1:nx+1] + var[:nx])/2
df['surface_ice_velocity'] = df['ice_velocity'] * self._surf_vel_fac
var = self.shapefac_stag[fl_id]
df['shape_fac'] = (var[1:nx+1] + var[:nx])/2
# Not Staggered
df['tributary_flux'] = self.trib_flux[fl_id]
return df
class MassConservationChecker(FluxBasedModel):
"""This checks if the FluxBasedModel is conserving mass."""
def __init__(self, flowlines, **kwargs):
""" Instantiate.
Parameters
----------
"""
super(MassConservationChecker, self).__init__(flowlines, **kwargs)
self.total_mass = 0.
def step(self, dt):
mbs = []
sections = []
for fl in self.fls:
# Mass balance
widths = fl.widths_m
mb = self.get_mb(fl.surface_h, self.yr, fl_id=id(fl))
mbs.append(mb * widths)
sections.append(np.copy(fl.section))
dx = fl.dx_meter
dt = super(MassConservationChecker, self).step(dt)
for mb, sec in zip(mbs, sections):
mb = dt * mb
# there can't be more negative mb than there is section
# this isn't an exact solution unfortunately
# TODO: exact solution for mass conservation
mb = utils.clip_min(mb, -sec)
self.total_mass += np.sum(mb * dx)
class KarthausModel(FlowlineModel):
"""The actual model"""
def __init__(self, flowlines, mb_model=None, y0=0., glen_a=None, fs=0.,
fixed_dt=None, min_dt=SEC_IN_DAY, max_dt=31*SEC_IN_DAY,
inplace=False, **kwargs):
""" Instantiate.
Parameters
----------
"""
if len(flowlines) > 1:
raise ValueError('Karthaus model does not work with tributaries.')
super(KarthausModel, self).__init__(flowlines, mb_model=mb_model,
y0=y0, glen_a=glen_a, fs=fs,
inplace=inplace, **kwargs)
        self.dt_warning = False
if fixed_dt is not None:
min_dt = fixed_dt
max_dt = fixed_dt
self.min_dt = min_dt
self.max_dt = max_dt
def step(self, dt):
"""Advance one step."""
# Just a check to avoid useless computations
if dt <= 0:
raise InvalidParamsError('dt needs to be strictly positive')
# This is to guarantee a precise arrival on a specific date if asked
min_dt = dt if dt < self.min_dt else self.min_dt
dt = utils.clip_scalar(dt, min_dt, self.max_dt)
fl = self.fls[0]
dx = fl.dx_meter
width = fl.widths_m
thick = fl.thick
MassBalance = self.get_mb(fl.surface_h, self.yr, fl_id=id(fl))
SurfaceHeight = fl.surface_h
# Surface gradient
SurfaceGradient = np.zeros(fl.nx)
SurfaceGradient[1:fl.nx-1] = (SurfaceHeight[2:] -
SurfaceHeight[:fl.nx-2])/(2*dx)
SurfaceGradient[-1] = 0
SurfaceGradient[0] = 0
# Diffusivity
N = self.glen_n
Diffusivity = width * (self.rho*G)**3 * thick**3 * SurfaceGradient**2
Diffusivity *= 2/(N+2) * self.glen_a * thick**2 + self.fs
# on stagger
DiffusivityStaggered = np.zeros(fl.nx)
SurfaceGradientStaggered = np.zeros(fl.nx)
DiffusivityStaggered[1:] = (Diffusivity[:fl.nx-1] + Diffusivity[1:])/2.
DiffusivityStaggered[0] = Diffusivity[0]
SurfaceGradientStaggered[1:] = (SurfaceHeight[1:] -
SurfaceHeight[:fl.nx-1])/dx
SurfaceGradientStaggered[0] = 0
GradxDiff = SurfaceGradientStaggered * DiffusivityStaggered
# Yo
NewIceThickness = np.zeros(fl.nx)
NewIceThickness[:fl.nx-1] = (thick[:fl.nx-1] + (dt/width[0:fl.nx-1]) *
(GradxDiff[1:]-GradxDiff[:fl.nx-1])/dx +
dt * MassBalance[:fl.nx-1])
NewIceThickness[-1] = thick[fl.nx-2]
fl.thick = utils.clip_min(NewIceThickness, 0)
# Next step
self.t += dt
return dt
class FileModel(object):
"""Duck FlowlineModel which actually reads data out of a nc file."""
def __init__(self, path):
""" Instantiate.
Parameters
----------
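        Examples
        --------
        Illustrative only (the path is a placeholder for an existing
        geometry file):
        >>> fmod = FileModel('geometry.nc')
        >>> fmod.run_until(year=2050)  # any year available in the file
        >>> volume_ts = fmod.volume_m3_ts()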
"""
self.fls = glacier_from_netcdf(path)
fl_tss = []
for flid, fl in enumerate(self.fls):
with xr.open_dataset(path, group='fl_{}'.format(flid)) as ds:
if flid == 0:
# Populate time
self.time = ds.time.values
try:
self.years = ds.year.values
except AttributeError:
raise InvalidWorkflowError('The provided model output '
'file is incomplete (likely '
'when the previous '
'run failed) or corrupt.')
try:
self.months = ds.month.values
except AttributeError:
self.months = self.years * 0 + 1
# Read out the data
fl_data = {
'ts_section': ds.ts_section.values,
'ts_width_m': ds.ts_width_m.values,
}
try:
fl_data['ts_calving_bucket_m3'] = ds.ts_calving_bucket_m3.values
except AttributeError:
fl_data['ts_calving_bucket_m3'] = self.years * 0
fl_tss.append(fl_data)
self.fl_tss = fl_tss
self.last_yr = float(ds.time[-1])
# Calving diags
try:
with xr.open_dataset(path) as ds:
self._calving_m3_since_y0 = ds.calving_m3.values
self.do_calving = True
except AttributeError:
self._calving_m3_since_y0 = 0
self.do_calving = False
# time
self.reset_y0()
def __enter__(self):
warnings.warn('FileModel no longer needs to be run as a '
'context manager. You can safely remove the '
'`with` statement.', FutureWarning)
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def reset_y0(self, y0=None):
"""Reset the initial model time"""
if y0 is None:
y0 = float(self.time[0])
self.y0 = y0
self.yr = y0
self._current_index = 0
@property
def area_m2(self):
return np.sum([f.area_m2 for f in self.fls])
@property
def volume_m3(self):
return np.sum([f.volume_m3 for f in self.fls])
@property
def volume_km3(self):
return self.volume_m3 * 1e-9
@property
def area_km2(self):
return self.area_m2 * 1e-6
@property
def length_m(self):
return self.fls[-1].length_m
@property
def calving_m3_since_y0(self):
if self.do_calving:
return self._calving_m3_since_y0[self._current_index]
else:
return 0
def run_until(self, year=None, month=None):
"""Mimics the model's behavior.
        Note that this is quite slow.
"""
try:
if month is not None:
pok = np.nonzero((self.years == year) & (self.months == month))[0][0]
else:
pok = np.nonzero(self.time == year)[0][0]
except IndexError as err:
raise IndexError('Index year={}, month={} not available in '
'FileModel.'.format(year, month)) from err
self.yr = self.time[pok]
self._current_index = pok
for fl, fl_ts in zip(self.fls, self.fl_tss):
fl.section = fl_ts['ts_section'][pok, :]
fl.calving_bucket_m3 = fl_ts['ts_calving_bucket_m3'][pok]
def area_m2_ts(self, rollmin=0):
"""rollmin is the number of years you want to smooth onto"""
sel = 0
for fl, fl_ts in zip(self.fls, self.fl_tss):
widths = np.where(fl_ts['ts_section'] > 0., fl_ts['ts_width_m'], 0.)
sel += widths.sum(axis=1) * fl.dx_meter
sel = pd.Series(data=sel, index=self.time, name='area_m2')
if rollmin != 0:
sel = sel.rolling(rollmin).min()
sel.iloc[0:rollmin] = sel.iloc[rollmin]
return sel
def area_km2_ts(self, **kwargs):
return self.area_m2_ts(**kwargs) * 1e-6
def volume_m3_ts(self):
sel = 0
for fl, fl_ts in zip(self.fls, self.fl_tss):
sel += fl_ts['ts_section'].sum(axis=1) * fl.dx_meter
sel -= fl_ts['ts_calving_bucket_m3']
return pd.Series(data=sel, index=self.time, name='volume_m3')
def volume_km3_ts(self):
return self.volume_m3_ts() * 1e-9
def length_m_ts(self, rollmin=0):
raise NotImplementedError('length_m_ts is no longer available in the '
'full output files. To obtain the length '
'time series, refer to the diagnostic '
'output file.')
class MassRedistributionCurveModel(FlowlineModel):
"""Glacier geometry updated using mass redistribution curves.
Also known as the "delta-h method": This uses mass redistribution curves
from Huss et al. (2010) to update the glacier geometry.
Code by David Rounce (PyGEM) and adapted by F. Maussion.
"""
def __init__(self, flowlines, mb_model=None, y0=0.,
is_tidewater=False, water_level=None,
do_kcalving=None, calving_k=None,
advance_method=1,
**kwargs):
""" Instantiate the model.
Parameters
----------
flowlines : list
the glacier flowlines
mb_model : MassBalanceModel
the mass-balance model
y0 : int
initial year of the simulation
advance_method : int
different ways to handle positive MBs:
- 0: do nothing, i.e. simply let the glacier thicken instead of
thinning
- 1: add some of the mass at the end of the glacier
- 2: add some of the mass at the end of the glacier, but
differently
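        Examples
        --------
        Illustrative only (``fl`` is a single flowline and ``mb`` an annual
        mass-balance model built elsewhere); the model accepts one flowline
        only and advances in annual steps:
        >>> model = MassRedistributionCurveModel([fl], mb_model=mb, y0=0.)
        >>> diag = model.run_until_and_store(100)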
"""
super(MassRedistributionCurveModel, self).__init__(flowlines,
mb_model=mb_model,
y0=y0,
water_level=water_level,
mb_elev_feedback='annual',
required_model_steps='annual',
**kwargs)
if len(self.fls) > 1:
raise InvalidWorkflowError('MassRedistributionCurveModel is not '
'set up for multiple flowlines')
fl = self.fls[0]
self.glac_idx_initial = fl.thick.nonzero()[0]
self.y0 = y0
# Some ways to deal with positive MB
self.advance_method = advance_method
# Frontal ablation shenanigans
if do_kcalving is None:
do_kcalving = cfg.PARAMS['use_kcalving_for_run']
self.do_calving = do_kcalving and self.is_tidewater
if calving_k is None:
calving_k = cfg.PARAMS['calving_k']
self.is_tidewater = is_tidewater
self.calving_k = calving_k
self.calving_m3_since_y0 = 0. # total calving since time y0
def step(self, dt):
"""Advance one step. Here it should be one year"""
# Just a check to avoid useless computations
if dt <= 0:
raise InvalidParamsError('dt needs to be strictly positive')
if dt > cfg.SEC_IN_YEAR:
# This should not happen from how run_until is built, but
# to match the adaptive time stepping scheme of other models
# we don't complain here and just do one year
dt = cfg.SEC_IN_YEAR
elif dt < cfg.SEC_IN_YEAR:
# Here however we complain - we really want one year exactly
raise InvalidWorkflowError('I was asked to run for less than one '
'year. Delta-H models can\'t do that.')
# Flowline state
fl = self.fls[0]
fl_id = 0
height = fl.surface_h.copy()
section = fl.section.copy()
thick = fl.thick.copy()
width = fl.widths_m.copy()
# FRONTAL ABLATION
if self.do_calving:
raise NotImplementedError('Frontal ablation not there yet.')
# Redistribute mass if glacier is still there
if not np.any(section > 0):
# Do nothing
self.t += dt
return dt
# Mass redistribution according to Huss empirical curves
# Annual glacier mass balance [m ice s-1]
mb = self.get_mb(height, year=self.yr, fls=self.fls, fl_id=fl_id)
# [m ice yr-1]
mb *= cfg.SEC_IN_YEAR
# Ok now to the bulk of it
# Mass redistribution according to empirical equations from
# Huss and Hock (2015) accounting for retreat/advance.
# glac_idx_initial is required to ensure that the glacier does not
# advance to area where glacier did not exist before
# (e.g., retreat and advance over a vertical cliff)
# Note: since OGGM uses the DEM, heights along the flowline do not
# necessarily decrease, i.e., there can be overdeepenings along the
# flowlines that occur as the glacier retreats. This is problematic
# for 'adding' a bin downstream in cases of glacier advance because
# you'd be moving new ice to a higher elevation. To avoid this
# unrealistic case, in the event that this would occur, the
# overdeepening will simply fill up with ice first until it reaches
# an elevation where it would put new ice into a downstream bin.
# Bin area [m2]
bin_area = width * fl.dx_meter
bin_area[thick == 0] = 0
# Annual glacier-wide volume change [m3]
# units: m3 ice per year
glacier_delta_v = (mb * bin_area).sum()
# For hindcast simulations, volume change is the opposite
# We don't implement this in OGGM right now
# If volume loss is more than the glacier volume, melt everything and
# stop here. Otherwise, redistribute mass loss/gains across the glacier
glacier_volume_total = (section * fl.dx_meter).sum()
if (glacier_volume_total + glacier_delta_v) < 0:
# Set all to zero and return
fl.section *= 0
            # Advance time anyway so that step() honours its contract
            self.t += dt
            return dt
# Determine where glacier exists
glac_idx = thick.nonzero()[0]
# Compute bin volume change [m3 ice yr-1] after redistribution
bin_delta_v = mass_redistribution_curve_huss(height, bin_area, mb,
glac_idx, glacier_delta_v)
# Here per construction bin_delta_v should be approx equal to glacier_delta_v
np.testing.assert_allclose(bin_delta_v.sum(), glacier_delta_v)
# Update cross sectional area
# relevant issue: https://github.com/OGGM/oggm/issues/941
# volume change divided by length (dx); units m2
delta_s = bin_delta_v / fl.dx_meter
fl.section = utils.clip_min(section + delta_s, 0)
if not np.any(delta_s > 0):
# We shrink - all good
fl.section = utils.clip_min(section + delta_s, 0)
# Done
self.t += dt
return dt
# We grow - that's bad because growing is hard
# First see what happens with thickness
# (per construction the redistribution curves are all positive btw)
fl.section = utils.clip_min(section + delta_s, 0)
# We decide if we really want to advance or if we don't care
# Matthias and Dave use 5m, I find that too much, because not
# redistributing may lead to runaway effects
dh_thres = 1 # in meters
if np.all((fl.thick - thick) <= dh_thres):
# That was not much increase return
self.t += dt
return dt
# Ok, we really grow then - back to previous state and decide on what to do
fl.section = section
if (fl.thick[-2] > 0) or (self.advance_method == 0):
# Do not advance (same as in the melting case but thickening)
fl.section = utils.clip_min(section + delta_s, 0)
if self.advance_method == 1:
# Just shift the redistribution by one pix
new_delta_s = np.append([0], delta_s)[:-1]
# Redis param - how much of mass we want to shift (0 - 1)
# 0 would be like method 0 (do nothing)
# 1 would be to shift all mass by 1 pixel
a = 0.2
new_delta_s = a * new_delta_s + (1 - a) * delta_s
# Make sure we are still preserving mass
new_delta_s *= delta_s.sum() / new_delta_s.sum()
# Update section
fl.section = utils.clip_min(section + new_delta_s, 0)
elif self.advance_method == 2:
# How much of what's positive do we want to add in front
redis_perc = 0.01 # in %
# Decide on volume that needs redistributed
section_redis = delta_s * redis_perc
# The rest is added where it was
fl.section = utils.clip_min(section + delta_s - section_redis, 0)
# Then lets put this "volume" where we can
section_redis = section_redis.sum()
while section_redis > 0:
# Get the terminus grid
orig_section = fl.section.copy()
p_term = np.nonzero(fl.thick > 0)[0][-1]
# Put ice on the next bin, until ice is as high as terminus
# Anything else would require slope assumptions, but it would
# be possible
new_thick = fl.surface_h[p_term] - fl.bed_h[p_term + 1]
if new_thick > 0:
# No deepening
fl.thick[p_term + 1] = new_thick
new_section = fl.section[p_term + 1]
if new_section > section_redis:
# This would be too much, just add what we have
orig_section[p_term + 1] = section_redis
fl.section = orig_section
section_redis = 0
else:
# OK this bin is done, continue
section_redis -= new_section
else:
# We have a deepening, or we have to climb
target_h = fl.bed_h[p_term + 1] + 1
to_fill = (fl.surface_h < target_h) & (fl.thick > 0)
# Theoretical section area needed
fl.thick[to_fill] = target_h - fl.bed_h[to_fill]
new_section = fl.section.sum() - orig_section.sum()
if new_section > section_redis:
# This would be too much, just add what we have
orig_section[to_fill] += new_section / np.sum(to_fill)
fl.section = orig_section
section_redis = 0
else:
# OK this bin is done, continue
section_redis -= new_section
elif self.advance_method == 3:
# A bit closer to Huss and Rounce maybe?
raise RuntimeError('not yet')
# Done
self.t += dt
return dt
def mass_redistribution_curve_huss(height, bin_area, mb, glac_idx, glacier_delta_v):
"""Apply the mass redistribution curves from Huss and Hock (2015).
This has to be followed by added logic which takes into consideration
retreat and advance.
Parameters
----------
height : np.ndarray
Glacier elevation [m] from previous year for each elevation bin
bin_area : np.ndarray
Glacier area [m2] from previous year for each elevation bin
mb : np.ndarray
Annual climatic mass balance [m ice yr-1] for each elevation bin for
a single year
glac_idx : np.ndarray
glacier indices for present timestep
glacier_delta_v : float
glacier-wide volume change [m3 ice yr-1] based on the annual mb
Returns
-------
bin_volume_change : np.ndarray
Ice volume change [m3 yr-1] for each elevation bin
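    Notes
    -----
    Illustrative arithmetic for the smallest glacier class
    (gamma=2, a=-0.30, b=0.60, c=0.09): at the terminus h_norm = 1 and
    delta_h_norm = 0.7**2 + 0.6*0.7 + 0.09 = 1.0, while at the top
    h_norm = 0 gives 0.3**2 - 0.6*0.3 + 0.09 = 0.0, i.e. the normalized
    thickness change is largest at the terminus and vanishes at the top.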
"""
# Apply mass redistribution curve
if glac_idx.shape[0] <= 3:
# No need for a curve when glacier is so small
return mb * bin_area
# Select the parameters based on glacier area
if bin_area.sum() * 1e-6 > 20:
gamma, a, b, c = (6, -0.02, 0.12, 0)
elif bin_area.sum() * 1e-6 > 5:
gamma, a, b, c = (4, -0.05, 0.19, 0.01)
else:
gamma, a, b, c = (2, -0.30, 0.60, 0.09)
# reset variables
delta_h_norm = bin_area * 0
# Normalized elevation range [-]
# (max elevation - bin elevation) / (max_elevation - min_elevation)
gla_height = height[glac_idx]
max_elev, min_elev = gla_height.max(), gla_height.min()
h_norm = (max_elev - gla_height) / (max_elev - min_elev)
# using indices as opposed to elevations automatically skips bins on
# the glacier that have no area such that the normalization is done
# only on bins where the glacier lies
# Normalized ice thickness change [-]
delta_h_norm[glac_idx] = (h_norm + a) ** gamma + b * (h_norm + a) + c
# delta_h = (h_n + a)**gamma + b*(h_n + a) + c
# limit normalized ice thickness change to between 0 - 1
delta_h_norm = utils.clip_array(delta_h_norm, 0, 1)
# Huss' ice thickness scaling factor, fs_huss [m ice]
    # units: m3 / (m2 * [-]) = m ice
fs_huss = glacier_delta_v / (bin_area * delta_h_norm).sum()
# Volume change [m3 ice yr-1]
return delta_h_norm * fs_huss * bin_area
def flowline_from_dataset(ds):
"""Instantiates a flowline from an xarray Dataset."""
cl = globals()[ds.attrs['class']]
line = shpg.LineString(ds['linecoords'].values)
args = dict(line=line, dx=ds.dx, map_dx=ds.map_dx,
surface_h=ds['surface_h'].values,
bed_h=ds['bed_h'].values)
have = {'c', 'x', 'surface_h', 'linecoords', 'bed_h', 'z', 'p', 'n',
'time', 'month', 'year', 'ts_width_m', 'ts_section',
'ts_calving_bucket_m3'}
missing_vars = set(ds.variables.keys()).difference(have)
for k in missing_vars:
data = ds[k].values
if ds[k].dims[0] == 'z':
data = data[0]
args[k] = data
return cl(**args)
def glacier_from_netcdf(path):
"""Instantiates a list of flowlines from an xarray Dataset."""
with xr.open_dataset(path) as ds:
fls = []
for flid in ds['flowlines'].values:
with xr.open_dataset(path, group='fl_{}'.format(flid)) as _ds:
fls.append(flowline_from_dataset(_ds))
for i, fid in enumerate(ds['flows_to_id'].values):
if fid != -1:
fls[i].set_flows_to(fls[fid])
# Adds the line level
for fl in fls:
fl.order = line_order(fl)
return fls
def calving_glacier_downstream_line(line, n_points):
"""Extends a calving glacier flowline past the terminus."""
if line is None:
return None
x, y = line.coords.xy
dx = x[-1] - x[-2]
dy = y[-1] - y[-2]
x = np.append(x, x[-1] + dx * np.arange(1, n_points+1))
y = np.append(y, y[-1] + dy * np.arange(1, n_points+1))
return shpg.LineString(np.array([x, y]).T)
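# Example (illustrative values): extending a two-point line by three grid
# points continues it along the direction of its last segment, e.g.
# calving_glacier_downstream_line(shpg.LineString([(0, 0), (1, 0)]), 3)
# has the coordinates (0, 0), (1, 0), (2, 0), (3, 0), (4, 0).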
def old_init_present_time_glacier(gdir):
"""Init_present_time_glacier when trapezoid inversion was not possible."""
# Some vars
map_dx = gdir.grid.dx
def_lambda = cfg.PARAMS['trapezoid_lambdas']
min_shape = cfg.PARAMS['mixed_min_shape']
cls = gdir.read_pickle('inversion_flowlines')
invs = gdir.read_pickle('inversion_output')
# Fill the tributaries
new_fls = []
flows_to_ids = []
for cl, inv in zip(cls, invs):
# Get the data to make the model flowlines
line = cl.line
section = inv['volume'] / (cl.dx * map_dx)
surface_h = cl.surface_h
bed_h = surface_h - inv['thick']
widths_m = cl.widths * map_dx
assert np.all(widths_m > 0)
bed_shape = 4 * inv['thick'] / (cl.widths * map_dx) ** 2
lambdas = inv['thick'] * np.NaN
lambdas[bed_shape < min_shape] = def_lambda
lambdas[inv['is_rectangular']] = 0.
# Last pixels of non-tidewater glaciers are always parabolic (see below)
if not gdir.is_tidewater and inv['is_last']:
lambdas[-5:] = np.nan
# Update bed_h where we now have a trapeze
w0_m = cl.widths * map_dx - lambdas * inv['thick']
b = 2 * w0_m
a = 2 * lambdas
with np.errstate(divide='ignore', invalid='ignore'):
thick = (np.sqrt(b ** 2 + 4 * a * section) - b) / a
ptrap = (lambdas != 0) & np.isfinite(lambdas)
bed_h[ptrap] = cl.surface_h[ptrap] - thick[ptrap]
# For the very last pixels of a glacier, the section might be zero after
# the inversion, and the bedshapes are chaotic. We interpolate from
# the downstream line. This is not volume-conserving
if not gdir.is_tidewater and inv['is_last']:
dic_ds = gdir.read_pickle('downstream_line')
bed_shape[-5:] = np.nan
# Interpolate
bed_shape = utils.interp_nans(np.append(bed_shape,
dic_ds['bedshapes'][0]))
bed_shape = utils.clip_min(bed_shape[:-1], min_shape)
# Correct the section volume
h = inv['thick']
section[-5:] = (2 / 3 * h * np.sqrt(4 * h / bed_shape))[-5:]
# Add the downstream
bed_shape = np.append(bed_shape, dic_ds['bedshapes'])
lambdas = np.append(lambdas, dic_ds['bedshapes'] * np.NaN)
section = np.append(section, dic_ds['bedshapes'] * 0.)
surface_h = np.append(surface_h, dic_ds['surface_h'])
bed_h = np.append(bed_h, dic_ds['surface_h'])
widths_m = np.append(widths_m, dic_ds['bedshapes'] * 0.)
line = dic_ds['full_line']
if gdir.is_tidewater and inv['is_last']:
# Continue the bed a little
n_points = cfg.PARAMS['calving_line_extension']
cf_slope = cfg.PARAMS['calving_front_slope']
deepening = n_points * cl.dx * map_dx * cf_slope
line = calving_glacier_downstream_line(line, n_points=n_points)
bed_shape = np.append(bed_shape, np.zeros(n_points))
lambdas = np.append(lambdas, np.zeros(n_points))
section = np.append(section, np.zeros(n_points))
# The bed slowly deepens
bed_down = np.linspace(bed_h[-1], bed_h[-1]-deepening, n_points)
bed_h = np.append(bed_h, bed_down)
surface_h = np.append(surface_h, bed_down)
widths_m = np.append(widths_m,
np.zeros(n_points) + np.mean(widths_m[-5:]))
nfl = MixedBedFlowline(line=line, dx=cl.dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
section=section, bed_shape=bed_shape,
is_trapezoid=np.isfinite(lambdas),
lambdas=lambdas,
widths_m=widths_m,
rgi_id=cl.rgi_id)
# Update attrs
nfl.mu_star = cl.mu_star
if cl.flows_to:
flows_to_ids.append(cls.index(cl.flows_to))
else:
flows_to_ids.append(None)
new_fls.append(nfl)
# Finalize the linkages
for fl, fid in zip(new_fls, flows_to_ids):
if fid:
fl.set_flows_to(new_fls[fid])
# Adds the line level
for fl in new_fls:
fl.order = line_order(fl)
# Write the data
gdir.write_pickle(new_fls, 'model_flowlines')
@entity_task(log, writes=['model_flowlines'])
def init_present_time_glacier(gdir):
"""Merges data from preprocessing tasks. First task after inversion!
This updates the `model_flowlines` file and creates a stand-alone numerical
glacier ready to run.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
# Some vars
invs = gdir.read_pickle('inversion_output')
if invs[0].get('is_trapezoid', None) is None:
return old_init_present_time_glacier(gdir)
map_dx = gdir.grid.dx
def_lambda = cfg.PARAMS['trapezoid_lambdas']
cls = gdir.read_pickle('inversion_flowlines')
# Fill the tributaries
new_fls = []
flows_to_ids = []
for cl, inv in zip(cls, invs):
# Get the data to make the model flowlines
line = cl.line
section = inv['volume'] / (cl.dx * map_dx)
surface_h = cl.surface_h
bed_h = surface_h - inv['thick']
widths_m = cl.widths * map_dx
assert np.all(widths_m > 0)
bed_shape = 4 * inv['thick'] / (cl.widths * map_dx) ** 2
lambdas = inv['thick'] * np.NaN
lambdas[inv['is_trapezoid']] = def_lambda
lambdas[inv['is_rectangular']] = 0.
# Where the flux and the thickness are zero we just assume a trapezoid:
lambdas[bed_shape == 0] = def_lambda
if not gdir.is_tidewater and inv['is_last']:
# for valley glaciers, simply add the downstream line
dic_ds = gdir.read_pickle('downstream_line')
bed_shape = np.append(bed_shape, dic_ds['bedshapes'])
lambdas = np.append(lambdas, dic_ds['bedshapes'] * np.NaN)
section = np.append(section, dic_ds['bedshapes'] * 0.)
surface_h = np.append(surface_h, dic_ds['surface_h'])
bed_h = np.append(bed_h, dic_ds['surface_h'])
widths_m = np.append(widths_m, dic_ds['bedshapes'] * 0.)
line = dic_ds['full_line']
if gdir.is_tidewater and inv['is_last']:
# Continue the bed a little
n_points = cfg.PARAMS['calving_line_extension']
cf_slope = cfg.PARAMS['calving_front_slope']
deepening = n_points * cl.dx * map_dx * cf_slope
line = calving_glacier_downstream_line(line, n_points=n_points)
bed_shape = np.append(bed_shape, np.zeros(n_points))
lambdas = np.append(lambdas, np.zeros(n_points))
section = np.append(section, np.zeros(n_points))
# The bed slowly deepens
bed_down = np.linspace(bed_h[-1], bed_h[-1]-deepening, n_points)
bed_h = np.append(bed_h, bed_down)
surface_h = np.append(surface_h, bed_down)
widths_m = np.append(widths_m,
np.zeros(n_points) + np.mean(widths_m[-5:]))
nfl = MixedBedFlowline(line=line, dx=cl.dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
section=section, bed_shape=bed_shape,
is_trapezoid=np.isfinite(lambdas),
lambdas=lambdas,
widths_m=widths_m,
rgi_id=cl.rgi_id,
gdir=gdir)
# Update attrs
nfl.mu_star = cl.mu_star
if cl.flows_to:
flows_to_ids.append(cls.index(cl.flows_to))
else:
flows_to_ids.append(None)
new_fls.append(nfl)
# Finalize the linkages
for fl, fid in zip(new_fls, flows_to_ids):
if fid:
fl.set_flows_to(new_fls[fid])
# Adds the line level
for fl in new_fls:
fl.order = line_order(fl)
# Write the data
gdir.write_pickle(new_fls, 'model_flowlines')
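# Usage sketch: like any entity task, this is called on a glacier directory
# once the inversion tasks have been run, e.g.
# init_present_time_glacier(gdir)
# fls = gdir.read_pickle('model_flowlines')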
def robust_model_run(*args, **kwargs):
warnings.warn('The task `robust_model_run` is deprecated.', FutureWarning)
return flowline_model_run(*args, **kwargs)
@entity_task(log)
def flowline_model_run(gdir, output_filesuffix=None, mb_model=None,
ys=None, ye=None, zero_initial_glacier=False,
init_model_fls=None, store_monthly_step=False,
fixed_geometry_spinup_yr=None,
store_model_geometry=None,
store_fl_diagnostics=None,
water_level=None,
evolution_model=FluxBasedModel, stop_criterion=None,
init_model_filesuffix=None, init_model_yr=None,
**kwargs):
"""Runs a model simulation with the default time stepping scheme.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
output_filesuffix : str
this adds a suffix to the output file (useful to avoid overwriting
previous experiments)
mb_model : :py:class:`core.MassBalanceModel`
a MassBalanceModel instance
ys : int
start year of the model run (default: from the config file)
ye : int
end year of the model run (default: from the config file)
zero_initial_glacier : bool
if true, the ice thickness is set to zero before the simulation
init_model_filesuffix : str
if you want to start from a previous model run state. Can be
combined with `init_model_yr`
init_model_yr : int
the year of the initial run you want to start from. The default
is to take the last year of the simulation.
init_model_fls : []
list of flowlines to use to initialise the model (the default is the
present_time_glacier file from the glacier directory)
store_monthly_step : bool
whether to store the diagnostic data at a monthly time step or not
(default is yearly)
store_model_geometry : bool
whether to store the full model geometry run file to disk or not.
(new in OGGM v1.4.1: default is to follow
cfg.PARAMS['store_model_geometry'])
store_fl_diagnostics : bool
whether to store the model flowline diagnostics to disk or not.
(default is to follow cfg.PARAMS['store_fl_diagnostics'])
evolution_model : :class:oggm.core.FlowlineModel
which evolution model to use. Default: FluxBasedModel
water_level : float
the water level. It should be zero m a.s.l., but:
- sometimes the frontal elevation is unrealistically high (or low).
- lake terminating glaciers
- other uncertainties
The default is to take the water level obtained from the ice
thickness inversion.
stop_criterion : func
a function which decides when to stop the simulation. See
`run_until_and_store` documentation for more information.
kwargs : dict
kwargs to pass to the FluxBasedModel instance
fixed_geometry_spinup_yr : int
if set to an integer, the model will artificially prolongate
all outputs of run_until_and_store to encompass all time stamps
starting from the chosen year. The only output affected are the
glacier wide diagnostic files - all other outputs are set
to constants during "spinup"
"""
if init_model_filesuffix is not None:
fp = gdir.get_filepath('model_geometry',
filesuffix=init_model_filesuffix)
fmod = FileModel(fp)
if init_model_yr is None:
init_model_yr = fmod.last_yr
fmod.run_until(init_model_yr)
init_model_fls = fmod.fls
mb_elev_feedback = kwargs.get('mb_elev_feedback', 'annual')
if store_monthly_step and (mb_elev_feedback == 'annual'):
warnings.warn("The mass-balance used to drive the ice dynamics model "
"is updated yearly. If you want the output to be stored "
"monthly and also reflect monthly processes, "
"set store_monthly_step=True and "
"mb_elev_feedback='monthly'. This is not recommended "
"though: for monthly MB applications, we recommend to "
"use the `run_with_hydro` task.")
if cfg.PARAMS['use_inversion_params_for_run']:
diag = gdir.get_diagnostics()
fs = diag.get('inversion_fs', cfg.PARAMS['fs'])
glen_a = diag.get('inversion_glen_a', cfg.PARAMS['glen_a'])
else:
fs = cfg.PARAMS['fs']
glen_a = cfg.PARAMS['glen_a']
kwargs.setdefault('fs', fs)
kwargs.setdefault('glen_a', glen_a)
if store_model_geometry is None:
store_model_geometry = cfg.PARAMS['store_model_geometry']
if store_fl_diagnostics is None:
store_fl_diagnostics = cfg.PARAMS['store_fl_diagnostics']
if store_model_geometry:
geom_path = gdir.get_filepath('model_geometry',
filesuffix=output_filesuffix,
delete=True)
else:
geom_path = False
if store_fl_diagnostics:
fl_diag_path = gdir.get_filepath('fl_diagnostics',
filesuffix=output_filesuffix,
delete=True)
else:
fl_diag_path = False
diag_path = gdir.get_filepath('model_diagnostics',
filesuffix=output_filesuffix,
delete=True)
if init_model_fls is None:
fls = gdir.read_pickle('model_flowlines')
else:
fls = copy.deepcopy(init_model_fls)
if zero_initial_glacier:
for fl in fls:
fl.thick = fl.thick * 0.
if (cfg.PARAMS['use_kcalving_for_run'] and gdir.is_tidewater and
water_level is None):
# check for water level
water_level = gdir.get_diagnostics().get('calving_water_level', None)
if water_level is None:
raise InvalidWorkflowError('This tidewater glacier seems to not '
'have been inverted with the '
'`find_inversion_calving` task. Set '
"PARAMS['use_kcalving_for_run'] to "
'`False` or set `water_level` '
'to prevent this error.')
model = evolution_model(fls, mb_model=mb_model, y0=ys,
inplace=True,
is_tidewater=gdir.is_tidewater,
is_lake_terminating=gdir.is_lake_terminating,
water_level=water_level,
**kwargs)
with np.warnings.catch_warnings():
# For operational runs we ignore the warnings
np.warnings.filterwarnings('ignore', category=RuntimeWarning)
model.run_until_and_store(ye,
geom_path=geom_path,
diag_path=diag_path,
fl_diag_path=fl_diag_path,
store_monthly_step=store_monthly_step,
fixed_geometry_spinup_yr=fixed_geometry_spinup_yr,
stop_criterion=stop_criterion)
return model
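# Usage sketch (`mb` is assumed to be an existing MassBalanceModel instance
# and the filesuffix is illustrative): run 100 years and also store the
# flowline diagnostics.
# model = flowline_model_run(gdir, mb_model=mb, ys=0, ye=100,
#                            store_fl_diagnostics=True,
#                            output_filesuffix='_example')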
@entity_task(log)
def run_random_climate(gdir, nyears=1000, y0=None, halfsize=15,
bias=None, seed=None, temperature_bias=None,
precipitation_factor=None,
store_monthly_step=False,
store_model_geometry=None,
store_fl_diagnostics=None,
climate_filename='climate_historical',
climate_input_filesuffix='',
output_filesuffix='', init_model_fls=None,
init_model_filesuffix=None,
init_model_yr=None,
zero_initial_glacier=False,
unique_samples=False, **kwargs):
"""Runs the random mass-balance model for a given number of years.
This will initialize a
:py:class:`oggm.core.massbalance.MultipleFlowlineMassBalance`,
and run a :py:func:`oggm.core.flowline.flowline_model_run`.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
nyears : int
length of the simulation
y0 : int, optional
central year of the random climate period. The default is to be
centred on t*.
halfsize : int, optional
the half-size of the time window (window size = 2 * halfsize + 1)
bias : float
bias of the mb model. Default is to use the calibrated one, which
is often a better idea. For t* experiments it can be useful to set it
to zero
seed : int
seed for the random generator. If you ignore this, the runs will be
different each time. Setting it to a fixed seed across glaciers can
be useful if you want to have the same climate years for all of them
temperature_bias : float
add a bias to the temperature timeseries
precipitation_factor: float
factor by which to multiply the precipitation time series
default is None, which means that the precipitation factor from the
calibration (cfg.PARAMS['prcp_scaling_factor']) is applied
store_monthly_step : bool
whether to store the diagnostic data at a monthly time step or not
(default is yearly)
store_model_geometry : bool
whether to store the full model geometry run file to disk or not.
(new in OGGM v1.4.1: default is to follow
cfg.PARAMS['store_model_geometry'])
store_fl_diagnostics : bool
whether to store the model flowline diagnostics to disk or not.
(default is to follow cfg.PARAMS['store_fl_diagnostics'])
climate_filename : str
name of the climate file, e.g. 'climate_historical' (default) or
'gcm_data'
climate_input_filesuffix: str
filesuffix for the input climate file
output_filesuffix : str
this adds a suffix to the output file (useful to avoid overwriting
previous experiments)
init_model_filesuffix : str
if you want to start from a previous model run state. Can be
combined with `init_model_yr`
init_model_yr : int
the year of the initial run you want to start from. The default
is to take the last year of the simulation.
init_model_fls : []
list of flowlines to use to initialise the model (the default is the
present_time_glacier file from the glacier directory)
zero_initial_glacier : bool
if true, the ice thickness is set to zero before the simulation
unique_samples: bool
if true, chosen random mass-balance years will only be available once
per random climate period-length
if false, every model year will be chosen from the random climate
period with the same probability
kwargs : dict
kwargs to pass to the FluxBasedModel instance
"""
mb = MultipleFlowlineMassBalance(gdir, mb_model_class=RandomMassBalance,
y0=y0, halfsize=halfsize,
bias=bias, seed=seed,
filename=climate_filename,
input_filesuffix=climate_input_filesuffix,
unique_samples=unique_samples)
if temperature_bias is not None:
mb.temp_bias = temperature_bias
if precipitation_factor is not None:
mb.prcp_fac = precipitation_factor
return flowline_model_run(gdir, output_filesuffix=output_filesuffix,
mb_model=mb, ys=0, ye=nyears,
store_monthly_step=store_monthly_step,
store_model_geometry=store_model_geometry,
store_fl_diagnostics=store_fl_diagnostics,
init_model_filesuffix=init_model_filesuffix,
init_model_yr=init_model_yr,
init_model_fls=init_model_fls,
zero_initial_glacier=zero_initial_glacier,
**kwargs)
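# Usage sketch (filesuffix illustrative): 300 years of climate randomly
# sampled from the 31-yr window centred on 1985, with a fixed seed so that
# all glaciers see the same sequence of years.
# run_random_climate(gdir, nyears=300, y0=1985, halfsize=15, seed=1,
#                    output_filesuffix='_rdn_example')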
@entity_task(log)
def run_constant_climate(gdir, nyears=1000, y0=None, halfsize=15,
bias=None, temperature_bias=None,
precipitation_factor=None,
store_monthly_step=False,
store_model_geometry=None,
store_fl_diagnostics=None,
init_model_filesuffix=None,
init_model_yr=None,
output_filesuffix='',
climate_filename='climate_historical',
climate_input_filesuffix='',
init_model_fls=None,
zero_initial_glacier=False,
use_avg_climate=False,
**kwargs):
"""Runs the constant mass-balance model for a given number of years.
This will initialize a
:py:class:`oggm.core.massbalance.MultipleFlowlineMassBalance`,
and run a :py:func:`oggm.core.flowline.flowline_model_run`.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
nyears : int
length of the simulation (default: 1000 years, which is usually long
enough to reach an equilibrium)
y0 : int
central year of the requested climate period. The default is to be
centred on t*.
halfsize : int, optional
the half-size of the time window (window size = 2 * halfsize + 1)
bias : float
bias of the mb model. Default is to use the calibrated one, which
is often a better idea. For t* experiments it can be useful to set it
to zero
temperature_bias : float
add a bias to the temperature timeseries
precipitation_factor: float
factor by which to multiply the precipitation time series
default is None, which means that the precipitation factor from the
calibration (cfg.PARAMS['prcp_scaling_factor']) is applied
store_monthly_step : bool
whether to store the diagnostic data at a monthly time step or not
(default is yearly)
store_model_geometry : bool
whether to store the full model geometry run file to disk or not.
(new in OGGM v1.4.1: default is to follow
cfg.PARAMS['store_model_geometry'])
store_fl_diagnostics : bool
whether to store the model flowline diagnostics to disk or not.
(default is to follow cfg.PARAMS['store_fl_diagnostics'])
init_model_filesuffix : str
if you want to start from a previous model run state. Can be
combined with `init_model_yr`
init_model_yr : int
the year of the initial run you want to start from. The default
is to take the last year of the simulation.
climate_filename : str
name of the climate file, e.g. 'climate_historical' (default) or
'gcm_data'
climate_input_filesuffix: str
filesuffix for the input climate file
output_filesuffix : str
this adds a suffix to the output file (useful to avoid overwriting
previous experiments)
zero_initial_glacier : bool
if true, the ice thickness is set to zero before the simulation
init_model_fls : []
list of flowlines to use to initialise the model (the default is the
present_time_glacier file from the glacier directory)
use_avg_climate : bool
use the average climate instead of the correct MB model. This is
for testing only!!!
kwargs : dict
kwargs to pass to the FluxBasedModel instance
"""
if use_avg_climate:
mb_model = AvgClimateMassBalance
else:
mb_model = ConstantMassBalance
mb = MultipleFlowlineMassBalance(gdir, mb_model_class=mb_model,
y0=y0, halfsize=halfsize,
bias=bias, filename=climate_filename,
input_filesuffix=climate_input_filesuffix)
if temperature_bias is not None:
mb.temp_bias = temperature_bias
if precipitation_factor is not None:
mb.prcp_fac = precipitation_factor
return flowline_model_run(gdir, output_filesuffix=output_filesuffix,
mb_model=mb, ys=0, ye=nyears,
store_monthly_step=store_monthly_step,
store_model_geometry=store_model_geometry,
store_fl_diagnostics=store_fl_diagnostics,
init_model_filesuffix=init_model_filesuffix,
init_model_yr=init_model_yr,
init_model_fls=init_model_fls,
zero_initial_glacier=zero_initial_glacier,
**kwargs)
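# Usage sketch (filesuffix illustrative): same call pattern as
# `run_random_climate`, but with a constant mass balance representative of
# the chosen window, here with an additional warming of 0.5 °C.
# run_constant_climate(gdir, nyears=500, y0=1999, halfsize=15,
#                      temperature_bias=0.5, output_filesuffix='_ct_example')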
@entity_task(log)
def run_from_climate_data(gdir, ys=None, ye=None, min_ys=None, max_ys=None,
fixed_geometry_spinup_yr=None,
store_monthly_step=False,
store_model_geometry=None,
store_fl_diagnostics=None,
climate_filename='climate_historical',
climate_input_filesuffix='', output_filesuffix='',
init_model_filesuffix=None, init_model_yr=None,
init_model_fls=None, zero_initial_glacier=False,
bias=None, temperature_bias=None,
precipitation_factor=None, **kwargs):
""" Runs a glacier with climate input from e.g. CRU or a GCM.
This will initialize a
:py:class:`oggm.core.massbalance.MultipleFlowlineMassBalance`,
and run a :py:func:`oggm.core.flowline.flowline_model_run`.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
ys : int
start year of the model run (default: from the glacier geometry
date if init_model_filesuffix is None, else init_model_yr)
ye : int
end year of the model run (default: last year of the provided
climate file)
min_ys : int
if you want to impose a minimum start year, regardless of whether the
glacier inventory date is earlier (e.g. if the climate data does not
reach back that far).
max_ys : int
if you want to impose a maximum start year, regardless of whether the
glacier inventory date is later (e.g. if the climate data does not
reach that far).
store_monthly_step : bool
whether to store the diagnostic data at a monthly time step or not
(default is yearly)
store_model_geometry : bool
whether to store the full model geometry run file to disk or not.
(new in OGGM v1.4.1: default is to follow
cfg.PARAMS['store_model_geometry'])
store_fl_diagnostics : bool
whether to store the model flowline diagnostics to disk or not.
(default is to follow cfg.PARAMS['store_fl_diagnostics'])
climate_filename : str
name of the climate file, e.g. 'climate_historical' (default) or
'gcm_data'
climate_input_filesuffix: str
filesuffix for the input climate file
output_filesuffix : str
for the output file
init_model_filesuffix : str
if you want to start from a previous model run state. Can be
combined with `init_model_yr`
init_model_yr : int
the year of the initial run you want to start from. The default
is to take the last year of the simulation.
init_model_fls : []
list of flowlines to use to initialise the model (the default is the
present_time_glacier file from the glacier directory).
Ignored if `init_model_filesuffix` is set
zero_initial_glacier : bool
if true, the ice thickness is set to zero before the simulation
bias : float
bias of the mb model. Default is to use the calibrated one, which
is often a better idea. For t* experiments it can be useful to set it
to zero
temperature_bias : float
add a bias to the temperature timeseries
precipitation_factor: float
factor by which to multiply the precipitation time series
default is None, which means that the precipitation factor from the
calibration (cfg.PARAMS['prcp_scaling_factor']) is applied
kwargs : dict
kwargs to pass to the FluxBasedModel instance
fixed_geometry_spinup_yr : int
if set to an integer, the model will artificially prolongate
all outputs of run_until_and_store to encompass all time stamps
starting from the chosen year. The only output affected are the
glacier wide diagnostic files - all other outputs are set
to constants during "spinup"
"""
if init_model_filesuffix is not None:
fp = gdir.get_filepath('model_geometry',
filesuffix=init_model_filesuffix)
fmod = FileModel(fp)
if init_model_yr is None:
init_model_yr = fmod.last_yr
fmod.run_until(init_model_yr)
init_model_fls = fmod.fls
if ys is None:
ys = init_model_yr
try:
rgi_year = gdir.rgi_date.year
except AttributeError:
rgi_year = gdir.rgi_date
# Take from rgi date if not set yet
if ys is None:
# The RGI timestamp is in calendar date - we convert to hydro date,
# i.e. 2003 becomes 2004 if hydro_month is not 1 (January)
# (so that we don't count the MB year 2003 in the simulation)
# See also: https://github.com/OGGM/oggm/issues/1020
# even if hydro_month is 1, we prefer to start from Jan 2004
# as in the Alps the RGI is from Aug 2003
ys = rgi_year + 1
if ys <= rgi_year and init_model_filesuffix is None:
log.warning('You are attempting to run_from_climate_data at dates '
'prior to the RGI inventory date. This may indicate some '
'problem in your workflow. Consider using '
'`fixed_geometry_spinup_yr` for example.')
# Final crop
if min_ys is not None:
ys = ys if ys > min_ys else min_ys
if max_ys is not None:
ys = ys if ys < max_ys else max_ys
mb = MultipleFlowlineMassBalance(gdir, mb_model_class=PastMassBalance,
filename=climate_filename, bias=bias,
input_filesuffix=climate_input_filesuffix)
if temperature_bias is not None:
mb.temp_bias = temperature_bias
if precipitation_factor is not None:
mb.prcp_fac = precipitation_factor
if ye is None:
# Decide from climate (we can run the last year with data as well)
ye = mb.flowline_mb_models[0].ye + 1
return flowline_model_run(gdir, output_filesuffix=output_filesuffix,
mb_model=mb, ys=ys, ye=ye,
store_monthly_step=store_monthly_step,
store_model_geometry=store_model_geometry,
store_fl_diagnostics=store_fl_diagnostics,
init_model_fls=init_model_fls,
zero_initial_glacier=zero_initial_glacier,
fixed_geometry_spinup_yr=fixed_geometry_spinup_yr,
**kwargs)
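# Usage sketch (all filesuffixes are illustrative): a historical run from
# the RGI date to the end of the baseline climate file, followed by a
# projection restarting from that state with GCM data.
# run_from_climate_data(gdir, output_filesuffix='_hist_example')
# run_from_climate_data(gdir, climate_filename='gcm_data',
#                       climate_input_filesuffix='_my_gcm',
#                       init_model_filesuffix='_hist_example',
#                       output_filesuffix='_proj_example')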
@entity_task(log)
def run_with_hydro(gdir, run_task=None, store_monthly_hydro=False,
fixed_geometry_spinup_yr=None, ref_area_from_y0=False,
ref_area_yr=None, ref_geometry_filesuffix=None,
**kwargs):
"""Run the flowline model and add hydro diagnostics.
TODOs:
- Add the possibility to record MB during run to improve performance
(requires change in API)
- ...
Parameters
----------
run_task : func
any of the ``run_*`` tasks in the oggm.flowline module.
The mass-balance model used needs to have the `add_climate` output
kwarg available though.
store_monthly_hydro : bool
also compute monthly hydrological diagnostics. The monthly outputs
are stored in 2D fields (years, months)
ref_area_yr : int
the hydrological output is computed over a reference area, which
by default is the largest area covered by the glacier in the simulation
period. Use this kwarg to force the reference area to the state of the
glacier at the provided simulation year.
ref_area_from_y0 : bool
overwrite ref_area_yr to the first year of the timeseries
ref_geometry_filesuffix : str
this kwarg allows to copy the reference area from a previous simulation
(useful for projections with historical spinup for example).
Set to a model_geometry file filesuffix that is present in the
current directory (e.g. `_historical` for pre-processed gdirs).
If set, ref_area_yr and ref_area_from_y0 refer to this file instead.
fixed_geometry_spinup_yr : int
if set to an integer, the model will artificially prolongate
all outputs of run_until_and_store to encompass all time stamps
starting from the chosen year. The only output affected are the
glacier wide diagnostic files - all other outputs are set
to constants during "spinup"
**kwargs : all valid kwargs for ``run_task``
"""
# Make sure it'll return something
kwargs['return_value'] = True
# Check that kwargs and params are compatible
if kwargs.get('store_monthly_step', False):
raise InvalidParamsError('run_with_hydro only compatible with '
'store_monthly_step=False.')
if kwargs.get('mb_elev_feedback', 'annual') != 'annual':
raise InvalidParamsError('run_with_hydro only compatible with '
"mb_elev_feedback='annual' (yes, even "
"when asked for monthly hydro output).")
if not cfg.PARAMS['store_model_geometry']:
raise InvalidParamsError('run_with_hydro only works with '
"PARAMS['store_model_geometry'] = True "
"for now.")
if fixed_geometry_spinup_yr is not None:
kwargs['fixed_geometry_spinup_yr'] = fixed_geometry_spinup_yr
out = run_task(gdir, **kwargs)
if out is None:
raise InvalidWorkflowError('The run task ({}) did not run '
'successfully.'.format(run_task.__name__))
do_spinup = fixed_geometry_spinup_yr is not None
if do_spinup:
start_dyna_model_yr = out.y0
# Mass balance model used during the run
mb_mod = out.mb_model
# Glacier geometry during the run
suffix = kwargs.get('output_filesuffix', '')
# We start by fetching the reference model geometry
# The one we just computed
fmod = FileModel(gdir.get_filepath('model_geometry', filesuffix=suffix))
# The last one is the final state - we can't compute MB for that
years = fmod.years[:-1]
if ref_geometry_filesuffix:
if not ref_area_from_y0 and ref_area_yr is None:
raise InvalidParamsError('If `ref_geometry_filesuffix` is set, '
'users need to specify `ref_area_from_y0`'
' or `ref_area_yr`')
# User provided
fmod_ref = FileModel(gdir.get_filepath('model_geometry',
filesuffix=ref_geometry_filesuffix))
else:
# ours as well
fmod_ref = fmod
# Check input
if ref_area_from_y0:
ref_area_yr = fmod_ref.years[0]
# Geometry at year yr to start with + off-glacier snow bucket
if ref_area_yr is not None:
if ref_area_yr not in fmod_ref.years:
raise InvalidParamsError('The chosen ref_area_yr is not '
'available!')
fmod_ref.run_until(ref_area_yr)
bin_area_2ds = []
bin_elev_2ds = []
ref_areas = []
snow_buckets = []
for fl in fmod_ref.fls:
# Glacier area on bins
bin_area = fl.bin_area_m2
ref_areas.append(bin_area)
snow_buckets.append(bin_area * 0)
# Output 2d data
shape = len(years), len(bin_area)
bin_area_2ds.append(np.empty(shape, np.float64))
bin_elev_2ds.append(np.empty(shape, np.float64))
# Ok now fetch all geometry data in a first loop
# We do that because we might want to get the largest possible area (default)
# and we want to minimize the number of calls to run_until
for i, yr in enumerate(years):
fmod.run_until(yr)
for fl_id, (fl, bin_area_2d, bin_elev_2d) in \
enumerate(zip(fmod.fls, bin_area_2ds, bin_elev_2ds)):
# Time varying bins
bin_area_2d[i, :] = fl.bin_area_m2
bin_elev_2d[i, :] = fl.surface_h
if ref_area_yr is None:
# Ok we get the max area instead
for ref_area, bin_area_2d in zip(ref_areas, bin_area_2ds):
ref_area[:] = bin_area_2d.max(axis=0)
# Ok now we have arrays, we can work with that
# -> second time varying loop is for mass-balance
months = [1]
seconds = cfg.SEC_IN_YEAR
ntime = len(years) + 1
oshape = (ntime, 1)
if store_monthly_hydro:
months = np.arange(1, 13)
seconds = cfg.SEC_IN_MONTH
oshape = (ntime, 12)
out = {
'off_area': {
'description': 'Off-glacier area',
'unit': 'm 2',
'data': np.zeros(ntime),
},
'on_area': {
'description': 'On-glacier area',
'unit': 'm 2',
'data': np.zeros(ntime),
},
'melt_off_glacier': {
'description': 'Off-glacier melt',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'melt_on_glacier': {
'description': 'On-glacier melt',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'melt_residual_off_glacier': {
'description': 'Off-glacier melt due to MB model residual',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'melt_residual_on_glacier': {
'description': 'On-glacier melt due to MB model residual',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'liq_prcp_off_glacier': {
'description': 'Off-glacier liquid precipitation',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'liq_prcp_on_glacier': {
'description': 'On-glacier liquid precipitation',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'snowfall_off_glacier': {
'description': 'Off-glacier solid precipitation',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'snowfall_on_glacier': {
'description': 'On-glacier solid precipitation',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'snow_bucket': {
'description': 'Off-glacier snow reservoir (state variable)',
'unit': 'kg',
'data': np.zeros(oshape),
},
'model_mb': {
'description': 'Annual mass-balance from dynamical model',
'unit': 'kg yr-1',
'data': np.zeros(ntime),
},
'residual_mb': {
'description': 'Difference (before correction) between mb model and dyn model melt',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
}
# Initialize
fmod.run_until(years[0])
prev_model_vol = fmod.volume_m3
for i, yr in enumerate(years):
# Now the loop over the months
for m in months:
# A bit silly but avoid double counting in monthly ts
off_area_out = 0
on_area_out = 0
for fl_id, (ref_area, snow_bucket, bin_area_2d, bin_elev_2d) in \
enumerate(zip(ref_areas, snow_buckets, bin_area_2ds, bin_elev_2ds)):
bin_area = bin_area_2d[i, :]
bin_elev = bin_elev_2d[i, :]
# Make sure we have no negative contribution when glaciers are out
off_area = utils.clip_min(ref_area - bin_area, 0)
try:
if store_monthly_hydro:
flt_yr = utils.date_to_floatyear(int(yr), m)
mb_out = mb_mod.get_monthly_mb(bin_elev, fl_id=fl_id,
year=flt_yr,
add_climate=True)
mb, _, _, prcp, prcpsol = mb_out
else:
mb_out = mb_mod.get_annual_mb(bin_elev, fl_id=fl_id,
year=yr, add_climate=True)
mb, _, _, prcp, prcpsol = mb_out
except ValueError as e:
if 'too many values to unpack' in str(e):
raise InvalidWorkflowError('Run with hydro needs a MB '
'model able to add climate '
'info to `get_annual_mb`.')
raise
# Here we use mass (kg yr-1) not ice volume
mb *= seconds * cfg.PARAMS['ice_density']
# Bias of the mb model is a fake melt term that we need to deal with
# This is here for correction purposes later
mb_bias = mb_mod.bias * seconds / cfg.SEC_IN_YEAR
liq_prcp_on_g = (prcp - prcpsol) * bin_area
liq_prcp_off_g = (prcp - prcpsol) * off_area
prcpsol_on_g = prcpsol * bin_area
prcpsol_off_g = prcpsol * off_area
# IMPORTANT: this does not guarantee that melt cannot be negative
# the reason is the MB residual that here can only be understood
# as a fake melt process.
# In particular at the monthly scale this can lead to negative
# or winter positive melt - we try to mitigate this
# issue at the end of the year
melt_on_g = (prcpsol - mb) * bin_area
melt_off_g = (prcpsol - mb) * off_area
if mb_mod.bias == 0:
# Here we can add an additional sanity check
# These thresholds are arbitrary for now. TODO: remove
if np.any(melt_on_g < -1):
log.warning('WARNING: Melt on glacier is negative although it '
'should not be. If you have time, check '
"what's going on. Melt: {}".format(melt_on_g.min()))
if np.any(melt_off_g < -1):
log.warning('WARNING: Melt off glacier is negative although it '
'should not be. If you have time, check '
"what's going on. Melt: {}".format(melt_off_g.min()))
# We clip anyway
melt_on_g = utils.clip_min(melt_on_g, 0)
melt_off_g = utils.clip_min(melt_off_g, 0)
# This is the bad boy
bias_on_g = mb_bias * bin_area
bias_off_g = mb_bias * off_area
# Update bucket with accumulation and melt
snow_bucket += prcpsol_off_g
# It can only melt that much
melt_off_g = np.where((snow_bucket - melt_off_g) >= 0, melt_off_g, snow_bucket)
# Update bucket
snow_bucket -= melt_off_g
# This is recomputed each month but well
off_area_out += np.sum(off_area)
on_area_out += np.sum(bin_area)
# Monthly out
out['melt_off_glacier']['data'][i, m-1] += np.sum(melt_off_g)
out['melt_on_glacier']['data'][i, m-1] += np.sum(melt_on_g)
out['melt_residual_off_glacier']['data'][i, m-1] += np.sum(bias_off_g)
out['melt_residual_on_glacier']['data'][i, m-1] += np.sum(bias_on_g)
out['liq_prcp_off_glacier']['data'][i, m-1] += np.sum(liq_prcp_off_g)
out['liq_prcp_on_glacier']['data'][i, m-1] += np.sum(liq_prcp_on_g)
out['snowfall_off_glacier']['data'][i, m-1] += np.sum(prcpsol_off_g)
out['snowfall_on_glacier']['data'][i, m-1] += np.sum(prcpsol_on_g)
# Snow bucket is a state variable - stored at end of timestamp
if store_monthly_hydro:
if m == 12:
out['snow_bucket']['data'][i+1, 0] += np.sum(snow_bucket)
else:
out['snow_bucket']['data'][i, m] += np.sum(snow_bucket)
else:
out['snow_bucket']['data'][i+1, m-1] += np.sum(snow_bucket)
# Update the annual data
out['off_area']['data'][i] = off_area_out
out['on_area']['data'][i] = on_area_out
# If monthly, put the residual where we can
if store_monthly_hydro and mb_mod.bias != 0:
for melt, bias in zip(
[
out['melt_on_glacier']['data'][i, :],
out['melt_off_glacier']['data'][i, :],
],
[
out['melt_residual_on_glacier']['data'][i, :],
out['melt_residual_off_glacier']['data'][i, :],
],
):
real_melt = melt - bias
to_correct = utils.clip_min(real_melt, 0)
to_correct_sum = np.sum(to_correct)
if (to_correct_sum > 1e-7) and (np.sum(melt) > 0):
# Ok we correct the positive melt instead
fac = np.sum(melt) / to_correct_sum
melt[:] = to_correct * fac
if do_spinup and yr < start_dyna_model_yr:
residual_mb = 0
model_mb = (out['snowfall_on_glacier']['data'][i, :].sum() -
out['melt_on_glacier']['data'][i, :].sum())
else:
# Correct for mass-conservation and match the ice-dynamics model
fmod.run_until(yr + 1)
model_mb = (fmod.volume_m3 - prev_model_vol) * cfg.PARAMS['ice_density']
prev_model_vol = fmod.volume_m3
reconstructed_mb = (out['snowfall_on_glacier']['data'][i, :].sum() -
out['melt_on_glacier']['data'][i, :].sum())
residual_mb = model_mb - reconstructed_mb
# Now correct
if residual_mb == 0:
pass
elif store_monthly_hydro:
# We try to correct the melt only where there is some
asum = out['melt_on_glacier']['data'][i, :].sum()
if asum > 1e-7 and (residual_mb / asum < 1):
# try to find a fac
fac = 1 - residual_mb / asum
corr = out['melt_on_glacier']['data'][i, :] * fac
residual_mb = out['melt_on_glacier']['data'][i, :] - corr
out['melt_on_glacier']['data'][i, :] = corr
else:
# We simply spread over the months
residual_mb /= 12
out['melt_on_glacier']['data'][i, :] = (out['melt_on_glacier']['data'][i, :] -
residual_mb)
else:
# We simply apply the residual - no choice here
out['melt_on_glacier']['data'][i, :] = (out['melt_on_glacier']['data'][i, :] -
residual_mb)
out['model_mb']['data'][i] = model_mb
out['residual_mb']['data'][i] = residual_mb
# Convert to xarray
out_vars = cfg.PARAMS['store_diagnostic_variables']
ods = xr.Dataset()
ods.coords['time'] = fmod.years
if store_monthly_hydro:
ods.coords['month_2d'] = ('month_2d', np.arange(1, 13))
# For the user later
sm = cfg.PARAMS['hydro_month_' + mb_mod.hemisphere]
ods.coords['calendar_month_2d'] = ('month_2d', (np.arange(12) + sm - 1) % 12 + 1)
for varname, d in out.items():
data = d.pop('data')
if varname not in out_vars:
continue
if len(data.shape) == 2:
# First the annual agg
if varname == 'snow_bucket':
# Snowbucket is a state variable
ods[varname] = ('time', data[:, 0])
else:
# Last year is never good
data[-1, :] = np.NaN
ods[varname] = ('time', np.sum(data, axis=1))
# Then the monthly ones
if store_monthly_hydro:
ods[varname + '_monthly'] = (('time', 'month_2d'), data)
else:
assert varname != 'snow_bucket'
data[-1] = np.NaN
ods[varname] = ('time', data)
for k, v in d.items():
ods[varname].attrs[k] = v
# Append the output to the existing diagnostics
fpath = gdir.get_filepath('model_diagnostics', filesuffix=suffix)
ods.to_netcdf(fpath, mode='a')
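# Usage sketch (filesuffix illustrative): wrap any `run_*` task to add
# hydrological diagnostics; all other kwargs are passed on to the wrapped
# task. Requires cfg.PARAMS['store_model_geometry'] = True (see check above).
# run_with_hydro(gdir, run_task=run_from_climate_data,
#                store_monthly_hydro=True,
#                output_filesuffix='_hydro_example')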
@entity_task(log)
def run_dynamic_spinup(gdir, init_model_filesuffix=None,
init_model_fls=None,
climate_input_filesuffix='',
evolution_model=FluxBasedModel,
spinup_period=20, spinup_start_yr=None, min_spinup_period=10,
yr_rgi=None, minimise_for='area', precision_percent=1,
first_guess_t_bias=-2, t_bias_max_step_length=2,
maxiter=30, output_filesuffix='_dynamic_spinup',
store_model_geometry=True, store_fl_diagnostics=False,
store_model_evolution=True, ignore_errors=True):
"""Dynamically spinup the glacier to match area or volume at the RGI date.
This task allows doing simulations in the recent past (before the glacier
inventory date), when the state of the glacier was unknown. The task
becomes increasingly difficult the further back in time one goes
(see publications by Eis et al. for a theoretical background), but it can
work quite well for short periods. Note that the solution is not unique.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
init_model_filesuffix : str or None
if you want to start from a previous model run state. This state
should be at the time of the RGI date (yr_rgi).
init_model_fls : []
list of flowlines to use to initialise the model (the default is the
present_time_glacier file from the glacier directory).
Ignored if `init_model_filesuffix` is set
climate_input_filesuffix : str
filesuffix for the input climate file
evolution_model : :class:oggm.core.FlowlineModel
which evolution model to use. Default: FluxBasedModel
spinup_period : int
The length of the spinup period. The start date of the historical run
is defined as "yr_rgi - spinup_period". Minimum allowed value is 10. If
the provided climate data starts at a year later than
(yr_rgi - spinup_period), the spinup_period is set to
(yr_rgi - yr_climate_start). Caution: if spinup_start_yr is set, the
spinup_period is ignored.
Default is 20.
spinup_start_yr : int or None
The start year of the dynamic spinup. If the provided year is before
the start of the provided climate data, the first year of the climate data is used.
If set it overrides the spinup_period.
Default is None.
min_spinup_period : int
If the dynamic spinup function fails with the initial 'spinup_period',
a shorter period is tried. Here you can define the minimum period to
try.
Default is 10.
yr_rgi : int
The rgi date, at which we want to match area or volume.
If None, gdir.rgi_date + 1 is used (the default).
minimise_for : str
The variable we want to match at yr_rgi. Default is 'area'.
Options are 'area' or 'volume'.
precision_percent : float
Gives the precision we want to match in percent. Default is 1,
meaning the difference must be within 1% of the given value
(area or volume).
first_guess_t_bias : float
The initial guess for the temperature bias for the spinup
MassBalanceModel in °C.
Default is -2.
t_bias_max_step_length: float
Defines the maximum allowed change of t_bias between two iterations.
This is needed to avoid too large changes.
Default is 2.
maxiter : int
Maximum number of minimisation iterations per spinup period. If reached
and 'ignore_errors=False', an error is raised. Default is 30.
output_filesuffix : str
for the output file
store_model_geometry : bool
whether to store the full model geometry run file to disk or not.
Default is True.
store_fl_diagnostics : bool
whether to store the model flowline diagnostics to disk or not.
Default is False.
store_model_evolution : bool
if True the complete dynamic spinup run is saved (complete evolution
of the model during the dynamic spinup), if False only the final model
state after the dynamic spinup run is saved. (Hint: if
store_model_evolution = True and ignore_errors = True and an Error
during the dynamic spinup occurs the stored model evolution is only one
year long)
Default is True.
ignore_errors : bool
If True the function saves the model without a dynamic spinup using
the 'output_filesuffix', if an error during the dynamic spinup occurs.
This is useful if you want to keep glaciers for the following tasks.
Default is True.
Returns
-------
:py:class:`oggm.core.flowline.evolution_model`
The final dynamically spun-up model. Type depends on the selected
evolution_model, by default a FluxBasedModel.
"""
if yr_rgi is None:
yr_rgi = gdir.rgi_date + 1 # + 1 converted to hydro years
yr_min = gdir.get_climate_info()['baseline_hydro_yr_0']
if init_model_filesuffix is not None:
fp = gdir.get_filepath('model_geometry',
filesuffix=init_model_filesuffix)
fmod = FileModel(fp)
init_model_fls = fmod.fls
if init_model_fls is None:
fls_spinup = gdir.read_pickle('model_flowlines')
else:
fls_spinup = copy.deepcopy(init_model_fls)
# MassBalance for actual run from yr_spinup to yr_rgi
mb_historical = MultipleFlowlineMassBalance(gdir,
fls=fls_spinup,
mb_model_class=PastMassBalance,
filename='climate_historical',
input_filesuffix=climate_input_filesuffix)
# here we define the file-paths for the output
if store_model_geometry:
geom_path = gdir.get_filepath('model_geometry',
filesuffix=output_filesuffix,
delete=True)
else:
geom_path = False
if store_fl_diagnostics:
fl_diag_path = gdir.get_filepath('fl_diagnostics',
filesuffix=output_filesuffix,
delete=True)
else:
fl_diag_path = False
diag_path = gdir.get_filepath('model_diagnostics',
filesuffix=output_filesuffix,
delete=True)
# this function saves a model without conducting a dynamic spinup, but with
# the provided output_filesuffix, so following tasks can find it.
# This is necessary if yr_rgi < yr_min + min_spinup_period or if the dynamic spinup failed.
def save_model_without_dynamic_spinup():
gdir.add_to_diagnostics('run_dynamic_spinup_success', False)
yr_use = np.clip(yr_rgi, yr_min, None)
model_dynamic_spinup_end = evolution_model(fls_spinup,
mb_historical,
y0=yr_use)
with np.warnings.catch_warnings():
# For operational runs we ignore the warnings
np.warnings.filterwarnings('ignore', category=RuntimeWarning)
model_dynamic_spinup_end.run_until_and_store(
yr_use,
geom_path=geom_path,
diag_path=diag_path,
fl_diag_path=fl_diag_path, )
return model_dynamic_spinup_end
if yr_rgi < yr_min + min_spinup_period:
log.warning('The provided rgi_date is smaller than yr_climate_start + '
'min_spinup_period, therefore no dynamic spinup is '
'conducted and the original flowlines are saved at the '
'provided rgi_date or the start year of the provided '
'climate data (if yr_climate_start > yr_rgi)')
if ignore_errors:
model_dynamic_spinup_end = save_model_without_dynamic_spinup()
return model_dynamic_spinup_end
else:
raise RuntimeError('The difference between the rgi_date and the '
'start year of the climate data is too small to '
'run a dynamic spinup!')
# here we define the flowline we want to match, it is assumed that during
# the inversion the volume was calibrated towards the consensus estimate
# (as it is by default), but this means the volume is matched on a
# regional scale, maybe we could use here the individual glacier volume
fls_ref = copy.deepcopy(fls_spinup)
if minimise_for == 'area':
unit = 'km2'
other_variable = 'volume'
other_unit = 'km3'
elif minimise_for == 'volume':
unit = 'km3'
other_variable = 'area'
other_unit = 'km2'
else:
raise NotImplementedError
cost_var = f'{minimise_for}_{unit}'
reference_value = np.sum([getattr(f, cost_var) for f in fls_ref])
other_reference_value = np.sum([getattr(f, f'{other_variable}_{other_unit}')
for f in fls_ref])
# only used to check performance of function
forward_model_runs = [0]
# the actual spinup run
def run_model_with_spinup_to_rgi_date(t_bias):
forward_model_runs.append(forward_model_runs[-1] + 1)
# with t_bias the glacier state after spinup is changed between iterations
mb_spinup.temp_bias = t_bias
# run the spinup
model_spinup = evolution_model(copy.deepcopy(fls_spinup),
mb_spinup,
y0=0)
model_spinup.run_until(2 * halfsize_spinup)
# if the glacier is completely gone, report this via the ice_free flag
ice_free = False
if np.isclose(model_spinup.volume_km3, 0.):
ice_free = True
# Now conduct the actual model run to the rgi date
model_historical = evolution_model(model_spinup.fls,
mb_historical,
y0=yr_spinup)
if store_model_evolution:
model_historical.run_until_and_store(
yr_rgi,
geom_path=geom_path,
diag_path=diag_path,
fl_diag_path=fl_diag_path, )
else:
model_historical.run_until(yr_rgi)
return model_historical, ice_free
def cost_fct(t_bias, model_dynamic_spinup_end):
# actual model run
model_dynamic_spinup, ice_free = run_model_with_spinup_to_rgi_date(t_bias)
# save the final model for later
model_dynamic_spinup_end.append(copy.deepcopy(model_dynamic_spinup))
value_ref = np.sum([getattr(f, cost_var) for f in fls_ref])
value_dynamic_spinup = getattr(model_dynamic_spinup, cost_var)
# calculate the mismatch in percent
cost = (value_dynamic_spinup - value_ref) / value_ref * 100
return cost, ice_free
def init_cost_fct():
model_dynamic_spinup_end = []
def c_fun(t_bias):
return cost_fct(t_bias, model_dynamic_spinup_end)
return c_fun, model_dynamic_spinup_end
def minimise_with_spline_fit(fct_to_minimise):
# defines limits of t_bias in accordance with the maximal allowed change
# between iterations
t_bias_limits = [first_guess_t_bias - t_bias_max_step_length,
first_guess_t_bias + t_bias_max_step_length]
t_bias_guess = []
mismatch = []
# these two variables indicate that the limits were already adapted to
# avoid an ice_free or out_of_domain error
was_ice_free = False
was_out_of_domain = False
was_errors = [was_out_of_domain, was_ice_free]
def get_mismatch(t_bias):
t_bias = copy.deepcopy(t_bias)
# first check if the new t_bias is in limits
if t_bias < t_bias_limits[0]:
# was the smaller limit already executed, if not first do this
if t_bias_limits[0] not in t_bias_guess:
t_bias = copy.deepcopy(t_bias_limits[0])
else:
# smaller limit was already used, check if it was
# already newly defined with glacier exceeding domain
if was_errors[0]:
raise RuntimeError('Not able to minimise without '
'exceeding the domain! Best '
f'mismatch '
f'{np.min(np.abs(mismatch))}%')
else:
# ok we set a new lower limit
t_bias_limits[0] = t_bias_limits[0] - \
t_bias_max_step_length
elif t_bias > t_bias_limits[1]:
# was the larger limit already executed, if not first do this
if t_bias_limits[1] not in t_bias_guess:
t_bias = copy.deepcopy(t_bias_limits[1])
else:
# larger limit was already used, check if it was
# already newly defined with ice free glacier
if was_errors[1]:
raise RuntimeError('Not able to minimise without ice '
'free glacier after spinup! Best '
'mismatch '
f'{np.min(np.abs(mismatch))}%')
else:
# ok we set a new upper limit
t_bias_limits[1] = t_bias_limits[1] + \
t_bias_max_step_length
# now clip t_bias with limits
t_bias = np.clip(t_bias, t_bias_limits[0], t_bias_limits[1])
# now start with mismatch calculation
# if error during spinup (ice_free or out_of_domain) this defines
# how much t_bias is changed to look for an error free glacier spinup
t_bias_search_change = 0.1
# maximum number of changes to look for an error free glacier
max_iterations = int(t_bias_max_step_length / t_bias_search_change)
is_out_of_domain = True
is_ice_free_spinup = True
is_ice_free_end = True
is_first_guess_ice_free = False
is_first_guess_out_of_domain = False
doing_first_guess = (len(mismatch) == 0)
define_new_lower_limit = False
define_new_upper_limit = False
iteration = 0
while ((is_out_of_domain | is_ice_free_spinup | is_ice_free_end) &
(iteration < max_iterations)):
try:
tmp_mismatch, is_ice_free_spinup = fct_to_minimise(t_bias)
# check if mismatch is inf -> reference value is 0
if np.isinf(tmp_mismatch):
raise RuntimeError('Mismatch is INF, this indicates '
'that the reference value is 0.!')
# no error occurred, so we are not outside the domain
is_out_of_domain = False
# check if we are ice_free after spinup, if so we search
# for a new upper limit for t_bias
if is_ice_free_spinup:
was_errors[1] = True
define_new_upper_limit = True
# special treatment if it is the first guess
if np.isclose(t_bias, first_guess_t_bias) & \
doing_first_guess:
is_first_guess_ice_free = True
# here directly jump to the smaller limit
t_bias = copy.deepcopy(t_bias_limits[0])
elif is_first_guess_ice_free & doing_first_guess:
# make large steps if it is first guess
t_bias = t_bias - t_bias_max_step_length
else:
t_bias = np.round(t_bias - t_bias_search_change,
decimals=1)
if np.isclose(t_bias, t_bias_guess).any():
iteration = copy.deepcopy(max_iterations)
# check if we are ice_free at the end of the model run, if
# so we use the lower t_bias limit and change the limit if
# needed
elif np.isclose(tmp_mismatch, -100.):
is_ice_free_end = True
was_errors[1] = True
define_new_upper_limit = True
# special treatment if it is the first guess
if np.isclose(t_bias, first_guess_t_bias) &\
doing_first_guess:
is_first_guess_ice_free = True
# here directly jump to the smaller limit
t_bias = copy.deepcopy(t_bias_limits[0])
elif is_first_guess_ice_free & doing_first_guess:
# make large steps if it is first guess
t_bias = t_bias - t_bias_max_step_length
else:
# if the lower limit was already used, change it and use it
if t_bias == t_bias_limits[0]:
t_bias_limits[0] = t_bias_limits[0] - \
t_bias_max_step_length
t_bias = copy.deepcopy(t_bias_limits[0])
else:
# otherwise just try with a colder t_bias
t_bias = np.round(t_bias - t_bias_search_change,
decimals=1)
else:
is_ice_free_end = False
except RuntimeError as e:
# check if the glacier grew too large
if 'Glacier exceeds domain boundaries, at year:' in f'{e}':
# ok we were outside the domain, therefore we search
# for a new lower limit for t_bias in 0.1 °C steps
is_out_of_domain = True
define_new_lower_limit = True
was_errors[0] = True
# special treatment if it is the first guess
if np.isclose(t_bias, first_guess_t_bias) &\
doing_first_guess:
is_first_guess_out_of_domain = True
# here directly jump to the larger limit
t_bias = t_bias_limits[1]
elif is_first_guess_out_of_domain & doing_first_guess:
# make large steps if it is first guess
t_bias = t_bias + t_bias_max_step_length
else:
t_bias = np.round(t_bias + t_bias_search_change,
decimals=1)
if np.isclose(t_bias, t_bias_guess).any():
iteration = copy.deepcopy(max_iterations)
else:
# otherwise this error can not be handled here
raise RuntimeError(e)
iteration += 1
if iteration >= max_iterations:
# ok we were not able to find a mismatch without error
# (ice_free or out of domain), so we try to raise a descriptive
# RuntimeError
if len(mismatch) == 0:
# unfortunately we were not able to conduct a single
# error-free run
msg = 'Not able to conduct one error free run. Error is '
if is_first_guess_ice_free:
msg += f'"ice_free" with last t_bias of {t_bias}.'
elif is_first_guess_out_of_domain:
msg += f'"out_of_domain" with last t_bias of {t_bias}.'
else:
raise RuntimeError('Something unexpected happened!')
raise RuntimeError(msg)
elif define_new_lower_limit:
raise RuntimeError('Not able to minimise without '
'exceeding the domain! Best '
f'mismatch '
f'{np.min(np.abs(mismatch))}%')
elif define_new_upper_limit:
raise RuntimeError('Not able to minimise without ice '
'free glacier after spinup! Best mismatch '
f'{np.min(np.abs(mismatch))}%')
elif is_ice_free_end:
raise RuntimeError('Not able to find a t_bias so that '
'glacier is not ice free at the end! '
'(Last t_bias '
f'{t_bias + t_bias_max_step_length} °C)')
else:
raise RuntimeError('Something unexpected happened during '
'definition of new t_bias limits!')
else:
# if we found a new limit set it
if define_new_upper_limit & define_new_lower_limit:
# we can end here if we are at the first guess and took
# a too large step
was_errors[0] = False
was_errors[1] = False
if t_bias <= t_bias_limits[0]:
t_bias_limits[0] = t_bias
t_bias_limits[1] = t_bias_limits[0] + \
t_bias_max_step_length
elif t_bias >= t_bias_limits[1]:
t_bias_limits[1] = t_bias
t_bias_limits[0] = t_bias_limits[1] - \
t_bias_max_step_length
else:
if is_first_guess_ice_free:
t_bias_limits[1] = t_bias
elif is_out_of_domain:
t_bias_limits[0] = t_bias
else:
raise RuntimeError('Did not expect to get here!')
elif define_new_lower_limit:
t_bias_limits[0] = copy.deepcopy(t_bias)
if t_bias >= t_bias_limits[1]:
# this happens when the first guess was out of domain
was_errors[0] = False
t_bias_limits[1] = t_bias_limits[0] + \
t_bias_max_step_length
elif define_new_upper_limit:
t_bias_limits[1] = copy.deepcopy(t_bias)
if t_bias <= t_bias_limits[0]:
# this happens when the first guess was ice free
was_errors[1] = False
t_bias_limits[0] = t_bias_limits[1] - \
t_bias_max_step_length
return tmp_mismatch, float(t_bias)
# first guess
new_mismatch, new_t_bias = get_mismatch(first_guess_t_bias)
t_bias_guess.append(new_t_bias)
mismatch.append(new_mismatch)
if abs(mismatch[-1]) < precision_percent:
return t_bias_guess, mismatch
# second (arbitrary) guess is given depending on the outcome of first
# guess; when the mismatch is 100%, t_bias is changed by
# t_bias_max_step_length
step = mismatch[-1] * t_bias_max_step_length / 100
new_mismatch, new_t_bias = get_mismatch(t_bias_guess[0] + step)
t_bias_guess.append(new_t_bias)
mismatch.append(new_mismatch)
if abs(mismatch[-1]) < precision_percent:
return t_bias_guess, mismatch
# Now start with spline fit for guessing
while len(t_bias_guess) < maxiter:
# get next guess from spline (fit a piecewise linear function to previously
# calculated (mismatch, t_bias) pairs and get the t_bias value where
# mismatch=0 from this fitted curve)
sort_index = np.argsort(np.array(mismatch))
tck = interpolate.splrep(np.array(mismatch)[sort_index],
np.array(t_bias_guess)[sort_index],
k=1)
# here we catch interpolation errors (two different t_bias with
# same mismatch), could happen if one t_bias was close to a newly
# defined limit
if np.isnan(tck[1]).any():
if was_errors[0]:
raise RuntimeError('Not able to minimise without '
'exceeding the domain! Best '
f'mismatch '
f'{np.min(np.abs(mismatch))}%')
elif was_errors[1]:
raise RuntimeError('Not able to minimise without ice '
'free glacier! Best mismatch '
f'{np.min(np.abs(mismatch))}%')
else:
raise RuntimeError('Not able to minimise! Problem is '
'unknown, need to check by hand! Best '
'mismatch '
f'{np.min(np.abs(mismatch))}%')
new_mismatch, new_t_bias = get_mismatch(float(interpolate.splev(0,
tck)
))
t_bias_guess.append(new_t_bias)
mismatch.append(new_mismatch)
if abs(mismatch[-1]) < precision_percent:
return t_bias_guess, mismatch
# Ok, when we end here the spinup could not find a satisfying match after
# maxiter iterations
raise RuntimeError(f'Could not find a mismatch smaller than '
f'{precision_percent}% (only '
f'{np.min(np.abs(mismatch))}%) in {maxiter} '
f'iterations!')
# define function for the actual minimisation
c_fun, model_dynamic_spinup_end = init_cost_fct()
# define the MassBalanceModels for different spinup periods and try to
# minimise, if minimisation fails a shorter spinup period is used
# (first a spinup period between initial period and 'min_spinup_period'
# years and the second try is to use a period of 'min_spinup_period' years,
# if it still fails the actual error is raised)
if spinup_start_yr is not None:
spinup_period_initial = min(yr_rgi - spinup_start_yr, yr_rgi - yr_min)
else:
spinup_period_initial = min(spinup_period, yr_rgi - yr_min)
if spinup_period_initial <= min_spinup_period:
spinup_periods_to_try = [min_spinup_period]
else:
# try out a maximum of three different spinup_periods
spinup_periods_to_try = [spinup_period_initial,
int((spinup_period_initial + min_spinup_period) / 2),
min_spinup_period]
for spinup_period in spinup_periods_to_try:
yr_spinup = yr_rgi - spinup_period
# define spinup MassBalance
# spinup is running for 'yr_rgi - yr_spinup' years, using a
# ConstantMassBalance
y0_spinup = (yr_spinup + yr_rgi) / 2
halfsize_spinup = yr_rgi - y0_spinup
mb_spinup = MultipleFlowlineMassBalance(gdir,
fls=fls_spinup,
mb_model_class=ConstantMassBalance,
filename='climate_historical',
input_filesuffix=climate_input_filesuffix,
y0=y0_spinup,
halfsize=halfsize_spinup)
# try to conduct minimisation, if an error occurred try shorter spinup
# period
try:
final_t_bias_guess, final_mismatch = minimise_with_spline_fit(c_fun)
# ok no error occurred so we succeeded
break
except RuntimeError as e:
# if the last spinup period was min_spinup_period the dynamic
# spinup failed
if spinup_period == min_spinup_period:
log.warning('No dynamic spinup could be conducted and the '
'original model with no spinup is saved using the '
f'provided output_filesuffix "{output_filesuffix}". '
f'The error message of the dynamic spinup is: {e}')
if ignore_errors:
model_dynamic_spinup_end = save_model_without_dynamic_spinup()
return model_dynamic_spinup_end
else:
# delete all files which could be saved during the previous
# iterations
if geom_path and os.path.exists(geom_path):
os.remove(geom_path)
if fl_diag_path and os.path.exists(fl_diag_path):
os.remove(fl_diag_path)
if diag_path and os.path.exists(diag_path):
os.remove(diag_path)
raise RuntimeError(e)
    # hurray, dynamic spinup was successful
gdir.add_to_diagnostics('run_dynamic_spinup_success', True)
# also save some other stuff
gdir.add_to_diagnostics('temp_bias_dynamic_spinup',
float(final_t_bias_guess[-1]))
gdir.add_to_diagnostics('dynamic_spinup_period',
int(spinup_period))
gdir.add_to_diagnostics('dynamic_spinup_forward_model_runs',
int(forward_model_runs[-1]))
gdir.add_to_diagnostics(f'{minimise_for}_mismatch_dynamic_spinup_{unit}_'
f'percent',
float(final_mismatch[-1]))
gdir.add_to_diagnostics(f'reference_{minimise_for}_dynamic_spinup_{unit}',
float(reference_value))
gdir.add_to_diagnostics('dynamic_spinup_other_variable_reference',
float(other_reference_value))
other_mismatch = (getattr(model_dynamic_spinup_end[-1],
f'{other_variable}_{other_unit}') -
other_reference_value)
gdir.add_to_diagnostics('dynamic_spinup_mismatch_other_variable_percent',
float(other_mismatch / other_reference_value * 100))
# here only save the final model state if store_model_evolution = False
if not store_model_evolution:
with np.warnings.catch_warnings():
# For operational runs we ignore the warnings
np.warnings.filterwarnings('ignore', category=RuntimeWarning)
model_dynamic_spinup_end[-1].run_until_and_store(
yr_rgi,
geom_path=geom_path,
diag_path=diag_path,
fl_diag_path=fl_diag_path, )
return model_dynamic_spinup_end[-1]
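# --- Illustrative sketch (not part of the original OGGM module) ---
# A minimal, hypothetical call of the dynamic spinup task whose tail is defined
# above. The task name `run_dynamic_spinup` is an assumption here; the keyword
# names mirror variables used in the code above, but the values are made up.
def _example_dynamic_spinup_usage(gdir):
    model = run_dynamic_spinup(
        gdir,
        minimise_for='area',           # match the glacier area at the RGI date
        precision_percent=1.0,         # accept a mismatch of up to 1 %
        output_filesuffix='_spinup',   # hypothetical suffix for the run files
        ignore_errors=True)            # fall back to a run without spinup
    return model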
def zero_glacier_stop_criterion(model, state, n_zero=5, n_years=20):
"""Stop the simulation when the glacier volume is zero for a given period
To be passed as kwarg to `run_until_and_store`.
Parameters
----------
model : the model class
state : a dict
n_zero : number of 0 volume years
n_years : number of years to consider
Returns
-------
stop (True/False), new_state
"""
if state is None:
# OK first call
state = {}
if 'was_zero' not in state:
# Maybe the state is from another criteria
state['was_zero'] = []
if model.yr != int(model.yr):
# We consider only full model years
return False, state
if model.volume_m3 == 0:
if len(state['was_zero']) < n_years:
return False, state
if np.sum(state['was_zero'][-n_years:]) >= n_zero - 1:
return True, state
else:
state['was_zero'] = np.append(state['was_zero'], [True])
else:
state['was_zero'] = np.append(state['was_zero'], [False])
return False, state
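# --- Illustrative sketch (not part of the original OGGM module) ---
# Hypothetical example of attaching the stop criterion above to a model run.
# The model class `FluxBasedModel`, the inputs `fls` and `mb_model`, and the
# keyword name `stop_criterion` of `run_until_and_store` are assumptions here.
def _example_zero_volume_stop(fls, mb_model):
    model = FluxBasedModel(fls, mb_model=mb_model, y0=2000)
    # stop once the volume has been zero for 5 of the last 20 full model years
    return model.run_until_and_store(3000,
                                     stop_criterion=zero_glacier_stop_criterion)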
def spec_mb_stop_criterion(model, state, spec_mb_threshold=50, n_years=60):
"""Stop the simulation when the specific MB is close to zero for a given period.
To be passed as kwarg to `run_until_and_store`.
Parameters
----------
model : the model class
state : a dict
spec_mb_threshold : the specific MB threshold (in mm w.e. per year)
n_years : number of years to consider
Returns
-------
stop (True/False), new_state
"""
if state is None:
# OK first call
state = {}
if 'spec_mb' not in state:
# Maybe the state is from another criteria
state['spec_mb'] = []
state['volume_m3'] = []
if model.yr != int(model.yr):
# We consider only full model years
return False, state
area = model.area_m2
volume = model.volume_m3
if area < 1 or len(state['volume_m3']) == 0:
spec_mb = np.NaN
else:
spec_mb = (volume - state['volume_m3'][-1]) / area * cfg.PARAMS['ice_density']
state['spec_mb'] = np.append(state['spec_mb'], [spec_mb])
state['volume_m3'] = np.append(state['volume_m3'], [volume])
if len(state['spec_mb']) < n_years:
return False, state
mbavg = np.nanmean(state['spec_mb'][-n_years:])
if abs(mbavg) <= spec_mb_threshold:
return True, state
else:
return False, state
def equilibrium_stop_criterion(model, state,
spec_mb_threshold=50, n_years_specmb=60,
n_zero=5, n_years_zero=20):
"""Stop the simulation when of og spec_mb and zero_volume criteria are met.
To be passed as kwarg to `run_until_and_store`.
Parameters
----------
model : the model class
state : a dict
spec_mb_threshold : the specific MB threshold (in mm w.e. per year)
n_years_specmb : number of years to consider for the spec_mb criterion
n_zero : number of 0 volume years
n_years_zero : number of years to consider for the zero volume criterion.
Returns
-------
stop (True/False), new_state
"""
if state is None:
# OK first call
state = {}
s1, state = zero_glacier_stop_criterion(model, state, n_years=n_years_zero,
n_zero=n_zero)
s2, state = spec_mb_stop_criterion(model, state, n_years=n_years_specmb,
spec_mb_threshold=spec_mb_threshold)
return s1 or s2, state
def merge_to_one_glacier(main, tribs, filename='climate_historical',
input_filesuffix=''):
"""Merge multiple tributary glacier flowlines to a main glacier
This function will merge multiple tributary glaciers to a main glacier
and write modified `model_flowlines` to the main GlacierDirectory.
The provided tributaries must have an intersecting downstream line.
To be sure about this, use `intersect_downstream_lines` first.
This function is mainly responsible to reproject the flowlines, set
flowline attributes and to copy additional files, like the necessary climate
files.
Parameters
----------
main : oggm.GlacierDirectory
The new GDir of the glacier of interest
tribs : list or dictionary containing oggm.GlacierDirectories
true tributary glaciers to the main glacier
filename: str
Baseline climate file
input_filesuffix: str
Filesuffix to the climate file
"""
# read flowlines of the Main glacier
fls = main.read_pickle('model_flowlines')
mfl = fls.pop(-1) # remove main line from list and treat separately
for trib in tribs:
# read tributary flowlines and append to list
tfls = trib.read_pickle('model_flowlines')
# copy climate file and local_mustar to new gdir
# if we have a merge-merge situation we need to copy multiple files
rgiids = set([fl.rgi_id for fl in tfls])
for uid in rgiids:
if len(rgiids) == 1:
# we do not have a merge-merge situation
in_id = ''
out_id = trib.rgi_id
else:
in_id = '_' + uid
out_id = uid
climfile_in = filename + in_id + input_filesuffix + '.nc'
climfile_out = filename + '_' + out_id + input_filesuffix + '.nc'
shutil.copyfile(os.path.join(trib.dir, climfile_in),
os.path.join(main.dir, climfile_out))
_m = os.path.basename(trib.get_filepath('local_mustar')).split('.')
muin = _m[0] + in_id + '.' + _m[1]
muout = _m[0] + '_' + out_id + '.' + _m[1]
shutil.copyfile(os.path.join(trib.dir, muin),
os.path.join(main.dir, muout))
# sort flowlines descending
tfls.sort(key=lambda x: x.order, reverse=True)
# loop over tributaries and reproject to main glacier
for nr, tfl in enumerate(tfls):
# 1. Step: Change projection to the main glaciers grid
_line = salem.transform_geometry(tfl.line,
crs=trib.grid, to_crs=main.grid)
# 2. set new line
tfl.set_line(_line)
# 3. set map attributes
dx = [shpg.Point(tfl.line.coords[i]).distance(
shpg.Point(tfl.line.coords[i+1]))
for i, pt in enumerate(tfl.line.coords[:-1])] # get distance
# and check if equally spaced
if not np.allclose(dx, np.mean(dx), atol=1e-2):
raise RuntimeError('Flowline is not evenly spaced.')
tfl.dx = np.mean(dx).round(2)
tfl.map_dx = mfl.map_dx
tfl.dx_meter = tfl.map_dx * tfl.dx
# 3. remove attributes, they will be set again later
tfl.inflow_points = []
tfl.inflows = []
# 4. set flows to, mainly to update flows_to_point coordinates
if tfl.flows_to is not None:
tfl.set_flows_to(tfl.flows_to)
# append tributary flowlines to list
fls += tfls
# add main flowline to the end
fls = fls + [mfl]
# Finally write the flowlines
main.write_pickle(fls, 'model_flowlines')
def clean_merged_flowlines(gdir, buffer=None):
"""Order and cut merged flowlines to size.
After matching flowlines were found and merged to one glacier directory
this function makes them nice:
There should only be one flowline per bed, so overlapping lines have to be
    cut, attributed to another flowline and ordered.
Parameters
----------
gdir : oggm.GlacierDirectory
The GDir of the glacier of interest
buffer: float
Buffer around the flowlines to find overlaps
"""
# No buffer does not work
if buffer is None:
buffer = cfg.PARAMS['kbuffer']
# Number of pixels to arbitrarily remove at junctions
lid = int(cfg.PARAMS['flowline_junction_pix'])
fls = gdir.read_pickle('model_flowlines')
# separate the main main flowline
mainfl = fls.pop(-1)
# split fls in main and tribs
mfls = [fl for fl in fls if fl.flows_to is None]
tfls = [fl for fl in fls if fl not in mfls]
# --- first treat the main flowlines ---
# sort by order and length as a second choice
mfls.sort(key=lambda x: (x.order, len(x.inflows), x.length_m),
reverse=False)
merged = []
# for fl1 in mfls:
while len(mfls) > 0:
fl1 = mfls.pop(0)
ol_index = [] # list of index from first overlap
# loop over other main lines and main main line
for fl2 in mfls + [mainfl]:
# calculate overlap, maybe use larger buffer here only to find it
_overlap = fl1.line.intersection(fl2.line.buffer(buffer*2))
            # calculate index of first overlap if overlap length > 0
oix = 9999
if _overlap.length > 0 and fl1 != fl2 and fl2.flows_to != fl1:
if isinstance(_overlap, shpg.MultiLineString):
if _overlap[0].coords[0] == fl1.line.coords[0]:
                        # if the head of overlap is same as the first line,
                        # best guess is that the heads are close together!
_ov1 = _overlap[1].coords[1]
else:
_ov1 = _overlap[0].coords[1]
else:
_ov1 = _overlap.coords[1]
for _i, _p in enumerate(fl1.line.coords):
if _p == _ov1:
oix = _i
                    # low indices are more likely due to a wrong overlap
if oix < 10:
oix = 9999
ol_index.append(oix)
ol_index = np.array(ol_index)
if np.all(ol_index == 9999):
log.warning('Glacier %s could not be merged, removed!' %
fl1.rgi_id)
# remove possible tributary flowlines
tfls = [fl for fl in tfls if fl.rgi_id != fl1.rgi_id]
# skip rest of this while loop
continue
        # make this based on first overlap, but consider order and/or length
minx = ol_index[ol_index <= ol_index.min()+10][-1]
i = np.where(ol_index == minx)[0][-1]
_olline = (mfls + [mainfl])[i]
# 1. cut line to size
_line = fl1.line
bufferuse = buffer
while bufferuse > 0:
_overlap = _line.intersection(_olline.line.buffer(bufferuse))
_linediff = _line.difference(_overlap) # cut to new line
# if the tributary flowline is longer than the main line,
# _line will contain multiple LineStrings: only keep the first
if isinstance(_linediff, shpg.MultiLineString):
_linediff = _linediff[0]
if len(_linediff.coords) < 10:
bufferuse -= 1
else:
break
if bufferuse <= 0:
            log.warning('Glacier %s would be too short after merge, removed!' %
fl1.rgi_id)
# remove possible tributary flowlines
tfls = [fl for fl in tfls if fl.rgi_id != fl1.rgi_id]
# skip rest of this while loop
continue
# remove cfg.PARAMS['flowline_junction_pix'] from the _line
# gives a bigger gap at the junction and makes sure the last
# point is not corrupted in terms of spacing
_line = shpg.LineString(_linediff.coords[:-lid])
# 2. set new line
fl1.set_line(_line)
# 3. set flow to attributes. This also adds inflow values to other
fl1.set_flows_to(_olline)
# change the array size of tributary flowline attributes
for atr, value in fl1.__dict__.items():
if atr in ['_ptrap', '_prec']:
# those are indices, remove those above nx
fl1.__setattr__(atr, value[value < fl1.nx])
elif isinstance(value, np.ndarray) and (len(value) > fl1.nx):
# those are actual parameters on the grid
fl1.__setattr__(atr, value[:fl1.nx])
merged.append(fl1)
allfls = merged + tfls
# now check all lines for possible cut offs
for fl in allfls:
try:
fl.flows_to_indice
except AssertionError:
mfl = fl.flows_to
# remove it from original
mfl.inflow_points.remove(fl.flows_to_point)
mfl.inflows.remove(fl)
prdis = mfl.line.project(fl.tail)
mfl_keep = mfl
while mfl.flows_to is not None:
prdis2 = mfl.flows_to.line.project(fl.tail)
if prdis2 < prdis:
mfl_keep = mfl
prdis = prdis2
mfl = mfl.flows_to
# we should be good to add this line here
fl.set_flows_to(mfl_keep.flows_to)
allfls = allfls + [mainfl]
for fl in allfls:
fl.inflows = []
fl.inflow_points = []
if hasattr(fl, '_lazy_flows_to_indice'):
delattr(fl, '_lazy_flows_to_indice')
if hasattr(fl, '_lazy_inflow_indices'):
delattr(fl, '_lazy_inflow_indices')
for fl in allfls:
if fl.flows_to is not None:
fl.set_flows_to(fl.flows_to)
for fl in allfls:
fl.order = line_order(fl)
# order flowlines in descending way
allfls.sort(key=lambda x: x.order, reverse=False)
# assert last flowline is main flowline
assert allfls[-1] == mainfl
# Finally write the flowlines
gdir.write_pickle(allfls, 'model_flowlines')
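# --- Illustrative sketch (not part of the original OGGM module) ---
# Hypothetical workflow combining the two merge helpers defined above.
# `main_gdir` and `trib_gdirs` are assumed, pre-processed GlacierDirectories
# whose downstream lines intersect (see `intersect_downstream_lines`).
def _example_merge_workflow(main_gdir, trib_gdirs):
    # reproject tributary flowlines onto the main glacier and copy climate files
    merge_to_one_glacier(main_gdir, trib_gdirs,
                         filename='climate_historical', input_filesuffix='')
    # cut overlapping lines to size and re-order them
    clean_merged_flowlines(main_gdir)
    return main_gdir.read_pickle('model_flowlines')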
|
TimoRoth/oggm
|
oggm/core/flowline.py
|
Python
|
bsd-3-clause
| 198,026
|
[
"Gaussian",
"NetCDF"
] |
aeb6f8da5f46087c31b932e2bfb5f66cc3c5cbcf3d2ad869ba417e798b788a90
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Eli
#
# Created: 06/04/2014
# Copyright: (c) Eli 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
def main():
pass
if __name__ == '__main__':
main()
import sys
#This script filters a data file by id's listed one per line in another file
ids = open("C:/rnaseq/mirna_data/clusters/10rep_redo_deseq-edger/DEseq2_1cpm3redo_nopara2_logFCall.txt", "r")
#Take header from ID file & initialize empty dict
head_ids = ids.readline().strip("\n")
idlist1 = {}
#id_count = 0
#Make dict of ID's (key) & selected variables/annotations (values)
for line in ids:
name = line.strip('\n').split('\t')[0]
#name = name[4:]
#if len(name.split('-')) > 3:
# name = '-'.join(name.split('-')[1:])
#arm = name.split('-')[-1]
#name = '-'.join(['-'.join(name.split('-')[0:2]), arm])
name = name.strip('cin-')
#print name
#name = name[-5:]
#values = '\t'.join(line.strip('\n').split('\t')[1:3])
values = '\t'.join(line.strip('\n').split('\t')[1:4])
#if "ENSCINP" in values:
# values2 = values[7:]
# values = "ENSCINT" + values2
#values = '\t'.join(line.strip('\n').split('\t')[2:])
#values = values[0:-3]
if name in idlist1 and len(name) > 0:
if values in idlist1[name]:
continue
else:
idlist1[name].append(values)
elif len(name) > 0:
idlist1[name] = [values]
#id_count+=1
#if id_count%1000==0:
# print id_count
ids.close()
#Debugging code below:
#print 'idlist1:', len(idlist1)
#sorted(idlist1)
#print idlist1
idlist1 = ['miR-216']
data = open("C:/rnaseq/coexpression/mirna-mrna/logfc_pearson/1cpm3_5rpkm3_redo2_edger_logfcValues_pearson_targetscan_deseq2logfc_mirs2.txt", "r")
#Output merged header & initialize retrieved list + row counter
#sys.stdout.write("LogFC.consensus" + '\t' + data.readline())
#sys.stdout.write("LogFC.consensus" + '\t' + '\t'.join(data.readline().split('\t')[0:3]) + '\n')
#sys.stdout.write(data.readline())
#data.readline()
matched = 0
idlist2 = {}
out = 0
#Match ID's between lists and return associated variables
for line in data:
#print line
name = line.strip('\n').split('\t')[6]
#print name
#name = name.split('|')[3].split('.')[0] # for first ID from BLAST target
#name = name[0:7]
#if name[-1].isalpha():
# name = name[0:-1]
#print name
#variables = line.strip('\n').split('\t')[5,9,10]
#idlist2[name] = line.split('\t')[1]
descr = line.strip('\n').split('\t')[1]
#if "," in descr:
# descr = descr.split(',')[0]
#name = line[1:20] # for trimmed encin gene name
#kh = '.'.join(line.split('\t')[1].split(':')[1].split('.')[0:4])
#Loop through input dict ID's and search for "name" in associated variables
#for item in idlist1: #Loop through keys (refseq)
if name in idlist1: #match primary ID's
#for item in idlist1[name].split(' '):
sys.stdout.write('\t'.join(idlist1[0]) + '\t' + line)
#EXCHANGE ID'S BUT KEEP REST OF LINE/DESCRIPTION
# sys.stdout.write(descr + '\t' + '\t'.join(idlist1[name]) + '\n')
#else:
# sys.stdout.write(descr + '\t' + name + '\n')
#print idlist1[name]
#sys.stdout.write(line.strip('\n') + '\t' + '\t'.join(idlist1[name]) + '\n')
#continue
#matched +=1
else:
sys.stdout.write(line)
#if name in idlist1[item]: #Check for each ID in the name variable
# idlist2[name] = variables
# values = idlist1[item]
# stop = 1
#while stop <= len(values):
# if descr in idlist1[name]:
# sys.stdout.write(line)
# out+=1
#print out
#Return items in matched list (idlist2) using associations from idlist1
#for mir in idlist1:
# if mir in idlist2:
# sys.stdout.write(mir + '\t' + '\t'.join(idlist2[mir]) + '\n')
# for mrna in idlist1[mir]:
# if mrna in idlist2:
# sys.stdout.write(mrna+ '\t' + '\t'.join(idlist2[mrna]) + '\n')
#if len(idlist1[name]) > 1:
# for value in idlist1[name]: #Print all values on separate lines
# sys.stdout.write(value + '\t' + line)
#sys.stdout.write(descr + '\t' + value + '\t' + name + '\t' + '\t'.join(variables) + '\n')
# sys.stdout.write(value + '\t' + '\t'.join(line.split('\t')[0:]))
#sys.stdout.write(value + '\t' + '\t'.join(line.split('\t')[0:3]) + '\n')
# out+=1
#else:
# sys.stdout.write('\t'.join(idlist1[name]) + '\t' + line)
#sys.stdout.write(descr + '\t' + ".\t".join(idlist1[name]) + '\t' + name + '\t' + '\t'.join(variables) + '\n')
#print idlist1[name]
# sys.stdout.write(('\t'.join(idlist1[name]) + '\t' + '\t'.join(line.split('\t')[0:])))
#sys.stdout.write(name + '\t' + '\t'.join(idlist1[name]) + '\t' + '\t'.join(line.split('\t')[2:]))
# out+=1
#print matched, out
#print gene
#print idlist1[item]
# sys.stdout.write(value + "\t" + name + '\t' + line)#'\t' + '\t'.join(line.split('\t')[2:]))
# stop+=1
#continue
#if name in idlist1:
# if descr in idlist1[name]:
# sys.stdout.write(line)
# descr = idlist1[name]
# sys.stdout.write('\t'.join(idlist1[name]) + '\t' + '\t'.join(line.split('\t')[2:]))
#sys.stdout.write('\t'.join(line.split('\t')[0:2]) + '\t' + descr + '\n')
#del idlist1[name]
#else:
# pass
#sys.stdout.write(line + '\n')
#if name in idlist2:
# pass
#else:
#idlist2.append(name)
#idlist1.remove(name)
#print line
#count+=1
#Code for checking remaining values in ID list
#for item in idlist1:
# print "bakow!"
# sys.stdout.write(item + '\t' + idlist2[item] + '\t' + idlist1[item] + '\n')
#else:
# print line.split('\t')[0]
#print len(idlist1), len(idlist2)
#print len(idlist1)-len(idlist2)
#print len(idlist1)
#sorted(idlist2)
#print idlist1
#for item in idlist2:
# if item in idlist1:
# idlist1.remove(item)
#print 'idlist1-idlist2', len(idlist1)
#for item in idlist1:
# print item
#cross check input and output lists
#idlist3= []
#for thing in idlist1:
# if thing in idlist2:
# pass
# else:
# idlist3.append(thing)
#print len(idlist3)
#print len(idlist4)
#idlist4 = [x for x in idlist1 if x not in idlist2]
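#-------------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): a compact version of
# the ID-filtering idea implemented above. The file paths are left as
# parameters; the column indices (0 for the ID file, 6 for the data file) and
# the annotation columns 1-3 mirror the hard-coded choices above but are
# otherwise placeholders.
def filter_by_id_sketch(id_path, data_path, id_col=0, data_col=6):
    ids = {}
    with open(id_path) as fh:
        fh.readline()  # skip header
        for line in fh:
            fields = line.rstrip('\n').split('\t')
            name = fields[id_col]
            if name:
                ids.setdefault(name, []).append('\t'.join(fields[1:4]))
    with open(data_path) as fh:
        for line in fh:
            name = line.rstrip('\n').split('\t')[data_col]
            if name in ids:
                sys.stdout.write('\t'.join(ids[name]) + '\t' + line)
            else:
                sys.stdout.write(line)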
|
ejspina/Gene_expression_tools
|
Python/FilterByID_dict_parse.py
|
Python
|
gpl-2.0
| 7,098
|
[
"BLAST"
] |
0696a8289bd7351a6287ebf0ebd57dce2d39cd89751e25205b09e16da50547bc
|
# Copyright 2002 by Katharine Lindner. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Hetero, Crystal and Chain exist to represent the NDB Atlas structure. Atlas is a minimal
subset of the PDB format. Hetero supports a 3 alphameric code.
The NDB web interface is located at http://ndbserver.rutgers.edu/NDB/index.html
"""
import string, array, copy
from Bio.Seq import Seq
from Bio.Seq import MutableSeq
def wrap_line( line ):
output = ''
for i in range( 0, len( line ), 80 ):
output = output + '%s\n' % line[ i: i + 80 ]
return output
def validate_key( key ):
if( type( key ) != type( '' ) ):
raise CrystalError( 'chain requires a string label' )
if( len( key ) != 1 ):
raise CrystalError( 'chain label should contain one letter' )
class Error( Exception ):
"""
"""
def __init__( self ):
pass
class CrystalError( Error ):
"""
message - description of error
"""
def __init__( self, message ):
self.message = message
class Hetero:
"""
This class exists to support the PDB hetero codes. Supports only the 3 alphameric code.
The annotation is available from http://alpha2.bmc.uu.se/hicup/
"""
def __init__(self, data):
# Enforce string storage
if( type(data) != type("") ):
raise CrystalError( 'Hetero data must be an alphameric string' )
if( data.isalnum() == 0 ):
raise CrystalError( 'Hetero data must be an alphameric string' )
if( len( data ) > 3 ):
raise CrystalError( 'Hetero data may contain up to 3 characters' )
if( len( data ) < 1 ):
raise CrystalError( 'Hetero data must not be empty' )
self.data = data[:].lower()
def __eq__(self, other):
return (self.data == other.data )
def __ne__(self, other):
"""Returns true iff self is not equal to other."""
return not self.__eq__(other)
def __repr__(self):
return "%s" % self.data
def __str__(self):
return "%s" % self.data
def __len__(self): return len(self.data)
class Chain:
def __init__(self, residues = '' ):
self.data = []
if( type( residues ) == type( '' ) ):
residues = residues.replace( '*', ' ' )
residues = residues.strip()
elements = residues.split()
self.data = map( Hetero, elements )
elif( type( residues ) == type( [] ) ):
for element in residues:
if( not isinstance( element, Hetero ) ):
raise CrystalError( 'Text must be a string' )
for residue in residues:
self.data.append( residue )
elif( isinstance( residues, Chain ) ):
for residue in residues:
self.data.append( residue )
self.validate()
def validate( self ):
data = self.data
for element in data:
self.validate_element( element )
def validate_element( self, element ):
if( not isinstance( element, Hetero ) ):
raise TypeError
def __str__( self ):
output = ''
i = 0
for element in self.data:
output = output + '%s ' % element
output = output.strip()
output = wrap_line( output )
return output
def __eq__(self, other):
if( len( self.data ) != len( other.data ) ):
return 0
ok = reduce( lambda x, y: x and y, map( lambda x, y: x == y, self.data, other.data ) )
return ok
def __ne__(self, other):
"""Returns true iff self is not equal to other."""
return not self.__eq__(other)
def __len__(self): return len(self.data)
def __getitem__(self, i): return self.data[i]
def __setitem__(self, i, item):
try:
self.validate_element( item )
except TypeError:
item = Hetero( item.lower() )
self.data[i] = item
def __delitem__(self, i):
del self.data[i]
def __getslice__(self, i, j):
i = max(i, 0); j = max(j, 0)
return self.__class__(self.data[i:j])
def __setslice__(self, i, j, other):
i = max(i, 0); j = max(j, 0)
if isinstance(other, Chain):
self.data[i:j] = other.data
elif isinstance(other, type(self.data)):
self.data[i:j] = other
elif type( other ) == type( '' ):
self.data[ i:j ] = Chain( other ).data
else:
raise TypeError
def __delslice__(self, i, j):
i = max(i, 0); j = max(j, 0)
del self.data[i:j]
def __contains__(self, item):
try:
self.validate_element( item )
except TypeError:
item = Hetero( item.lower() )
return item in self.data
def append(self, item):
try:
self.validate_element( item )
except TypeError:
item = Hetero( item.lower() )
self.data.append(item)
def insert(self, i, item):
try:
self.validate_element( item )
except TypeError:
item = Hetero( item.lower() )
self.data.insert(i, item)
def remove(self, item):
item = Hetero( item.lower() )
self.data.remove(item)
def count(self, item):
try:
self.validate_element( item )
except TypeError:
item = Hetero( item.lower() )
return self.data.count(item)
def index(self, item):
try:
self.validate_element( item )
except TypeError:
item = Hetero( item.lower() )
return self.data.index(item)
def __add__(self, other):
if isinstance(other, Chain):
return self.__class__(self.data + other.data)
elif type( other ) == type( '' ):
return self.__class__(self.data + Chain( other).data )
else:
raise TypeError
def __radd__(self, other):
if isinstance(other, Chain):
return self.__class__( other.data + self.data )
elif type( other ) == type( '' ):
return self.__class__( Chain( other ).data + self.data )
else:
raise TypeError
def __iadd__(self, other):
if isinstance(other, Chain ):
self.data += other.data
elif type( other ) == type( '' ):
self.data += Chain( other ).data
else:
raise TypeError
return self
class Crystal:
def __init__(self, data = {} ):
        # Enforce dictionary storage
if( type( data ) != type( {} ) ):
raise CrystalError( 'Crystal must be a dictionary' )
self.data = data
self.fix()
def fix( self ):
data = self.data
for key in data.keys():
element = data[ key ]
if( isinstance( element, Chain ) ):
pass
elif type( element ) == type( '' ):
data[ key ] = Chain( element )
else:
raise TypeError
def __repr__(self):
output = ''
keys = self.data.keys()
keys.sort()
for key in keys:
output = output + '%s : %s\n' % ( key, self.data[ key ] )
return output
def __str__(self):
output = ''
keys = self.data.keys()
keys.sort()
for key in keys:
output = output + '%s : %s\n' % ( key, self.data[ key ] )
return output
def tostring(self):
return self.data
def __len__(self): return len(self.data)
def __getitem__(self, key): return self.data[key]
def __setitem__(self, key, item):
if isinstance( item, Chain ):
self.data[key] = item
elif type( item ) == type( '' ):
self.data[ key ] = Chain( item )
else:
raise TypeError
def __delitem__(self, key): del self.data[key]
def clear(self): self.data.clear()
def copy(self):
return copy.copy(self)
def keys(self): return self.data.keys()
def items(self): return self.data.items()
def values(self): return self.data.values()
def has_key(self, key): return self.data.has_key(key)
def get(self, key, failobj=None):
return self.data.get(key, failobj)
def setdefault(self, key, failobj=None):
if not self.data.has_key(key):
self.data[key] = failobj
return self.data[key]
def popitem(self):
return self.data.popitem()
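# --- Illustrative sketch (not part of the original Biopython module) ---
# Hypothetical usage of the three classes above; the residue codes are made up.
def _example_crystal_usage():
    chain = Chain('ala gly ser')                     # three Hetero residues
    chain.append('thr')                              # strings are coerced to Hetero
    crystal = Crystal({'A': chain, 'B': 'cys met'})  # string values become Chains
    return str(crystal), len(crystal['A'])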
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/Crystal/__init__.py
|
Python
|
apache-2.0
| 8,604
|
[
"Biopython",
"CRYSTAL"
] |
1323d12d06882d94ffcb1dea0b6361bcc4fe391cab46f84fd3e5acbb4d80e03e
|
# -*- coding:utf-8 -*-
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns, url, include
from rest_framework.urlpatterns import format_suffix_patterns
from pypln.web.indexing.views import IndexDocument, IndexQuery
urlpatterns = patterns('pypln.web.indexing.views',
url(r'^index-document/$', IndexDocument.as_view(), name='index-document'),
url(r'^query/$', IndexQuery.as_view(), name='index-query'),
)
urlpatterns = format_suffix_patterns(urlpatterns, allowed=['json', 'api'])
|
flavioamieiro/pypln.web
|
pypln/web/indexing/urls.py
|
Python
|
gpl-3.0
| 1,206
|
[
"NAMD"
] |
c39d614c9e1c745f2230f28bc57280e88468e8d2d0444b0b241d70b22adbae49
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.fixes import bincount
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
    StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
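# --- Illustrative sketch (not part of the original scikit-learn module) ---
# Hypothetical usage of the scale() function defined above; the data values
# are made up for illustration.
def _example_scale_usage():
    X = np.array([[1., -1., 2.],
                  [2., 0., 0.],
                  [0., 1., -1.]])
    X_scaled = scale(X)  # column-wise zero mean and unit variance
    return X_scaled.mean(axis=0), X_scaled.std(axis=0)  # approx. 0 and 1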
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_* instead of deprecated *data_min*.
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_* instead of deprecated *data_max*.
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_* instead of deprecated *data_range*.
See also
--------
minmax_scale: Equivalent function without the object oriented API.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
@property
@deprecated("Attribute data_range will be removed in "
"0.19. Use ``data_range_`` instead")
def data_range(self):
return self.data_range_
@property
@deprecated("Attribute data_min will be removed in "
"0.19. Use ``data_min_`` instead")
def data_min(self):
return self.data_min_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X -= self.min_
X /= self.scale_
return X
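# --- Illustrative sketch (not part of the original scikit-learn module) ---
# Hypothetical MinMaxScaler usage; the data values are made up.
def _example_minmax_scaler_usage():
    X = np.array([[-1., 2.], [-0.5, 6.], [0., 10.], [1., 18.]])
    scaler = MinMaxScaler(feature_range=(0, 1))
    X_scaled = scaler.fit_transform(X)               # each column mapped to [0, 1]
    X_restored = scaler.inverse_transform(X_scaled)  # back to the original values
    return X_scaled, X_restored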
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* is recommended instead of deprecated *std_*.
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
@property
@deprecated("Attribute ``std_`` will be removed in 0.19. Use ``scale_`` instead")
def std_(self):
return self.scale_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
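# --- Illustrative sketch (not part of the original scikit-learn module) ---
# Hypothetical StandardScaler usage, including the incremental partial_fit path
# for data that arrives in batches; the input is assumed to be an iterable of
# 2-D arrays with the same number of features.
def _example_standard_scaler_usage(batches):
    scaler = StandardScaler()
    for batch in batches:          # e.g. chunks read from a stream
        scaler.partial_fit(batch)  # updates mean_, var_ and scale_ incrementally
    return [scaler.transform(batch) for batch in batches]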
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
maxabs_scale: Equivalent function without the object oriented API.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
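# --- Illustrative sketch (not part of the original scikit-learn module) ---
# Hypothetical MaxAbsScaler usage on a sparse matrix; sparsity is preserved
# because no centering is performed. The data values are made up.
def _example_maxabs_scaler_usage():
    X = sparse.csr_matrix([[1., -2.], [0., 4.], [-3., 0.]])
    scaler = MaxAbsScaler()
    X_scaled = scaler.fit_transform(X)  # columns divided by max abs (3. and 4.)
    return X_scaled.toarray()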
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
robust_scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
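# Editor's illustrative sketch (not part of the original scikit-learn source):
# a minimal usage example for RobustScaler / robust_scale on a small dense
# array. The data values are arbitrary and chosen only for demonstration.
def _example_robust_scaling():
    import numpy as np
    X_demo = np.array([[1., -2., 2.],
                       [-2., 1., 3.],
                       [4., 1., -2.]])
    # Per-column medians are subtracted, then each column is divided by its IQR.
    scaled = RobustScaler().fit_transform(X_demo)
    # The functional API applies the same transformation along axis 0.
    also_scaled = robust_scale(X_demo, axis=0)
    assert np.allclose(scaled, also_scaled)
    return scaled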
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
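# Editor's illustrative sketch (not part of the original scikit-learn source):
# how PolynomialFeatures expands a two-feature input and how get_feature_names
# reports the generated terms. The input values are arbitrary.
def _example_polynomial_features():
    import numpy as np
    X_demo = np.arange(6).reshape(3, 2)
    poly = PolynomialFeatures(degree=2).fit(X_demo)
    # For two inputs and degree 2 this yields: 1, x0, x1, x0^2, x0 x1, x1^2.
    names = poly.get_feature_names()
    return poly.transform(X_demo), names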
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
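# Editor's illustrative sketch (not part of the original scikit-learn source):
# the effect of the three supported norms on a small dense array. Row [3, 4]
# has l2 norm 5, l1 norm 7 and max norm 4, so the results are easy to check
# by hand.
def _example_normalize():
    import numpy as np
    X_demo = np.array([[3., 4.], [1., 0.]])
    l2 = normalize(X_demo, norm='l2')   # [[0.6, 0.8], [1.0, 0.0]]
    l1 = normalize(X_demo, norm='l1')   # [[3/7, 4/7], [1.0, 0.0]]
    mx = normalize(X_demo, norm='max')  # [[0.75, 1.0], [1.0, 0.0]]
    return l2, l1, mx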
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
    classification or clustering. For instance, the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
normalize: Equivalent function without the object oriented API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
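# Editor's illustrative sketch (not part of the original scikit-learn source):
# thresholding behaviour of binarize. Values strictly greater than the
# threshold map to 1, everything else to 0. The input values are arbitrary.
def _example_binarize():
    import numpy as np
    X_demo = np.array([[1., -1., 2.], [0., 0., -1.]])
    b_default = binarize(X_demo)                 # [[1, 0, 1], [0, 0, 0]]
    b_shifted = binarize(X_demo, threshold=1.0)  # [[0, 0, 1], [0, 0, 0]]
    return b_default, b_shifted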
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the object oriented API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
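# Editor's illustrative sketch (not part of the original scikit-learn source):
# for a linear kernel, centering the kernel matrix with KernelCenterer is
# equivalent to computing the kernel on feature-wise centered data. The random
# data is arbitrary.
def _example_kernel_centerer():
    import numpy as np
    rng = np.random.RandomState(0)
    X_demo = rng.rand(5, 3)
    K = np.dot(X_demo, X_demo.T)
    K_centered = KernelCenterer().fit(K).transform(K)
    X_c = X_demo - X_demo.mean(axis=0)
    assert np.allclose(K_centered, np.dot(X_c, X_c.T))
    return K_centered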
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be in ``range(n_values[i])``
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We only use those categorical features of X that were seen during
        # fit, i.e. values strictly less than n_values_, selected via the mask.
        # This means that, if self.handle_unknown is "ignore", the row_indices
        # and col_indices corresponding to unknown categorical features are
        # dropped.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
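# Editor's illustrative sketch (not part of the original scikit-learn source):
# behaviour of handle_unknown='ignore' when a category unseen during fit is
# encountered at transform time; the unseen value should be encoded as an
# all-zero row instead of raising an error.
def _example_one_hot_unknown():
    import numpy as np
    enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
    enc.fit(np.array([[0], [1], [2]]))
    encoded = enc.transform(np.array([[3]]))  # expected: [[0., 0., 0.]]
    return encoded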
|
imaculate/scikit-learn
|
sklearn/preprocessing/data.py
|
Python
|
bsd-3-clause
| 68,961
|
[
"Gaussian"
] |
752a0c0df84c0407da7bbe4d45fdacad4fb1f77441b1cb176f8e8bd309468f8b
|
"""This is a simple simulation code for GHOST or Veloce, with a class ARM that simulates
a single arm of the instrument. The key default parameters are hardwired for each named
configuration in the __init__ function of ARM.
Note that in this simulation code, the 'x' and 'y' directions are the along-slit and
dispersion directions respectively... (similar to physical axes) but by convention,
images are returned/displayed with a vertical slit and a horizontal dispersion direction.
For a simple simulation, run:
import pyghost
blue = pyghost.ghostsim.Arm('blue')
blue.simulate_frame()
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import optics
import os
import pdb
try:
import pyfits
except:
import astropy.io.fits as pyfits
class Arm():
"""A class for each arm of the spectrograph. The initialisation function takes a
single string representing the configuration. For GHOST, it can be "red" or "blue"."""
def __init__(self,arm):
self.arm=arm
self.d = 1000/52.67 #Distance in microns
self.theta= 65.0 #Blaze angle
self.assym = 1.0/0.41 #Magnification
self.gamma = 0.56 #Echelle gamma
        self.nwave = int(1e2) #Wavelengths per order for interpolation.
self.f_col = 1750.6 #Collimator focal length.
self.lenslet_high_size = 118.0 #Lenslet flat-to-flat in microns
self.lenslet_std_size = 197.0 #Lenslet flat-to-flat in microns
self.microns_pix = 2.0 #When simulating the slit image, use this many microns per pixel
self.microns_arcsec = 400.0 #Number of microns in the slit image plane per arcsec
self.im_slit_sz = 2048 #Size of the image slit size in pixels.
if (arm == 'red'):
self.extra_rot = 3.0 #Additional slit rotation across an order needed to match Zemax.
self.szx = 6144
self.szy = 6144
self.f_cam = 264.0
self.px_sz = 15e-3
self.drot = -2.0 #Detector rotation
self.d_x = 1000/565. #VPH line spacing
self.theta_i=30.0 #Prism incidence angle
self.alpha1 = 0.0 #First prism apex angle
self.alpha2 = 0.0 #Second prism apex angle
self.m_min = 34
self.m_max = 67
elif (arm == 'blue'):
            self.extra_rot = 2.0 #Additional slit rotation across an order needed to match Zemax.
self.szx = 4096
self.szy = 4096
self.f_cam = 264.0
self.px_sz = 15e-3
self.d_x = 1000/1137. #VPH line spacing
self.theta_i=30.0 #Prism incidence angle
self.drot = -2.0 #Detector rotation.
self.alpha1 = 0.0 #First prism apex angle
self.alpha2 = 0.0 #Second prism apex angle
self.m_min = 63
self.m_max = 95
else:
print("Unknown spectrograph arm!")
raise UserWarning
def spectral_format(self,xoff=0.0,yoff=0.0,ccd_centre={}):
"""Create a spectrum, with wavelengths sampled in 2 orders.
Parameters
----------
xoff: float
An input offset from the field center in the slit plane in
mm in the x (spatial) direction.
yoff: float
An input offset from the field center in the slit plane in
mm in the y (spectral) direction.
ccd_centre: dict
An input describing internal parameters for the angle of the center of the
CCD. To run this program multiple times with the same co-ordinate system,
take the returned ccd_centre and use it as an input.
Returns
-------
x: (nm, ny) float array
The x-direction pixel co-ordinate corresponding to each y-pixel and each
order (m).
wave: (nm, ny) float array
The wavelength co-ordinate corresponding to each y-pixel and each
order (m).
blaze: (nm, ny) float array
The blaze function (pixel flux divided by order center flux) corresponding
to each y-pixel and each order (m).
ccd_centre: dict
Parameters of the internal co-ordinate system describing the center of the
CCD.
"""
# Parameters for the Echelle. Note that we put the
# co-ordinate system along the principle Echelle axis, and
# make the beam come in at the gamma angle.
u1 = -np.sin(np.radians(self.gamma) + xoff/self.f_col)
u2 = np.sin(yoff/self.f_col)
u3 = np.sqrt(1 - u1**2 - u2**2)
u = np.array([u1,u2,u3])
l = np.array([1.0,0,0])
s = np.array([0,np.cos(np.radians(self.theta)), -np.sin(np.radians(self.theta))])
#Orders for each wavelength. We choose +/- 1 free spectral range.
ms = np.arange(self.m_min,self.m_max+1)
wave_mins = 2*self.d*np.sin(np.radians(self.theta))/(ms + 1.0)
wave_maxs = 2*self.d*np.sin(np.radians(self.theta))/(ms - 1.0)
wave = np.empty( (len(ms),self.nwave))
for i in range(len(ms)):
wave[i,:] = np.linspace(wave_mins[i],wave_maxs[i],self.nwave)
wave = wave.flatten()
ms = np.repeat(ms,self.nwave)
order_frac = np.abs(ms - 2*self.d*np.sin(np.radians(self.theta))/wave)
ml_d = ms*wave/self.d
#Propagate the beam through the Echelle.
nl = len(wave)
v = np.zeros( (3,nl) )
for i in range(nl):
v[:,i] = optics.grating_sim(u,l,s,ml_d[i])
## Find the current mean direction in the x-z plane, and magnify
## the angles to represent passage through the beam reducer.
if len(ccd_centre)==0:
mean_v = np.mean(v,axis=1)
## As the range of angles is so large in the y direction, the mean
## will depend on the wavelength sampling within an order. So just consider
## a horizontal beam.
mean_v[1] = 0
## Re-normalise this mean direction vector
mean_v /= np.sqrt(np.sum(mean_v**2))
else:
mean_v = ccd_centre['mean_v']
for i in range(nl):
## Expand the range of angles around the mean direction.
temp = mean_v + (v[:,i]-mean_v)*self.assym
            ## Re-normalise to a unit vector.
            v[:,i] = temp/np.sqrt(np.sum(temp**2))
## Here we diverge from Veloce. We will ignore the glass, and
## just consider the cross-disperser.
l = np.array([0,-1,0])
theta_xdp = -self.theta_i + self.gamma
# Angle on next line may be negative...
s = optics.rotate_xz(np.array( [1,0,0] ), theta_xdp)
n = np.cross(s,l) # The normal
print('Incidence angle in air: {0:5.3f}'.format(np.degrees(np.arccos(np.dot(mean_v,n)))))
#W is the exit vector after the grating.
w = np.zeros( (3,nl) )
for i in range(nl):
w[:,i] = optics.grating_sim(v[:,i],l,s,wave[i]/self.d_x)
mean_w = np.mean(w,axis=1)
mean_w[1]=0
mean_w /= np.sqrt(np.sum(mean_w**2))
print('Grating exit angle in glass: {0:5.3f}'.format(np.degrees(np.arccos(np.dot(mean_w,n)))))
# Define the CCD x and y axes by the spread of angles.
if len(ccd_centre)==0:
ccdy = np.array([0,1,0])
ccdx = np.array([1,0,0]) - np.dot([1,0,0],mean_w)*mean_w
ccdx[1]=0
ccdx /= np.sqrt(np.sum(ccdx**2))
else:
ccdx = ccd_centre['ccdx']
ccdy = ccd_centre['ccdy']
# Make the spectrum on the detector.
xpx = np.zeros(nl)
ypx = np.zeros(nl)
xy = np.zeros(2)
## There is definitely a more vectorised way to do this.
for i in range(nl):
xy[0] = np.dot(ccdx,w[:,i])*self.f_cam/self.px_sz
xy[1] = np.dot(ccdy,w[:,i])*self.f_cam/self.px_sz
# Rotate the chip to get the orders along the columns.
rot_rad = np.radians(self.drot)
rot_matrix = np.array([[np.cos(rot_rad),np.sin(rot_rad)],[-np.sin(rot_rad),np.cos(rot_rad)]])
xy = np.dot(rot_matrix,xy)
xpx[i]=xy[0]
ypx[i]=xy[1]
## Center the spectra on the CCD in the x-direction.
if len(ccd_centre)==0:
w = np.where( (ypx < self.szy/2) * (ypx > -self.szy/2) )[0]
xpix_offset = 0.5*( np.min(xpx[w]) + np.max(xpx[w]) )
else:
xpix_offset=ccd_centre['xpix_offset']
xpx -= xpix_offset
## Now lets interpolate onto a pixel grid rather than the arbitrary wavelength
## grid we began with.
nm = self.m_max-self.m_min+1
x_int = np.zeros( (nm,self.szy) )
wave_int = np.zeros((nm,self.szy) )
blaze_int = np.zeros((nm,self.szy) )
plt.clf()
for m in range(self.m_min,self.m_max+1):
ww = np.where(ms == m)[0]
y_int_m = np.arange( np.max([np.min(ypx[ww]).astype(int),-self.szy/2]),\
np.min([np.max(ypx[ww]).astype(int),self.szy/2]),dtype=int )
ix = y_int_m + self.szy/2
x_int[m-self.m_min,ix] = np.interp(y_int_m,ypx[ww],xpx[ww])
wave_int[m-self.m_min,ix] = np.interp(y_int_m,ypx[ww],wave[ww])
blaze_int[m-self.m_min,ix] = np.interp(y_int_m,ypx[ww],np.sinc(order_frac[ww])**2)
plt.plot(x_int[m-self.m_min,ix],y_int_m)
plt.axis( (-self.szx/2,self.szx/2,-self.szx/2,self.szx/2) )
plt.draw()
return x_int,wave_int,blaze_int,{'ccdx':ccdx,'ccdy':ccdy,'xpix_offset':xpix_offset,'mean_v':mean_v}
def spectral_format_with_matrix(self):
"""Create a spectral format, including a detector to slit matrix at every point.
Returns
-------
x: (nm, ny) float array
The x-direction pixel co-ordinate corresponding to each y-pixel and each
order (m).
w: (nm, ny) float array
The wavelength co-ordinate corresponding to each y-pixel and each
order (m).
blaze: (nm, ny) float array
The blaze function (pixel flux divided by order center flux) corresponding
to each y-pixel and each order (m).
matrices: (nm, ny, 2, 2) float array
2x2 slit rotation matrices.
"""
x,w,b,ccd_centre = self.spectral_format()
x_xp,w_xp,b_xp,dummy = self.spectral_format(xoff=-1e-3,ccd_centre=ccd_centre)
x_yp,w_yp,b_yp,dummy = self.spectral_format(yoff=-1e-3,ccd_centre=ccd_centre)
dy_dyoff = np.zeros(x.shape)
dy_dxoff = np.zeros(x.shape)
#For the y coordinate, spectral_format output the wavelength at fixed pixel, not
#the pixel at fixed wavelength. This means we need to interpolate to find the
#slit to detector transform.
isbad = w*w_xp*w_yp == 0
for i in range(x.shape[0]):
ww = np.where(isbad[i,:] == False)[0]
dy_dyoff[i,ww] = np.interp(w_yp[i,ww],w[i,ww],np.arange(len(ww))) - np.arange(len(ww))
dy_dxoff[i,ww] = np.interp(w_xp[i,ww],w[i,ww],np.arange(len(ww))) - np.arange(len(ww))
#Interpolation won't work beyond the end, so extrapolate manually (why isn't this a numpy
#option???)
dy_dyoff[i,ww[-1]] = dy_dyoff[i,ww[-2]]
dy_dxoff[i,ww[-1]] = dy_dxoff[i,ww[-2]]
#For dx, no interpolation is needed so the numerical derivative is trivial...
dx_dxoff = x_xp - x
dx_dyoff = x_yp - x
#flag bad data...
x[isbad] = np.nan
w[isbad] = np.nan
b[isbad] = np.nan
dy_dyoff[isbad] = np.nan
dy_dxoff[isbad] = np.nan
dx_dyoff[isbad] = np.nan
dx_dxoff[isbad] = np.nan
matrices = np.zeros( (x.shape[0],x.shape[1],2,2) )
amat = np.zeros((2,2))
for i in range(x.shape[0]):
for j in range(x.shape[1]):
## Create a matrix where we map input angles to output coordinates.
amat[0,0] = dx_dxoff[i,j]
amat[0,1] = dx_dyoff[i,j]
amat[1,0] = dy_dxoff[i,j]
amat[1,1] = dy_dyoff[i,j]
## Apply an additional rotation matrix. If the simulation was complete,
## this wouldn't be required.
r_rad = np.radians(self.extra_rot)
dy_frac = (j - x.shape[1]/2.0)/(x.shape[1]/2.0)
extra_rot_mat = np.array([[np.cos(r_rad*dy_frac),np.sin(r_rad*dy_frac)],[-np.sin(r_rad*dy_frac),np.cos(r_rad*dy_frac)]])
amat = np.dot(extra_rot_mat,amat)
## We actually want the inverse of this (mapping output coordinates back
## onto the slit.
matrices[i,j,:,:] = np.linalg.inv(amat)
return x,w,b,matrices
def make_lenslets(self,fluxes=[],mode='',seeing=0.8, llet_offset=0):
"""Make an image of the lenslets with sub-pixel sampling.
Parameters
----------
fluxes: float array (optional)
Flux in each lenslet
mode: string (optional)
'high' or 'std', i.e. the resolving power mode of the spectrograph. Either
mode or fluxes must be set.
seeing: float (optional)
If fluxes is not given, then the flux in each lenslet is defined by the seeing.
llet_offset: int
Offset in lenslets to apply to the input spectrum"""
print("Computing a simulated slit image...")
szx = self.im_slit_sz
szy = 256
fillfact = 0.98
s32 = np.sqrt(3)/2
hex_scale = 1.15
conv_fwhm = 30.0 #equivalent to a 1 degree FWHM for an f/3 input ??? !!! Double-check !!!
if len(fluxes)==28:
mode = 'high'
elif len(fluxes)==17:
mode = 'std'
elif len(mode)==0:
print("Error: 17 or 28 lenslets needed... or mode should be set")
raise UserWarning
if mode=='std':
nl=17
lenslet_width = self.lenslet_std_size
yoffset = (lenslet_width/self.microns_pix/hex_scale*np.array([0,-s32,s32,0,-s32,s32,0])).astype(int)
xoffset = (lenslet_width/self.microns_pix/hex_scale*np.array([-1,-0.5,-0.5,0,0.5,0.5,1.0])).astype(int)
elif mode=='high':
nl=28
lenslet_width = self.lenslet_high_size
yoffset = (lenslet_width/self.microns_pix/hex_scale*s32*np.array([-2,2,-2,-1,-1,0,-1,-1,0,0,0,1,1,0,1,1,2,-2,2])).astype(int)
xoffset = (lenslet_width/self.microns_pix/hex_scale*0.5*np.array([-2,0,2,-3,3,-4,-1,1,-2,0,2,-1,1,4,-3,3,-2,0,2])).astype(int)
else:
print("Error: mode must be standard or high")
#Some preliminaries...
cutout_hw = int(lenslet_width/self.microns_pix*1.5)
im_slit = np.zeros((szy,szx))
x = np.arange(szx) - szx/2.0
y = np.arange(szy) - szy/2.0
xy = np.meshgrid(x,y)
#r and wr enable the radius from the lenslet center to be indexed
r = np.sqrt(xy[0]**2 + xy[1]**2)
wr = np.where(r < 2*lenslet_width/self.microns_pix)
#g is a Gaussian used for FRD
g = np.exp(-r**2/2.0/(conv_fwhm/self.microns_pix/2.35)**2)
g = np.fft.fftshift(g)
g /= np.sum(g)
gft = np.conj(np.fft.rfft2(g))
pix_size_slit = self.px_sz*(self.f_col/self.assym)/self.f_cam*1000.0/self.microns_pix
pix = np.zeros( (szy,szx) )
pix[np.where( (np.abs(xy[0]) < pix_size_slit/2) * (np.abs(xy[1]) < pix_size_slit/2) )] = 1
pix = np.fft.fftshift(pix)
pix /= np.sum(pix)
pix_ft = np.conj(np.fft.rfft2(pix))
#Create some hexagons. We go via a "cutout" for efficiency.
h_cutout = optics.hexagon(szy, lenslet_width/self.microns_pix*fillfact/hex_scale)
hbig_cutout = optics.hexagon(szy, lenslet_width/self.microns_pix*fillfact)
h = np.zeros( (szy,szx) )
hbig = np.zeros( (szy,szx) )
h[:,szx/2-szy/2:szx/2+szy/2] = h_cutout
hbig[:,szx/2-szy/2:szx/2+szy/2] = hbig_cutout
if len(fluxes)!=0:
#If we're not simulating seeing, the image-plane is uniform, and we only use
#the values of "fluxes" to scale the lenslet fluxes.
im = np.ones( (szy,szx) )
#Set the offsets to zero because we may be simulating e.g. a single Th/Ar lenslet
#and not starlight (from the default xoffset etc)
xoffset = np.zeros(len(fluxes),dtype=int)
yoffset = np.zeros(len(fluxes),dtype=int)
else:
#If we're simulating seeing, create a Moffat function as our input profile,
#but just make the lenslet fluxes uniform.
im = np.zeros( (szy,szx) )
im_cutout = optics.moffat2d(szy,seeing*self.microns_arcsec/self.microns_pix/2, beta=4.0)
im[:,szx/2-szy/2:szx/2+szy/2] = im_cutout
fluxes = np.ones(len(xoffset))
#Go through the flux vector and fill in each lenslet.
for i in range(len(fluxes)):
im_one = np.zeros((szy,szx))
im_cutout = np.roll(np.roll(im,yoffset[i],axis=0),xoffset[i],axis=1)*h
im_cutout = im_cutout[szy/2-cutout_hw:szy/2+cutout_hw,szx/2-cutout_hw:szx/2+cutout_hw]
prof = optics.azimuthalAverage(im_cutout, returnradii=True, binsize=1)
prof = (prof[0],prof[1]*fluxes[i])
xprof = np.append(np.append(0,prof[0]),np.max(prof[0])*2)
yprof = np.append(np.append(prof[1][0],prof[1]),0)
im_one[wr] = np.interp(r[wr], xprof, yprof)
im_one = np.fft.irfft2(np.fft.rfft2(im_one)*gft)*hbig
im_one = np.fft.irfft2(np.fft.rfft2(im_one)*pix_ft)
#!!! The line below could add tilt offsets... important for PRV simulation !!!
#im_one = np.roll(np.roll(im_one, tilt_offsets[0,i], axis=1),tilt_offsets[1,i], axis=0)*hbig
the_shift = int( (llet_offset + i - nl/2.0)*lenslet_width/self.microns_pix )
im_slit += np.roll(im_one,the_shift,axis=1)
return im_slit
def simulate_image(self,x,w,b,matrices,im_slit,spectrum=[],nx=0, xshift=0.0, yshift=0.0, rv=0.0):
"""Simulate a spectrum on the CCD.
Parameters
----------
x,w,b,matrices: float arrays
See the output of spectral_format_with_matrix
im_slit: float array
See the output of make_lenslets
spectrum: (2,nwave) array (optional)
An input spectrum, arbitrarily gridded (but at a finer resolution than the
            spectrograph resolving power). If not given, a solar spectrum is used.
nx: float
Number of x (along-slit) direction pixels in the image. If not given or
zero, a square CCD is assumed.
xshift: float
Bulk shift to put in to the spectrum along the slit.
yshift: float
NOT IMPLEMENTED
rv: float
Radial velocity in m/s.
"""
#If no input spectrum, use the sun.
if len(spectrum)==0:
d =pyfits.getdata(os.path.join(os.path.dirname(os.path.abspath(__file__)),'data/ardata.fits.gz'))
spectrum=np.array([np.append(0.35,d['WAVELENGTH'])/1e4,np.append(0.1,d['SOLARFLUX'])])
nm = x.shape[0]
ny = x.shape[1]
if nx==0:
nx = ny
image = np.zeros( (ny,nx) )
#Simulate the slit image within a small cutout region.
cutout_xy = np.meshgrid( np.arange(81)-40, np.arange(7)-3 )
#Loop over orders
for i in range(nm):
for j in range(ny):
if x[i,j] != x[i,j]:
continue
#We are looping through y pixel and order. The x-pixel is therefore non-integer.
#Allow an arbitrary shift of this image.
the_x = x[i,j] + xshift
#Create an (x,y) index of the actual pixels we want to index.
cutout_shifted = (cutout_xy[0].copy() + int(the_x) + nx/2, \
cutout_xy[1].copy() + j)
ww = np.where( (cutout_shifted[0]>=0) * (cutout_shifted[1]>=0) * \
(cutout_shifted[0]<nx) * (cutout_shifted[1]<ny) )
cutout_shifted = (cutout_shifted[0][ww], cutout_shifted[1][ww])
flux = np.interp(w[i,j]*(1 + rv/299792458.0),spectrum[0], spectrum[1],left=0,right=0)
#Rounded to the nearest microns_pix, find the co-ordinate in the simulated slit image corresponding to
#each pixel. The co-ordinate order in the matrix is (x,y).
xy_scaled = np.dot( matrices[i,j], np.array([cutout_xy[0][ww]+int(the_x)-the_x,cutout_xy[1][ww]])/self.microns_pix ).astype(int)
image[cutout_shifted[1],cutout_shifted[0]] += b[i,j]*flux*im_slit[xy_scaled[1] + im_slit.shape[0]/2,xy_scaled[0] + im_slit.shape[1]/2]
print('Done order: {0}'.format(i + self.m_min))
return image
def simulate_frame(self, output_prefix='test_', xshift=0.0, yshift=0.0, rv=0.0,
rv_thar=0.0, flux=1e2, rnoise=3.0, gain=1.0, use_thar=True, mode='high', return_image=False, thar_flatlamp=False):
"""Simulate a single frame.
TODO (these can be implemented manually using the other functions):
1) Variable seeing (the slit profile is currently fixed)
2) Standard resolution mode.
3) Sky
4) Arbitrary input spectra
Parameters
----------
output_prefix: string (optional)
Prefix for the output filename.
xshift: float (optional)
x-direction (along-slit) shift.
yshift: float (optional)
y-direction (spectral direction) shift.
rv: float (optional)
Radial velocity in m/s for the target star with respect to the observer.
rv_thar: float (optional)
Radial velocity in m/s applied to the Thorium/Argon source. It is unlikely
that this is useful (use yshift instead for common shifts in the dispersion
direction).
flux: float (optional)
Flux multiplier for the reference spectrum to give photons/pix.
rnoise: float (optional)
Readout noise in electrons/pix
gain: float (optional)
Gain in electrons per ADU.
use_thar: bool (optional)
Is the Thorium/Argon lamp in use?
mode: string (optional)
Can be 'high' or 'std' for the resolution mode.
return_image: bool (optional)
Do we return an image as an array? The fits file is always written.
"""
x,w,b,matrices = self.spectral_format_with_matrix()
if (mode == 'high'):
slit_fluxes = np.ones(19)*0.37
slit_fluxes[6:13] = 0.78
slit_fluxes[9] = 1.0
slit_fluxes /= np.mean(slit_fluxes)
im_slit = self.make_lenslets(fluxes=slit_fluxes, mode='high', llet_offset=2)
image = self.simulate_image(x,w,b,matrices,im_slit, xshift=xshift,rv=rv)
if (use_thar):
            #Create an appropriately convolved Thorium-Argon spectrum.
thar = np.loadtxt(os.path.join(os.path.dirname(os.path.abspath(__file__)),'data/mnras0378-0221-SD1.txt'),usecols=[0,1,2])
            nthar = int(5e5)
            thar_wave = 3600 * np.exp(np.arange(nthar)/float(nthar))
            thar_flux = np.zeros(nthar)
            ix = (np.log(thar[:,1]/3600)*nthar).astype(int)
            ix = np.minimum(np.maximum(ix,0),nthar-1).astype(int)
thar_flux[ ix ] = 10**(np.minimum(thar[:,2],4))
thar_flux = np.convolve(thar_flux,[0.2,0.5,0.9,1,0.9,0.5,0.2],mode='same')
#Make the peak flux equal to 10
thar_flux /= 0.1*np.max(thar_flux)
#Include an option to assume the Th/Ar fiber is connected to a flat lamp
if thar_flatlamp:
thar_flux[:]=10
thar_spect = np.array([thar_wave/1e4,thar_flux])
#Now that we have our spectrum, create the Th/Ar image.
slit_fluxes = np.ones(1)
im_slit2 = self.make_lenslets(fluxes=slit_fluxes, mode='high', llet_offset=0)
image += self.simulate_image(x,w,b,matrices,im_slit2, spectrum=thar_spect, xshift=xshift,rv=rv_thar)
else:
print "ERROR: unknown mode."
raise UserWarning
#Prevent any interpolation errors (negative flux) prior to adding noise.
image = np.maximum(image,0)
image = np.random.poisson(flux*image) + rnoise*np.random.normal(size=image.shape)
#For conventional axes, transpose the image, and divide by the gain in e/ADU
image = image.T/gain
#Now create our fits image!
hdu = pyfits.PrimaryHDU(image)
hdu.writeto(output_prefix + self.arm + '.fits', clobber=True)
if (return_image):
return image
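# Editor's illustrative sketch (not part of the original pyghost source): a
# minimal driver for the Arm class defined above. The output prefix and the
# 100 m/s radial velocity are arbitrary demonstration values.
def _example_simulate_red_frame():
    red = Arm('red')
    # Writes 'demo_red.fits' and also returns the simulated image array.
    return red.simulate_frame(output_prefix='demo_', rv=100.0, use_thar=True,
                              mode='high', return_image=True)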
|
mikeireland/pyghost
|
pyghost/ghostsim.py
|
Python
|
mit
| 25,668
|
[
"Gaussian"
] |
7f59381d3c5d79f269a35fc534ed65fa93d199a77c3c0182a5b22ee4cf5052f1
|
from __future__ import division
import os
from skimage import io
from skimage.util import random_noise
from skimage.filters import scharr
from scipy import ndimage
import matplotlib.pyplot as plt
import numpy as np
import cv2
import phasepack
def input_data(path, filename):
img_path = os.path.join(path, filename)
img = io.imread(img_path)
img = img[85:341,90:346]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return img, gray
def _preprocess( reference_image, blur_amount ):
blur = cv2.GaussianBlur( reference_image,( blur_amount, blur_amount ), 0 )
# can also downsample and average filter
# noise = random_noise( random_noise( random_noise(reference_image,
# mode = "gaussian") ))
return blur
inputs = input_data( '/home/cparr/Downloads/jpeg2000_db/db/', 'rapids.bmp' )
img = inputs[0]
dst = _preprocess( img, 25 )
r, g, b = img[:,:,0], img[:,:,1], img[:,:,2]
imgY = 0.299 * r + 0.587 * g + 0.114 * b
imgI = 0.596 * r - 0.275 * g - 0.321 * b
imgQ = 0.212 * r - 0.523 * g + 0.311 * b
r_d, g_d, b_d = dst[:,:,0], dst[:,:,1], dst[:,:,2]
dstY = 0.299 * r_d + 0.587 * g_d + 0.114 * b_d
dstI = 0.596 * r_d - 0.275 * g_d - 0.321 * b_d
dstQ = 0.212 * r_d - 0.523 * g_d + 0.311 * b_d
t1 = 0.85
t2 = 160
t3 = 200
t4 = 200
# FSIM similarity terms have the form (2*a*b + T) / (a**2 + b**2 + T).
s_Q = ( 2*imgQ*dstQ + t4 ) / ( imgQ**2 + dstQ**2 + t4 )
s_I = ( 2*imgI*dstI + t3 ) / ( imgI**2 + dstI**2 + t3 )
pc1 = phasepack.phasecong(imgY, nscale = 4, norient = 4, minWaveLength = 6, mult = 2, sigmaOnf=0.55)
pc2 = phasepack.phasecong(dstY, nscale = 4, norient = 4, minWaveLength = 6, mult = 2, sigmaOnf=0.55)
pc1 = pc1[0]
pc2 = pc2[0]
s_PC = ( 2*pc1*pc2 + t1 ) / ( pc1**2 + pc2**2 + t1 )
g1 = scharr( imgY )
g2 = scharr( dstY )
s_G = ( 2*g1*g2 + t2 ) / ( g1**2 + g2**2 + t2 )
s_L = s_PC * s_G
s_C = s_I * s_Q
pcM = np.maximum(pc1,pc2)
fsim = round( np.nansum( s_L * pcM) / np.nansum(pcM), 3)
fsimc = round( np.nansum( s_L * s_C**0.3 * pcM) / np.nansum(pcM), 3)
print('FSIM: ' + str(fsim))
print('FSIMC: ' + str(fsimc))
fig, axes = plt.subplots( nrows = 2, ncols = 3 )
plt.subplot(231)
plt.imshow(img)
plt.title('Reference')
plt.xticks([])
plt.yticks([])
plt.subplot(232)
plt.imshow(dst, cmap = 'gray')
plt.title('Distorted')
plt.xticks([])
plt.yticks([])
plt.subplot(233)
plt.imshow(pc1, cmap = 'gray')
plt.title('Ref. PC', size = 8)
plt.xticks([])
plt.yticks([])
plt.subplot(234)
plt.imshow(pc2, cmap = 'gray')
plt.title('Dist. PC', size = 8)
plt.xticks([])
plt.yticks([])
plt.subplot(235)
plt.imshow(s_L, cmap = 'gray')
plt.xticks([])
plt.yticks([])
plt.title('FSIM: '+ str(fsim))
fig.delaxes(axes[-1,-1])
plt.savefig('/home/cparr/Snow_Patterns/figures/gsmd/fsim_rapids.png',
bbox_inches = 'tight', dpi = 300, facecolor = 'skyblue')
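# Editor's illustrative sketch (not part of the original script): the FSIM
# computation above repackaged as a reusable function for two grayscale
# (luminance) images, using the same constants and phase-congruency settings
# as the script. fsim_gray(imgY, dstY) should reproduce the unrounded FSIM
# value printed above.
def fsim_gray(ref_y, dist_y, t1=0.85, t2=160):
    pc_ref = phasepack.phasecong(ref_y, nscale=4, norient=4, minWaveLength=6,
                                 mult=2, sigmaOnf=0.55)[0]
    pc_dist = phasepack.phasecong(dist_y, nscale=4, norient=4, minWaveLength=6,
                                  mult=2, sigmaOnf=0.55)[0]
    g_ref = scharr(ref_y)
    g_dist = scharr(dist_y)
    s_pc = (2 * pc_ref * pc_dist + t1) / (pc_ref ** 2 + pc_dist ** 2 + t1)
    s_g = (2 * g_ref * g_dist + t2) / (g_ref ** 2 + g_dist ** 2 + t2)
    pc_max = np.maximum(pc_ref, pc_dist)
    return np.nansum(s_pc * s_g * pc_max) / np.nansum(pc_max)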
|
charparr/tundra-snow
|
fsim.py
|
Python
|
mit
| 2,813
|
[
"Gaussian"
] |
6813694f779ce42cca6e73e0257a75cfdccb661cf29c4225a3a3ff8976f20da7
|
# Python Version: 3.x
import pathlib
import re
import sys
import time
import webbrowser
from typing import *
import onlinejudge_command.download_history
import onlinejudge_command.logging as log
import onlinejudge_command.utils as utils
import onlinejudge
from onlinejudge.type import *
if TYPE_CHECKING:
import argparse
def submit(args: 'argparse.Namespace') -> None:
# guess url
history = onlinejudge_command.download_history.DownloadHistory()
if args.file.parent.resolve() == pathlib.Path.cwd():
guessed_urls = history.get()
else:
log.warning('cannot guess URL since the given file is not in the current directory')
guessed_urls = []
if args.url is None:
if len(guessed_urls) == 1:
args.url = guessed_urls[0]
log.info('guessed problem: %s', args.url)
else:
log.error('failed to guess the URL to submit')
log.info('please manually specify URL as: $ oj submit URL FILE')
sys.exit(1)
# parse url
problem = onlinejudge.dispatch.problem_from_url(args.url)
if problem is None:
sys.exit(1)
# read code
with args.file.open('rb') as fh:
code = fh.read() # type: bytes
format_config = {
'dos2unix': args.format_dos2unix or args.golf,
'rstrip': args.format_dos2unix or args.golf,
}
code = format_code(code, **format_config)
# report code
log.info('code (%d byte):', len(code))
log.emit(utils.make_pretty_large_file_content(code, limit=30, head=10, tail=10, bold=True))
with utils.new_session_with_our_user_agent(path=args.cookie) as sess:
# guess or select language ids
language_dict = {language.id: language.name for language in problem.get_available_languages(session=sess)} # type: Dict[LanguageId, str]
matched_lang_ids = None # type: Optional[List[str]]
if args.language in language_dict:
matched_lang_ids = [args.language]
else:
if args.guess:
kwargs = {
'language_dict': language_dict,
'cxx_latest': args.guess_cxx_latest,
'cxx_compiler': args.guess_cxx_compiler,
'python_version': args.guess_python_version,
'python_interpreter': args.guess_python_interpreter,
}
matched_lang_ids = guess_lang_ids_of_file(args.file, code, **kwargs)
if not matched_lang_ids:
log.info('failed to guess languages from the file name')
matched_lang_ids = list(language_dict.keys())
if args.language is not None:
log.info('you can use `--no-guess` option if you want to do an unusual submission')
matched_lang_ids = select_ids_of_matched_languages(args.language.split(), matched_lang_ids, language_dict=language_dict)
else:
if args.language is None:
matched_lang_ids = None
else:
matched_lang_ids = select_ids_of_matched_languages(args.language.split(), list(language_dict.keys()), language_dict=language_dict)
# report selected language ids
if matched_lang_ids is not None and len(matched_lang_ids) == 1:
args.language = matched_lang_ids[0]
log.info('chosen language: %s (%s)', args.language, language_dict[LanguageId(args.language)])
else:
if matched_lang_ids is None:
log.error('language is unknown')
log.info('supported languages are:')
elif len(matched_lang_ids) == 0:
log.error('no languages are matched')
log.info('supported languages are:')
else:
log.error('Matched languages were not narrowed down to one.')
log.info('You have to choose:')
for lang_id in sorted(matched_lang_ids or language_dict.keys()):
log.emit('%s (%s)', lang_id, language_dict[LanguageId(lang_id)])
sys.exit(1)
# confirm
guessed_unmatch = ([problem.get_url()] != guessed_urls)
if guessed_unmatch:
        samples_text = ('samples of "{}"'.format('", "'.join(guessed_urls)) if guessed_urls else 'no samples')
log.warning('the problem "%s" is specified to submit, but %s were downloaded in this directory. this may be mis-operation', problem.get_url(), samples_text)
if args.wait:
log.status('sleep(%.2f)', args.wait)
time.sleep(args.wait)
if not args.yes:
if guessed_unmatch:
problem_id = problem.get_url().rstrip('/').split('/')[-1].split('?')[-1] # this is too ad-hoc
key = problem_id[:3] + (problem_id[-1] if len(problem_id) >= 4 else '')
sys.stdout.write('Are you sure? Please type "{}" '.format(key))
sys.stdout.flush()
c = sys.stdin.readline().rstrip()
if c != key:
log.info('terminated.')
return
else:
sys.stdout.write('Are you sure? [y/N] ')
sys.stdout.flush()
c = sys.stdin.read(1)
if c.lower() != 'y':
log.info('terminated.')
return
# submit
try:
submission = problem.submit_code(code, language_id=LanguageId(args.language), session=sess)
except NotLoggedInError:
log.failure('login required')
sys.exit(1)
except SubmissionError:
log.failure('submission failed')
sys.exit(1)
# show result
if args.open:
browser = webbrowser.get()
log.status('open the submission page with browser')
opened = browser.open_new_tab(submission.get_url())
if not opened:
log.failure('failed to open the url. please set the $BROWSER envvar')
def select_ids_of_matched_languages(words: List[str], lang_ids: List[str], language_dict, split: bool = False, remove: bool = False) -> List[str]:
result = []
for lang_id in lang_ids:
desc = language_dict[lang_id].lower()
if split:
desc = desc.split()
pred = all([word.lower() in desc for word in words])
if remove:
pred = not pred
if pred:
result.append(lang_id)
return result
def is_cplusplus_description(description: str) -> bool:
# Here, 'clang' is not used as intended. Think about strings like "C++ (Clang)", "Clang++" (this includes "g++" as a substring), or "C (Clang)".
return 'c++' in description.lower() or 'g++' in description.lower()
def parse_cplusplus_compiler(description: str) -> str:
"""
:param description: must be for C++
"""
assert is_cplusplus_description(description)
if 'clang' in description.lower():
return 'clang'
if 'gcc' in description.lower() or 'g++' in description.lower():
return 'gcc'
return 'gcc' # by default
def parse_cplusplus_version(description: str) -> Optional[str]:
"""
:param description: must be for C++
"""
assert is_cplusplus_description(description)
match = re.search(r'[CG]\+\+\s?(\d\w)\b', description)
if match:
return match.group(1)
return None
def is_python_description(description: str) -> bool:
return 'python' in description.lower() or 'pypy' in description.lower()
def parse_python_version(description: str) -> Optional[int]:
"""
:param description: must be for Python
"""
assert is_python_description(description)
match = re.match(r'([23])\.(?:\d+(?:\.\d+)?|x)', description)
if match:
return int(match.group(1))
match = re.match(r'(?:Python|PyPy) *\(?([23])', description, re.IGNORECASE)
if match:
return int(match.group(1))
return None
def parse_python_interpreter(description: str) -> str:
"""
:param description: must be for Python
"""
assert is_python_description(description)
if 'pypy' in description.lower():
return 'pypy'
else:
return 'cpython'
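# Illustrative expectations for the parsing helpers above, using hypothetical
# description strings (not taken from any particular judge):
#     parse_cplusplus_compiler('C++14 (GCC 5.4.1)') -> 'gcc'
#     parse_cplusplus_version('C++14 (GCC 5.4.1)')  -> '14'
#     parse_python_version('Python3 (3.4.3)')       -> 3
#     parse_python_interpreter('PyPy3 (2.4.0)')     -> 'pypy'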
def guess_lang_ids_of_file(filename: pathlib.Path, code: bytes, language_dict, cxx_latest: bool = False, cxx_compiler: str = 'all', python_version: str = 'all', python_interpreter: str = 'all') -> List[str]:
assert cxx_compiler in ('gcc', 'clang', 'all')
assert python_version in ('2', '3', 'auto', 'all')
assert python_interpreter in ('cpython', 'pypy', 'all')
ext = filename.suffix
lang_ids = language_dict.keys()
log.debug('file extension: %s', ext)
ext = ext.lstrip('.')
if ext in ('cpp', 'cxx', 'cc', 'C'):
log.debug('language guessing: C++')
# memo: https://stackoverflow.com/questions/1545080/c-code-file-extension-cc-vs-cpp
lang_ids = list(filter(lambda lang_id: is_cplusplus_description(language_dict[lang_id]), lang_ids))
if not lang_ids:
return []
log.debug('all lang ids for C++: %s', lang_ids)
# compiler
found_gcc = False
found_clang = False
for lang_id in lang_ids:
compiler = parse_cplusplus_compiler(language_dict[lang_id])
if compiler == 'gcc':
found_gcc = True
elif compiler == 'clang':
found_clang = True
if found_gcc and found_clang:
log.status('both GCC and Clang are available for C++ compiler')
if cxx_compiler == 'gcc':
log.status('use: GCC')
lang_ids = list(filter(lambda lang_id: parse_cplusplus_compiler(language_dict[lang_id]) in ('gcc', None), lang_ids))
elif cxx_compiler == 'clang':
log.status('use: Clang')
lang_ids = list(filter(lambda lang_id: parse_cplusplus_compiler(language_dict[lang_id]) in ('clang', None), lang_ids))
else:
assert cxx_compiler == 'all'
log.debug('lang ids after compiler filter: %s', lang_ids)
# version
if cxx_latest:
saved_lang_ids = lang_ids
lang_ids = []
for compiler in ('gcc', 'clang'): # use the latest for each compiler
ids = list(filter(lambda lang_id: parse_cplusplus_compiler(language_dict[lang_id]) in (compiler, None), saved_lang_ids))
if not ids:
continue
ids.sort(key=lambda lang_id: (parse_cplusplus_version(language_dict[lang_id]) or '', language_dict[lang_id]))
lang_ids += [ids[-1]] # since C++11 < C++1y < ... as strings
log.debug('lang ids after version filter: %s', lang_ids)
assert lang_ids
lang_ids = sorted(set(lang_ids))
return lang_ids
elif ext == 'py':
log.debug('language guessing: Python')
# interpreter
lang_ids = list(filter(lambda lang_id: is_python_description(language_dict[lang_id]), lang_ids))
if any([parse_python_interpreter(language_dict[lang_id]) == 'pypy' for lang_id in lang_ids]):
log.status('PyPy is available for Python interpreter')
if python_interpreter != 'all':
lang_ids = list(filter(lambda lang_id: parse_python_interpreter(language_dict[lang_id]) == python_interpreter, lang_ids))
# version
three_found = False
two_found = False
for lang_id in lang_ids:
version = parse_python_version(language_dict[lang_id])
log.debug('%s (%s) is recognized as Python %s', lang_id, language_dict[lang_id], str(version or 'unknown'))
if version == 3:
three_found = True
if version == 2:
two_found = True
if two_found and three_found:
log.status('both Python2 and Python3 are available for version of Python')
if python_version in ('2', '3'):
versions = [int(python_version)] # type: List[Optional[int]]
elif python_version == 'all':
versions = [2, 3]
else:
assert python_version == 'auto'
lines = code.splitlines()
if code.startswith(b'#!'):
s = lines[0] # use shebang
else:
s = b'\n'.join(lines[:10] + lines[-5:]) # use modelines
versions = []
for version in (2, 3):
if re.search(r'python *(version:? *)?%d'.encode() % version, s.lower()):
versions += [version]
if not versions:
log.status('no version info in code')
versions = [3]
log.status('use: %s', ', '.join(map(str, versions)))
lang_ids = list(filter(lambda lang_id: parse_python_version(language_dict[lang_id]) in versions + [None], lang_ids))
lang_ids = sorted(set(lang_ids))
return lang_ids
else:
log.debug('language guessing: others')
table = [
{ 'names': [ 'awk' ], 'exts': [ 'awk' ] },
{ 'names': [ 'bash' ], 'exts': [ 'sh' ] },
{ 'names': [ 'brainfuck' ], 'exts': [ 'bf' ] },
{ 'names': [ 'c#' ], 'exts': [ 'cs' ] },
{ 'names': [ 'c' ], 'exts': [ 'c' ], 'split': True },
{ 'names': [ 'ceylon' ], 'exts': [ 'ceylon' ] },
{ 'names': [ 'clojure' ], 'exts': [ 'clj' ] },
{ 'names': [ 'common lisp' ], 'exts': [ 'lisp', 'lsp', 'cl' ] },
{ 'names': [ 'crystal' ], 'exts': [ 'cr' ] },
{ 'names': [ 'd' ], 'exts': [ 'd' ], 'split': True },
{ 'names': [ 'f#' ], 'exts': [ 'fs' ] },
{ 'names': [ 'fortran' ], 'exts': [ 'for', 'f', 'f90', 'f95', 'f03' ] },
{ 'names': [ 'go' ], 'exts': [ 'go' ], 'split': True },
{ 'names': [ 'haskell' ], 'exts': [ 'hs' ] },
{ 'names': [ 'java' ], 'exts': [ 'java' ] },
{ 'names': [ 'javascript' ], 'exts': [ 'js' ] },
{ 'names': [ 'julia' ], 'exts': [ 'jl' ] },
{ 'names': [ 'kotlin' ], 'exts': [ 'kt', 'kts' ] },
{ 'names': [ 'lua' ], 'exts': [ 'lua' ] },
{ 'names': [ 'nim' ], 'exts': [ 'nim' ] },
{ 'names': [ 'moonscript' ], 'exts': [ 'moon' ] },
{ 'names': [ 'objective-c' ], 'exts': [ 'm' ] },
{ 'names': [ 'ocaml' ], 'exts': [ 'ml' ] },
{ 'names': [ 'octave' ], 'exts': [ 'm' ] },
{ 'names': [ 'pascal' ], 'exts': [ 'pas' ] },
{ 'names': [ 'perl6' ], 'exts': [ 'p6', 'pl6', 'pm6' ] },
{ 'names': [ 'perl' ], 'exts': [ 'pl', 'pm' ], 'split': True },
{ 'names': [ 'php' ], 'exts': [ 'php' ] },
{ 'names': [ 'ruby' ], 'exts': [ 'rb' ] },
{ 'names': [ 'rust' ], 'exts': [ 'rs' ] },
{ 'names': [ 'scala' ], 'exts': [ 'scala' ] },
{ 'names': [ 'scheme' ], 'exts': [ 'scm' ] },
{ 'names': [ 'sed' ], 'exts': [ 'sed' ] },
{ 'names': [ 'standard ml' ], 'exts': [ 'sml' ] },
{ 'names': [ 'swift' ], 'exts': [ 'swift' ] },
{ 'names': [ 'text' ], 'exts': [ 'txt' ] },
{ 'names': [ 'typescript' ], 'exts': [ 'ts' ] },
{ 'names': [ 'unlambda' ], 'exts': [ 'unl' ] },
{ 'names': [ 'vim script' ], 'exts': [ 'vim' ] },
{ 'names': [ 'visual basic' ], 'exts': [ 'vb' ] },
] # type: List[Dict[str, Any]] # yapf: disable
lang_ids = []
for data in table:
if ext in data['exts']:
for name in data['names']:
lang_ids += select_ids_of_matched_languages([name], language_dict.keys(), language_dict=language_dict, split=data.get('split', False))
return sorted(set(lang_ids))
def format_code(code: bytes, dos2unix: bool = False, rstrip: bool = False) -> bytes:
if dos2unix:
log.status('dos2unix...')
code = code.replace(b'\r\n', b'\n')
if rstrip:
log.status('rstrip...')
code = code.rstrip()
return code
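# Minimal sketch of format_code() on a hypothetical buffer: CRLF endings are
# normalised and trailing whitespace is stripped when both flags are set, e.g.
#     format_code(b'print(1)\r\n\r\n', dos2unix=True, rstrip=True) -> b'print(1)'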
|
kmyk/online-judge-tools
|
onlinejudge_command/subcommand/submit.py
|
Python
|
mit
| 17,077
|
[
"CRYSTAL"
] |
d21d48015da328d1fb4117093a37bf93a83a5f81d077a25ed9b808c2f2ea8a3d
|
# -*- coding:utf-8 -*-
# Copyright (c) 2015, Galaxy Authors. All Rights Reserved
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Author: wangtaize@baidu.com
# Date: 2015-03-30
import logging
from common import http
from galaxy import wrapper
from bootstrap import settings
from console.taskgroup import helper
from console.service import decorator as s_decorator
from console.taskgroup import decorator as t_decorator
from django.db import transaction
from django.views.decorators.csrf import csrf_exempt
LOG = logging.getLogger("console")
# service group 0
SHOW_G_BYTES_LIMIT = 1024 * 1024 * 1024
def str_pretty(total_bytes):
if total_bytes < SHOW_G_BYTES_LIMIT:
return "%sM"%(total_bytes/(1024*1024))
return "%sG"%(total_bytes/(1024*1024*1024))
@t_decorator.task_group_id_required
def update_task_group(request):
pass
def get_task_status(request):
builder = http.ResponseBuilder()
id = request.GET.get('id',None)
agent = request.GET.get('agent',None)
master_addr = request.GET.get('master',None)
if not master_addr:
return builder.error('master is required').build_json()
galaxy = wrapper.Galaxy(master_addr,settings.GALAXY_CLIENT_BIN)
tasklist = []
if id :
status,tasklist = galaxy.list_task_by_job_id(id)
if not status:
return builder.error("fail to get task list")\
.build_json()
if agent:
status,tasklist = galaxy.list_task_by_host(agent)
if not status:
return builder.error("fail to get task list")\
.build_json()
statics = {"RUNNING":0,"DEPLOYING":0,"ERROR":0}
for task in tasklist:
task['mem_used'] = str_pretty(task['mem_used'])
task['mem_limit'] = str_pretty(task['mem_limit'])
task['cpu_used'] ="%0.2f"%(task['cpu_limit'] * task['cpu_used'])
if task['status'] in statics:
statics[task['status']] += 1
return builder.ok(data={'needInit':False,'taskList':tasklist,'statics':statics}).build_json()
def get_job_sched_history(request):
builder = http.ResponseBuilder()
id = request.GET.get('id',None)
master_addr = request.GET.get('master',None)
if not master_addr:
return builder.error('master is required').build_json()
galaxy = wrapper.Galaxy(master_addr,settings.GALAXY_CLIENT_BIN)
if not id :
return builder.error("id is required").build_json()
status,tasklist = galaxy.job_history(id)
for task in tasklist:
task['mem_used'] = str_pretty(task['mem_used'])
task['mem_limit'] = str_pretty(task['mem_limit'])
task['cpu_used'] ="%0.2f"%(task['cpu_limit'] * task['cpu_used'])
return builder.ok(data={'needInit':False,'taskList':tasklist}).build_json()
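# Illustrative note on str_pretty() above: the helper relies on integer
# division, so under Python 2 (which this 2015 Django code appears to target)
# str_pretty(512 * 1024 * 1024) -> "512M" and str_pretty(3 * 1024**3) -> "3G";
# under Python 3 the same calls would render "512.0M" / "3.0G" unless "//"
# were used instead of "/".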
|
leoYY/galaxy
|
console/backend/src/console/taskgroup/views.py
|
Python
|
bsd-3-clause
| 2,835
|
[
"Galaxy"
] |
eb65b2f71eb924000ed07712ced789cc94956eb43a28297c6ca30ec66167bc02
|
#!/usr/bin/env python
"""This example demonstrates the flow for retrieving a refresh token.
This tool can be used to conveniently create refresh tokens for later use with your web
application OAuth2 credentials.
To create a Reddit application visit the following link while logged into the account
you want to create a refresh token for: https://www.reddit.com/prefs/apps/
Create a "web app" with the redirect uri set to: http://localhost:8080
After the application is created, take note of:
- REDDIT_CLIENT_ID; the line just under "web app" in the upper left of the Reddit
Application
- REDDIT_CLIENT_SECRET; the value to the right of "secret"
Usage:
EXPORT praw_client_id=<REDDIT_CLIENT_ID>
EXPORT praw_client_secret=<REDDIT_CLIENT_SECRET>
python3 obtain_refresh_token.py
"""
import random
import socket
import sys
import praw
def main():
"""Provide the program's entry point when directly executed."""
scope_input = input(
"Enter a comma separated list of scopes, or `*` for all scopes: "
)
scopes = [scope.strip() for scope in scope_input.strip().split(",")]
reddit = praw.Reddit(
redirect_uri="http://localhost:8080",
user_agent="obtain_refresh_token/v0 by u/bboe",
)
state = str(random.randint(0, 65000))
url = reddit.auth.url(scopes, state, "permanent")
print(f"Now open this url in your browser: {url}")
client = receive_connection()
data = client.recv(1024).decode("utf-8")
param_tokens = data.split(" ", 2)[1].split("?", 1)[1].split("&")
params = {
key: value for (key, value) in [token.split("=") for token in param_tokens]
}
if state != params["state"]:
send_message(
client,
f"State mismatch. Expected: {state} Received: {params['state']}",
)
return 1
elif "error" in params:
send_message(client, params["error"])
return 1
refresh_token = reddit.auth.authorize(params["code"])
send_message(client, f"Refresh token: {refresh_token}")
return 0
def receive_connection():
"""Wait for and then return a connected socket..
Opens a TCP connection on port 8080, and waits for a single client.
"""
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(("localhost", 8080))
server.listen(1)
client = server.accept()[0]
server.close()
return client
def send_message(client, message):
"""Send message to client and close the connection."""
print(message)
client.send(f"HTTP/1.1 200 OK\r\n\r\n{message}".encode("utf-8"))
client.close()
if __name__ == "__main__":
sys.exit(main())
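# A small sketch (not part of the original example): exercising send_message()
# against an in-process socket pair instead of a real browser connection.
# _demo_send_message and its literals are illustration only; requires a
# platform where socket.socketpair() is available.
def _demo_send_message():
    a, b = socket.socketpair()
    send_message(a, "hello")  # prints "hello", replies with HTTP 200, closes `a`
    assert b.recv(1024).decode("utf-8") == "HTTP/1.1 200 OK\r\n\r\nhello"
    b.close()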
|
praw-dev/praw
|
docs/examples/obtain_refresh_token.py
|
Python
|
bsd-2-clause
| 2,728
|
[
"VisIt"
] |
a506bd9ddf25af6464f93da94d14a6a1b06104ab23e829db0b0acbbfd2f6bce1
|
"""
The reader for Gaussian input files
===================================
A primitive reader for Gaussian ``.gjf`` input files is defined here. Note that
basically it just read the atomic coordinate and the connectivity if possible.
And the atomic coordinate has to be in Cartesian format.
"""
import itertools
import numpy as np
from .structure import Atm, Structure
def get_gjf_sections(file_name):
"""Gets the sections of Gaussian input file
The Gaussian input file contains sections divided by blank lines. This
function will read the input file given by the input file name and return
the sections of the input file, with each section given as a list of
strings for the lines.
:param file_name: The name of the input file
:raises IOError: If the input file cannot be opened.
"""
try:
input_file = open(file_name, 'r')
except IOError as err:
raise IOError(
'The given Gaussian input file cannot be opened!\n' + str(err)
)
lines = [i.strip() for i in input_file]
return [
list(g) for k, g in itertools.groupby(lines, lambda x: x == '')
if not k
]
def parse_coord(lines):
"""Parses the atomic coordinate section of the gjf file
:param lines: The section for coordinates of the atoms
:raises ValueError: If the format is not correct
:returns: A pair, with the first field being the list of atomic
coordinates, the second being the lattice vectors (an empty list for
non-periodic systems).
"""
atms = []
# skip the charge and spin multiplicity
for i in lines[1:]:
fields = i.split()
symb = fields[0]
try:
coord = np.array(fields[1:4], dtype=np.float64)
except ValueError as verr:
raise ValueError(
'Corrupt atomic coordinate in gjf file:\n' + str(verr)
)
atms.append(Atm(symb=symb, coord=coord))
continue
latt_vecs = [i[1] for i in atms if i[0] == 'Tv']
return (
[i for i in atms if i[0] != 'Tv'],
latt_vecs
)
def parse_connectivity(lines):
"""Parses the connectivity section of the gjf file
:param lines: The lines of the section
:raises ValueError: if the format is not correct
"""
bonds = []
for l_i in lines:
fields = l_i.split()
try:
# Need to convert one-based input to zero-based indices
start = int(fields[0]) - 1
# Partition into pairs, based on the official recipe in
# itertools
it1 = iter(fields[1:])
it2 = it1
for conn in itertools.izip_longest(it1, it2, fillvalue=None):
end = int(conn[0]) - 1
bond_order = float(conn[1])
bonds.append(
(start, end, bond_order)
)
except ValueError as verr:
raise ValueError(
'Corrupt connectivity in gjf file:\n' + str(verr)
)
return bonds
def parse_gjf(file_name):
"""Parses a Gaussian gjf file based on the input file name
:param file_name: The name of the input file
:raises IOError: if the file cannot be opened
:raises ValueError: if the file is not of correct format
"""
sections = get_gjf_sections(file_name)
if len(sections) < 3:
raise ValueError('There is no atomic coordinate section in the input')
title = sections[1]
atms, latt_vecs = parse_coord(sections[2])
if len(sections) > 3:
bonds = parse_connectivity(sections[3])
else:
bonds = []
structure = Structure(title)
structure.extend_atms(atms)
structure.extend_bonds(bonds)
structure.set_latt_vecs(latt_vecs)
return structure
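# Sketch of the shared-iterator pairing idiom used by parse_connectivity above.
# Note: itertools.izip_longest is the Python 2 spelling; on Python 3 the same
# recipe reads itertools.zip_longest. With a hypothetical tail of fields
#     it = iter(['2', '1.0', '3', '1.5'])
#     list(itertools.izip_longest(it, it, fillvalue=None))
# the result is [('2', '1.0'), ('3', '1.5')], i.e. (bonded atom, bond order) pairs.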
|
tschijnmo/ccpoviz
|
ccpoviz/gjfreader.py
|
Python
|
mit
| 3,826
|
[
"Gaussian"
] |
452bd31b0ea9d10e9ae7e26f5cf0c76f26ac9412dfc1f064a29747ed1a15a32c
|
"""
Assessment of Generalized Estimating Equations using simulation.
Only Gaussian models are currently checked.
See the generated file "gee_simulation_check.txt" for results.
"""
from statsmodels.compat.python import lrange
import scipy.spatial.distance
import numpy as np
from itertools import product
from statsmodels.genmod.families import Gaussian
from statsmodels.genmod.generalized_estimating_equations import GEE
from statsmodels.genmod.cov_struct import Autoregressive, Nested
np.set_printoptions(formatter={'all': lambda x: "%8.3f" % x},
suppress=True)
OUT = open("gee_simulation_check.txt", "w")
class GEE_simulator(object):
#
# Parameters that must be defined
#
# Number of groups
ngroups = None
# Standard deviation of the pure errors
error_sd = None
# The regression coefficients
params = None
# The parameters defining the dependence structure
dparams = None
#
# Output parameters
#
# Matrix of exogenous data (rows are cases, columns are
# variables)
exog = None
# Matrix of endogenous data (len(endog) = exog.shape[0])
endog = None
# Matrix of time information (time.shape[0] = len(endog))
time = None
# Group labels (len(groups) = len(endog))
group = None
# Group sizes are random within this range
group_size_range = [4, 11]
# dparams_est is dparams with scale_inv appended
def print_dparams(self, dparams_est):
raise NotImplementedError
class AR_simulator(GEE_simulator):
# The distance function for determining AR correlations.
distfun = [lambda x, y: np.sqrt(np.sum((x-y)**2)),]
def print_dparams(self, dparams_est):
OUT.write("AR coefficient estimate: %8.4f\n" %
dparams_est[0])
OUT.write("AR coefficient truth: %8.4f\n" %
self.dparams[0])
OUT.write("Error variance estimate: %8.4f\n" %
dparams_est[1])
OUT.write("Error variance truth: %8.4f\n" %
self.error_sd**2)
OUT.write("\n")
def simulate(self):
endog, exog, group, time = [], [], [], []
for i in range(self.ngroups):
gsize = np.random.randint(self.group_size_range[0],
self.group_size_range[1])
group.append([i,] * gsize)
time1 = np.random.normal(size=(gsize,2))
time.append(time1)
exog1 = np.random.normal(size=(gsize, 5))
exog1[:,0] = 1
exog.append(exog1)
# Pairwise distances within the cluster
distances = scipy.spatial.distance.cdist(time1, time1,
self.distfun[0])
# Pairwise correlations within the cluster
correlations = self.dparams[0]**distances
correlations_sr = np.linalg.cholesky(correlations)
errors = np.dot(correlations_sr, np.random.normal(size=gsize))
endog1 = np.dot(exog1, self.params) + errors * self.error_sd
endog.append(endog1)
self.exog = np.concatenate(exog, axis=0)
self.endog = np.concatenate(endog)
self.time = np.concatenate(time, axis=0)
self.group = np.concatenate(group)
class Nested_simulator(GEE_simulator):
# Vector containing list of nest sizes (used instead of
# group_size_range).
nest_sizes = None
# Matrix of nest id's (an output parameter)
id_matrix = None
def print_dparams(self, dparams_est):
for j in range(len(self.nest_sizes)):
OUT.write("Nest %d variance estimate: %8.4f\n" % \
(j+1, dparams_est[j]))
OUT.write("Nest %d variance truth: %8.4f\n" % \
(j+1, self.dparams[j]))
OUT.write("Error variance estimate: %8.4f\n" % \
(dparams_est[-1] - sum(dparams_est[0:-1])))
OUT.write("Error variance truth: %8.4f\n" %
self.error_sd**2)
OUT.write("\n")
def simulate(self):
group_effect_var = self.dparams[0]
vcomp = self.dparams[1:]
vcomp.append(0)
endog, exog, group, id_matrix = [], [], [], []
for i in range(self.ngroups):
iterators = [lrange(n) for n in self.nest_sizes]
# The random effects
variances = [np.sqrt(v)*np.random.normal(size=n)
for v,n in zip(vcomp, self.nest_sizes)]
gpe = np.random.normal() * np.sqrt(group_effect_var)
nest_all = []
for j in self.nest_sizes:
nest_all.append(set())
for nest in product(*iterators):
group.append(i)
# The sum of all random effects that apply to this
# unit
ref = gpe + sum([v[j] for v,j in zip(variances, nest)])
exog1 = np.random.normal(size=5)
exog1[0] = 1
exog.append(exog1)
error = ref + self.error_sd * np.random.normal()
endog1 = np.dot(exog1, self.params) + error
endog.append(endog1)
for j in range(len(nest)):
nest_all[j].add(tuple(nest[0:j+1]))
nest1 = [len(x)-1 for x in nest_all]
id_matrix.append(nest1[0:-1])
self.exog = np.array(exog)
self.endog = np.array(endog)
self.group = np.array(group)
self.id_matrix = np.array(id_matrix)
self.time = np.zeros_like(self.endog)
def check_constraint(da, va, ga):
"""
Check the score testing of the parameter constraints.
"""
def gen_gendat_ar0(ar):
def gendat_ar0(msg = False):
ars = AR_simulator()
ars.ngroups = 200
ars.params = np.r_[0, -1, 1, 0, 0.5]
ars.error_sd = 2
ars.dparams = [ar,]
ars.simulate()
return ars, Autoregressive()
return gendat_ar0
def gen_gendat_ar1(ar):
def gendat_ar1():
ars = AR_simulator()
ars.ngroups = 200
ars.params = np.r_[0, -0.8, 1.2, 0, 0.5]
ars.error_sd = 2
ars.dparams = [ar,]
ars.simulate()
return ars, Autoregressive()
return gendat_ar1
def gendat_nested0():
ns = Nested_simulator()
ns.error_sd = 1.
ns.params = np.r_[0., 1, 1, -1, -1]
ns.ngroups = 50
ns.nest_sizes = [10, 5]
ns.dparams = [2., 1.]
ns.simulate()
return ns, Nested(ns.id_matrix)
def gendat_nested1():
ns = Nested_simulator()
ns.error_sd = 2.
ns.params = np.r_[0, 1, 1.3, -0.8, -1.2]
ns.ngroups = 50
ns.nest_sizes = [10, 5]
ns.dparams = [1., 3.]
ns.simulate()
return ns, Nested(ns.id_matrix)
nrep = 100
gendats = [gen_gendat_ar0(ar) for ar in (0, 0.3, 0.6)]
gendats.extend([gen_gendat_ar1(ar) for ar in (0, 0.3, 0.6)])
gendats.extend([gendat_nested0, gendat_nested1])
lhs = np.array([[0., 1, 1, 0, 0],])
rhs = np.r_[0.,]
# Loop over data generating models
for gendat in gendats:
pvalues = []
params = []
std_errors = []
dparams = []
for j in range(nrep):
da,va = gendat()
ga = Gaussian()
md = GEE(da.endog, da.exog, da.group, da.time, ga, va)
mdf = md.fit()
scale_inv = 1 / md.estimate_scale()
dparams.append(np.r_[va.dparams, scale_inv])
params.append(np.asarray(mdf.params))
std_errors.append(np.asarray(mdf.standard_errors))
da,va = gendat()
ga = Gaussian()
md = GEE(da.endog, da.exog, da.group, da.time, ga, va,
constraint=(lhs, rhs))
mdf = md.fit()
score = md.score_test_results
pvalue = score["p-value"]
pvalues.append(pvalue)
dparams_mean = np.array(sum(dparams) / len(dparams))
OUT.write("Checking dependence parameters:\n")
da.print_dparams(dparams_mean)
params = np.array(params)
eparams = params.mean(0)
sdparams = params.std(0)
std_errors = np.array(std_errors)
std_errors = std_errors.mean(0)
OUT.write("Checking parameter values:\n")
OUT.write("Observed: ")
OUT.write(np.array_str(eparams) + "\n")
OUT.write("Expected: ")
OUT.write(np.array_str(da.params) + "\n")
OUT.write("Absolute difference: ")
OUT.write(np.array_str(eparams - da.params) + "\n")
OUT.write("Relative difference: ")
OUT.write(np.array_str((eparams - da.params) / da.params) + "\n")
OUT.write("\n")
OUT.write("Checking standard errors\n")
OUT.write("Observed: ")
OUT.write(np.array_str(sdparams) + "\n")
OUT.write("Expected: ")
OUT.write(np.array_str(std_errors) + "\n")
OUT.write("Absolute difference: ")
OUT.write(np.array_str(sdparams - std_errors) + "\n")
OUT.write("Relative difference: ")
OUT.write(np.array_str((sdparams - std_errors) / std_errors) + "\n")
OUT.write("\n")
pvalues.sort()
OUT.write("Checking constrained estimation:\n")
OUT.write("Left hand side:\n")
OUT.write(np.array_str(lhs) + "\n")
OUT.write("Right hand side:\n")
OUT.write(np.array_str(rhs) + "\n")
OUT.write("Observed p-values Expected Null p-values\n")
for q in np.arange(0.1, 0.91, 0.1):
OUT.write("%20.3f %20.3f\n" % (pvalues[int(q*len(pvalues))], q))
OUT.write("=" * 80 + "\n\n")
OUT.close()
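# Standalone sketch of the correlated-error construction in
# AR_simulator.simulate above: pairwise distances -> AR-style correlation
# matrix -> Cholesky factor -> correlated normal errors. The helper name and
# defaults are illustrative; assumes NumPy >= 1.17 for default_rng.
def _ar_errors_demo(ar=0.6, gsize=4):
    from scipy.spatial.distance import cdist
    rng = np.random.default_rng(0)
    time1 = rng.normal(size=(gsize, 2))
    correlations = ar ** cdist(time1, time1)
    return np.dot(np.linalg.cholesky(correlations), rng.normal(size=gsize))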
|
bashtage/statsmodels
|
statsmodels/genmod/tests/gee_simulation_check.py
|
Python
|
bsd-3-clause
| 9,433
|
[
"Gaussian"
] |
89800b5891776c9b253d9e3499a300b84c469d0531d25964348f60660c3852d1
|
"""
A collection of utility functions and classes. Originally, many
(but not all) were from the Python Cookbook -- hence the name cbook.
This module is safe to import from anywhere within matplotlib;
it imports matplotlib only at runtime.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
from itertools import repeat
import collections
import datetime
import errno
from functools import reduce
import glob
import gzip
import io
import locale
import os
import re
import sys
import time
import traceback
import types
import warnings
from weakref import ref, WeakKeyDictionary
import numpy as np
import numpy.ma as ma
class MatplotlibDeprecationWarning(UserWarning):
"""
A class for issuing deprecation warnings for Matplotlib users.
In light of the fact that Python builtin DeprecationWarnings are ignored
by default as of Python 2.7 (see link below), this class was put in to
allow for the signaling of deprecation, but via UserWarnings which are not
ignored by default.
https://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x
"""
pass
mplDeprecation = MatplotlibDeprecationWarning
def _generate_deprecation_message(since, message='', name='',
alternative='', pending=False,
obj_type='attribute'):
if not message:
altmessage = ''
if pending:
message = (
'The %(func)s %(obj_type)s will be deprecated in a '
'future version.')
else:
message = (
'The %(func)s %(obj_type)s was deprecated in version '
'%(since)s.')
if alternative:
altmessage = ' Use %s instead.' % alternative
message = ((message % {
'func': name,
'name': name,
'alternative': alternative,
'obj_type': obj_type,
'since': since}) +
altmessage)
return message
def warn_deprecated(
since, message='', name='', alternative='', pending=False,
obj_type='attribute'):
"""
Used to display deprecation warning in a standard way.
Parameters
----------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(func)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function; if not provided the name
is automatically determined from the passed in function,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user about
this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
obj_type : str, optional
The object type being deprecated.
Examples
--------
Basic example::
# To warn of the deprecation of "matplotlib.name_of_module"
warn_deprecated('1.4.0', name='matplotlib.name_of_module',
obj_type='module')
"""
message = _generate_deprecation_message(
since, message, name, alternative, pending, obj_type)
warnings.warn(message, mplDeprecation, stacklevel=1)
def deprecated(since, message='', name='', alternative='', pending=False,
obj_type='function'):
"""
Decorator to mark a function as deprecated.
Parameters
----------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier `%(func)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function; if not provided the name
is automatically determined from the passed in function,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user about
this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
Examples
--------
Basic example::
@deprecated('1.4.0')
def the_function_to_deprecate():
pass
"""
def deprecate(func, message=message, name=name, alternative=alternative,
pending=pending):
import functools
import textwrap
if isinstance(func, classmethod):
func = func.__func__
is_classmethod = True
else:
is_classmethod = False
if not name:
name = func.__name__
message = _generate_deprecation_message(
since, message, name, alternative, pending, obj_type)
@functools.wraps(func)
def deprecated_func(*args, **kwargs):
warnings.warn(message, mplDeprecation, stacklevel=2)
return func(*args, **kwargs)
old_doc = deprecated_func.__doc__
if not old_doc:
old_doc = ''
old_doc = textwrap.dedent(old_doc).strip('\n')
message = message.strip()
new_doc = (('\n.. deprecated:: %(since)s'
'\n %(message)s\n\n' %
{'since': since, 'message': message}) + old_doc)
if not old_doc:
# This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
deprecated_func.__doc__ = new_doc
if is_classmethod:
deprecated_func = classmethod(deprecated_func)
return deprecated_func
return deprecate
# On some systems, locale.getpreferredencoding returns None,
# which can break unicode; and the sage project reports that
# some systems have incorrect locale specifications, e.g.,
# an encoding instead of a valid locale name. Another
# pathological case that has been reported is an empty string.
# On some systems, getpreferredencoding sets the locale, which has
# side effects. Passing False eliminates those side effects.
def unicode_safe(s):
import matplotlib
if isinstance(s, bytes):
try:
preferredencoding = locale.getpreferredencoding(
matplotlib.rcParams['axes.formatter.use_locale']).strip()
if not preferredencoding:
preferredencoding = None
except (ValueError, ImportError, AttributeError):
preferredencoding = None
if preferredencoding is None:
return six.text_type(s)
else:
return six.text_type(s, preferredencoding)
return s
class converter(object):
"""
Base class for handling string -> python type with support for
missing values
"""
def __init__(self, missing='Null', missingval=None):
self.missing = missing
self.missingval = missingval
def __call__(self, s):
if s == self.missing:
return self.missingval
return s
def is_missing(self, s):
return not s.strip() or s == self.missing
class tostr(converter):
"""convert to string or None"""
def __init__(self, missing='Null', missingval=''):
converter.__init__(self, missing=missing, missingval=missingval)
class todatetime(converter):
"""convert to a datetime or None"""
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s):
return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.datetime(*tup[:6])
class todate(converter):
"""convert to a date or None"""
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
"""use a :func:`time.strptime` format string for conversion"""
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s):
return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.date(*tup[:3])
class tofloat(converter):
"""convert to a float or None"""
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
self.missingval = missingval
def __call__(self, s):
if self.is_missing(s):
return self.missingval
return float(s)
class toint(converter):
"""convert to an int or None"""
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
self.missingval = missingval
def __call__(self, s):
if self.is_missing(s):
return self.missingval
return int(s)
class _BoundMethodProxy(object):
"""
Our own proxy object which enables weak references to bound and unbound
methods and arbitrary callables. Pulls information about the function,
class, and instance out of a bound method. Stores a weak reference to the
instance to support garbage collection.
@organization: IBM Corporation
@copyright: Copyright (c) 2005, 2006 IBM Corporation
@license: The BSD License
Minor bugfixes by Michael Droettboom
"""
def __init__(self, cb):
self._hash = hash(cb)
self._destroy_callbacks = []
try:
try:
if six.PY3:
self.inst = ref(cb.__self__, self._destroy)
else:
self.inst = ref(cb.im_self, self._destroy)
except TypeError:
self.inst = None
if six.PY3:
self.func = cb.__func__
self.klass = cb.__self__.__class__
else:
self.func = cb.im_func
self.klass = cb.im_class
except AttributeError:
self.inst = None
self.func = cb
self.klass = None
def add_destroy_callback(self, callback):
self._destroy_callbacks.append(_BoundMethodProxy(callback))
def _destroy(self, wk):
for callback in self._destroy_callbacks:
try:
callback(self)
except ReferenceError:
pass
def __getstate__(self):
d = self.__dict__.copy()
# de-weak reference inst
inst = d['inst']
if inst is not None:
d['inst'] = inst()
return d
def __setstate__(self, statedict):
self.__dict__ = statedict
inst = statedict['inst']
# turn inst back into a weakref
if inst is not None:
self.inst = ref(inst)
def __call__(self, *args, **kwargs):
"""
Proxy for a call to the weak referenced object. Take
arbitrary params to pass to the callable.
Raises `ReferenceError`: When the weak reference refers to
a dead object
"""
if self.inst is not None and self.inst() is None:
raise ReferenceError
elif self.inst is not None:
# build a new instance method with a strong reference to the
# instance
mtd = types.MethodType(self.func, self.inst())
else:
# not a bound method, just return the func
mtd = self.func
# invoke the callable and return the result
return mtd(*args, **kwargs)
def __eq__(self, other):
"""
Compare the held function and instance with that held by
another proxy.
"""
try:
if self.inst is None:
return self.func == other.func and other.inst is None
else:
return self.func == other.func and self.inst() == other.inst()
except Exception:
return False
def __ne__(self, other):
"""
Inverse of __eq__.
"""
return not self.__eq__(other)
def __hash__(self):
return self._hash
class CallbackRegistry(object):
"""
Handle registering and disconnecting for a set of signals and
callbacks:
>>> def oneat(x):
... print('eat', x)
>>> def ondrink(x):
... print('drink', x)
>>> from matplotlib.cbook import CallbackRegistry
>>> callbacks = CallbackRegistry()
>>> id_eat = callbacks.connect('eat', oneat)
>>> id_drink = callbacks.connect('drink', ondrink)
>>> callbacks.process('drink', 123)
drink 123
>>> callbacks.process('eat', 456)
eat 456
>>> callbacks.process('be merry', 456) # nothing will be called
>>> callbacks.disconnect(id_eat)
>>> callbacks.process('eat', 456) # nothing will be called
In practice, one should always disconnect all callbacks when they
are no longer needed to avoid dangling references (and thus memory
leaks). However, real code in matplotlib rarely does so, and due
to its design, it is rather difficult to place this kind of code.
To get around this, and prevent this class of memory leaks, we
instead store weak references to bound methods only, so when the
destination object needs to die, the CallbackRegistry won't keep
it alive. The Python stdlib weakref module can not create weak
references to bound methods directly, so we need to create a proxy
object to handle weak references to bound methods (or regular free
functions). This technique was shared by Peter Parente on his
`"Mindtrove" blog
<http://mindtrove.info/python-weak-references/>`_.
"""
def __init__(self):
self.callbacks = dict()
self._cid = 0
self._func_cid_map = {}
def __getstate__(self):
# We cannot currently pickle the callables in the registry, so
# return an empty dictionary.
return {}
def __setstate__(self, state):
# re-initialise an empty callback registry
self.__init__()
def connect(self, s, func):
"""
register *func* to be called when a signal *s* is generated
func will be called
"""
self._func_cid_map.setdefault(s, WeakKeyDictionary())
# Note proxy not needed in python 3.
# TODO rewrite this when support for python2.x gets dropped.
proxy = _BoundMethodProxy(func)
if proxy in self._func_cid_map[s]:
return self._func_cid_map[s][proxy]
proxy.add_destroy_callback(self._remove_proxy)
self._cid += 1
cid = self._cid
self._func_cid_map[s][proxy] = cid
self.callbacks.setdefault(s, dict())
self.callbacks[s][cid] = proxy
return cid
def _remove_proxy(self, proxy):
for signal, proxies in list(six.iteritems(self._func_cid_map)):
try:
del self.callbacks[signal][proxies[proxy]]
except KeyError:
pass
if len(self.callbacks[signal]) == 0:
del self.callbacks[signal]
del self._func_cid_map[signal]
def disconnect(self, cid):
"""
disconnect the callback registered with callback id *cid*
"""
for eventname, callbackd in list(six.iteritems(self.callbacks)):
try:
del callbackd[cid]
except KeyError:
continue
else:
for signal, functions in list(
six.iteritems(self._func_cid_map)):
for function, value in list(six.iteritems(functions)):
if value == cid:
del functions[function]
return
def process(self, s, *args, **kwargs):
"""
process signal *s*. All of the functions registered to receive
callbacks on *s* will be called with *\*args* and *\*\*kwargs*
"""
if s in self.callbacks:
for cid, proxy in list(six.iteritems(self.callbacks[s])):
try:
proxy(*args, **kwargs)
except ReferenceError:
self._remove_proxy(proxy)
class silent_list(list):
"""
override repr when returning a list of matplotlib artists to
prevent long, meaningless output. This is meant to be used for a
homogeneous list of a given type
"""
def __init__(self, type, seq=None):
self.type = type
if seq is not None:
self.extend(seq)
def __repr__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def __str__(self):
return repr(self)
def __getstate__(self):
# store a dictionary of this SilentList's state
return {'type': self.type, 'seq': self[:]}
def __setstate__(self, state):
self.type = state['type']
self.extend(state['seq'])
class IgnoredKeywordWarning(UserWarning):
"""
A class for issuing warnings about keyword arguments that will be ignored
by matplotlib
"""
pass
def local_over_kwdict(local_var, kwargs, *keys):
"""
Enforces the priority of a local variable over potentially conflicting
argument(s) from a kwargs dict. The following possible output values are
considered in order of priority:
local_var > kwargs[keys[0]] > ... > kwargs[keys[-1]]
The first of these whose value is not None will be returned. If all are
None then None will be returned. Each key in keys will be removed from the
kwargs dict in place.
Parameters
----------
local_var: any object
The local variable (highest priority)
kwargs: dict
Dictionary of keyword arguments; modified in place
keys: str(s)
Name(s) of keyword arguments to process, in descending order of
priority
Returns
-------
out: any object
Either local_var or one of kwargs[key] for key in keys
Raises
------
IgnoredKeywordWarning
For each key in keys that is removed from kwargs but not used as
the output value
"""
out = local_var
for key in keys:
kwarg_val = kwargs.pop(key, None)
if kwarg_val is not None:
if out is None:
out = kwarg_val
else:
warnings.warn('"%s" keyword argument will be ignored' % key,
IgnoredKeywordWarning)
return out
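# Illustrative resolution order for local_over_kwdict (hypothetical call):
#     local_over_kwdict(None, {'cmap': 'viridis', 'colormap': 'jet'}, 'cmap', 'colormap')
# returns 'viridis', removes both keys from the dict, and emits an
# IgnoredKeywordWarning for the unused 'colormap' value.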
def strip_math(s):
"""remove latex formatting from mathtext"""
remove = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it', '\\', '{', '}')
s = s[1:-1]
for r in remove:
s = s.replace(r, '')
return s
class Bunch(object):
"""
Often we want to just collect a bunch of stuff together, naming each
item of the bunch; a dictionary's OK for that, but a small do-nothing
class is even handier, and prettier to use. Whenever you want to
group a few variables::
>>> point = Bunch(datum=2, squared=4, coord=12)
>>> point.datum
By: Alex Martelli
From: https://code.activestate.com/recipes/121294/
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
keys = six.iterkeys(self.__dict__)
return 'Bunch(%s)' % ', '.join(['%s=%s' % (k, self.__dict__[k])
for k
in keys])
def unique(x):
"""Return a list of unique elements of *x*"""
return list(six.iterkeys(dict([(val, 1) for val in x])))
def iterable(obj):
"""return true if *obj* is iterable"""
try:
iter(obj)
except TypeError:
return False
return True
def is_string_like(obj):
"""Return True if *obj* looks like a string"""
if isinstance(obj, six.string_types):
return True
# numpy strings are subclass of str, ma strings are not
if ma.isMaskedArray(obj):
if obj.ndim == 0 and obj.dtype.kind in 'SU':
return True
else:
return False
try:
obj + ''
except:
return False
return True
def is_sequence_of_strings(obj):
"""Returns true if *obj* is iterable and contains strings"""
if not iterable(obj):
return False
if is_string_like(obj) and not isinstance(obj, np.ndarray):
try:
obj = obj.values
except AttributeError:
# not pandas
return False
for o in obj:
if not is_string_like(o):
return False
return True
def is_hashable(obj):
"""Returns true if *obj* can be hashed"""
try:
hash(obj)
except TypeError:
return False
return True
def is_writable_file_like(obj):
"""return true if *obj* looks like a file object with a *write* method"""
return hasattr(obj, 'write') and six.callable(obj.write)
def file_requires_unicode(x):
"""
Returns `True` if the given writable file-like object requires Unicode
to be written to it.
"""
try:
x.write(b'')
except TypeError:
return True
else:
return False
def is_scalar(obj):
"""return true if *obj* is not string like and is not iterable"""
return not is_string_like(obj) and not iterable(obj)
def is_numlike(obj):
"""return true if *obj* looks like a number"""
try:
obj + 1
except:
return False
else:
return True
def to_filehandle(fname, flag='rU', return_opened=False):
"""
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in .gz. *flag* is a
read/write flag for :func:`file`
"""
if is_string_like(fname):
if fname.endswith('.gz'):
# get rid of 'U' in flag for gzipped files.
flag = flag.replace('U', '')
fh = gzip.open(fname, flag)
elif fname.endswith('.bz2'):
# get rid of 'U' in flag for bz2 files
flag = flag.replace('U', '')
import bz2
fh = bz2.BZ2File(fname, flag)
else:
fh = open(fname, flag)
opened = True
elif hasattr(fname, 'seek'):
fh = fname
opened = False
else:
raise ValueError('fname must be a string or file handle')
if return_opened:
return fh, opened
return fh
def is_scalar_or_string(val):
"""Return whether the given object is a scalar or string like."""
return is_string_like(val) or not iterable(val)
def _string_to_bool(s):
if not is_string_like(s):
return s
if s == 'on':
return True
if s == 'off':
return False
raise ValueError("string argument must be either 'on' or 'off'")
def get_sample_data(fname, asfileobj=True):
"""
Return a sample data file. *fname* is a path relative to the
`mpl-data/sample_data` directory. If *asfileobj* is `True`
return a file object, otherwise just a file path.
Set the rc parameter examples.directory to the directory where we should
look, if sample_data files are stored in a location different than
default (which is 'mpl-data/sample_data` at the same level of 'matplotlib`
Python module files).
If the filename ends in .gz, the file is implicitly ungzipped.
"""
import matplotlib
if matplotlib.rcParams['examples.directory']:
root = matplotlib.rcParams['examples.directory']
else:
root = os.path.join(matplotlib._get_data_path(), 'sample_data')
path = os.path.join(root, fname)
if asfileobj:
if (os.path.splitext(fname)[-1].lower() in
('.csv', '.xrc', '.txt')):
mode = 'r'
else:
mode = 'rb'
base, ext = os.path.splitext(fname)
if ext == '.gz':
return gzip.open(path, mode)
else:
return open(path, mode)
else:
return path
def flatten(seq, scalarp=is_scalar_or_string):
"""
Returns a generator of flattened nested containers
For example:
>>> from matplotlib.cbook import flatten
>>> l = (('John', ['Hunter']), (1, 23), [[([42, (5, 23)], )]])
>>> print(list(flatten(l)))
['John', 'Hunter', 1, 23, 42, 5, 23]
By: Composite of Holger Krekel and Luther Blissett
From: https://code.activestate.com/recipes/121294/
and Recipe 1.12 in cookbook
"""
for item in seq:
if scalarp(item):
yield item
else:
for subitem in flatten(item, scalarp):
yield subitem
class Sorter(object):
"""
Sort by attribute or item
Example usage::
sort = Sorter()
list = [(1, 2), (4, 8), (0, 3)]
dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
{'a': 9, 'b': 9}]
sort(list) # default sort
sort(list, 1) # sort by index 1
sort(dict, 'a') # sort a list of dicts by key 'a'
"""
def _helper(self, data, aux, inplace):
aux.sort()
result = [data[i] for junk, i in aux]
if inplace:
data[:] = result
return result
def byItem(self, data, itemindex=None, inplace=1):
if itemindex is None:
if inplace:
data.sort()
result = data
else:
result = data[:]
result.sort()
return result
else:
aux = [(data[i][itemindex], i) for i in range(len(data))]
return self._helper(data, aux, inplace)
def byAttribute(self, data, attributename, inplace=1):
aux = [(getattr(data[i], attributename), i) for i in range(len(data))]
return self._helper(data, aux, inplace)
# a couple of handy synonyms
sort = byItem
__call__ = byItem
class Xlator(dict):
"""
All-in-one multiple-string-substitution class
Example usage::
text = "Larry Wall is the creator of Perl"
adict = {
"Larry Wall" : "Guido van Rossum",
"creator" : "Benevolent Dictator for Life",
"Perl" : "Python",
}
print(multiple_replace(adict, text))
xlat = Xlator(adict)
print(xlat.xlat(text))
"""
def _make_regex(self):
""" Build re object based on the keys of the current dictionary """
return re.compile("|".join(map(re.escape, list(six.iterkeys(self)))))
def __call__(self, match):
""" Handler invoked for each regex *match* """
return self[match.group(0)]
def xlat(self, text):
""" Translate *text*, returns the modified text. """
return self._make_regex().sub(self, text)
def soundex(name, len=4):
""" soundex module conforming to Odell-Russell algorithm """
# digits holds the soundex values for the alphabet
soundex_digits = '01230120022455012623010202'
sndx = ''
fc = ''
# Translate letters in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc:
fc = c # Remember first letter
d = soundex_digits[ord(c) - ord('A')]
# Duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# Replace first digit with first letter
sndx = fc + sndx[1:]
# Remove all 0s from the soundex code
sndx = sndx.replace('0', '')
# Return soundex code truncated or 0-padded to len characters
return (sndx + (len * '0'))[:len]
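# Reference check for soundex() above (a standard Odell-Russell test value,
# worked by hand): soundex('Robert') -> 'R163' with the default length of 4.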
class Null(object):
""" Null objects always and reliably "do nothing." """
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __str__(self):
return "Null()"
def __repr__(self):
return "Null()"
if six.PY3:
def __bool__(self):
return False  # __bool__ must return a bool in Python 3
else:
def __nonzero__(self):
return 0
def __getattr__(self, name):
return self
def __setattr__(self, name, value):
return self
def __delattr__(self, name):
return self
def mkdirs(newdir, mode=0o777):
"""
make directory *newdir* recursively, and set *mode*. Equivalent to ::
> mkdir -p NEWDIR
> chmod MODE NEWDIR
"""
# this functionality is now in core python as of 3.2
# LPY DROP
if six.PY3:
os.makedirs(newdir, mode=mode, exist_ok=True)
else:
try:
os.makedirs(newdir, mode=mode)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
class GetRealpathAndStat(object):
def __init__(self):
self._cache = {}
def __call__(self, path):
result = self._cache.get(path)
if result is None:
realpath = os.path.realpath(path)
if sys.platform == 'win32':
stat_key = realpath
else:
stat = os.stat(realpath)
stat_key = (stat.st_ino, stat.st_dev)
result = realpath, stat_key
self._cache[path] = result
return result
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
"""delete all of the *keys* from the :class:`dict` *d*"""
for key in keys:
try:
del d[key]
except KeyError:
pass
class RingBuffer(object):
""" class that implements a not-yet-full buffer """
def __init__(self, size_max):
self.max = size_max
self.data = []
class __Full:
""" class that implements a full buffer """
def append(self, x):
""" Append an element overwriting the oldest one. """
self.data[self.cur] = x
self.cur = (self.cur + 1) % self.max
def get(self):
""" return list of elements in correct order """
return self.data[self.cur:] + self.data[:self.cur]
def append(self, x):
"""append an element at the end of the buffer"""
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
# Permanently change self's class from non-full to full
# (qualified through RingBuffer so the name-mangled private class resolves)
self.__class__ = RingBuffer.__Full
def get(self):
""" Return a list of elements from the oldest to the newest. """
return self.data
def __getitem__(self, i):
return self.data[i % len(self.data)]
def get_split_ind(seq, N):
"""
*seq* is a list of words. Return the index into seq such that::
len(' '.join(seq[:ind])) <= N
"""
s_len = 0
# todo: use Alex's xrange pattern from the cbook for efficiency
for (word, ind) in zip(seq, xrange(len(seq))):
s_len += len(word) + 1 # +1 to account for the len(' ')
if s_len >= N:
return ind
return len(seq)
def wrap(prefix, text, cols):
"""wrap *text* with *prefix* at length *cols*"""
pad = ' ' * len(prefix.expandtabs())
available = cols - len(pad)
seq = text.split(' ')
Nseq = len(seq)
ind = 0
lines = []
while ind < Nseq:
lastInd = ind
ind += get_split_ind(seq[ind:], available)
lines.append(seq[lastInd:ind])
# add the prefix to the first line, pad with spaces otherwise
ret = prefix + ' '.join(lines[0]) + '\n'
for line in lines[1:]:
ret += pad + ' '.join(line) + '\n'
return ret
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile("(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
"""
Remove excess indentation from docstring *s*.
Discards any leading blank lines, then removes up to n whitespace
characters from each line, where n is the number of leading
whitespace characters in the first line. It differs from
textwrap.dedent in its deletion of leading blank lines and its use
of the first non-blank line to determine the indentation.
It is also faster in most cases.
"""
# This implementation has a somewhat obtuse use of regular
# expressions. However, this function accounted for almost 30% of
# matplotlib startup time, so it is worthy of optimization at all
# costs.
if not s: # includes case of s is None
return ''
match = _find_dedent_regex.match(s)
if match is None:
return s
# This is the number of spaces to remove from the left-hand side.
nshift = match.end(1) - match.start(1)
if nshift == 0:
return s
# Get a regex that will remove *up to* nshift spaces from the
# beginning of each line. If it isn't in the cache, generate it.
unindent = _dedent_regex.get(nshift, None)
if unindent is None:
unindent = re.compile("\n\r? {0,%d}" % nshift)
_dedent_regex[nshift] = unindent
result = unindent.sub("\n", s).strip()
return result
def listFiles(root, patterns='*', recurse=1, return_folders=0):
"""
Recursively list files
from Parmar and Martelli in the Python Cookbook
"""
import os.path
import fnmatch
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
results = []
for dirname, dirs, files in os.walk(root):
# Append to results all relevant files (and perhaps folders)
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
if return_folders or os.path.isfile(fullname):
for pattern in pattern_list:
if fnmatch.fnmatch(name, pattern):
results.append(fullname)
break
# Block recursion if recursion was disallowed
if not recurse:
break
return results
def get_recursive_filelist(args):
"""
Recurse all the files and dirs in *args* ignoring symbolic links
and return the files as a list of strings
"""
files = []
for arg in args:
if os.path.isfile(arg):
files.append(arg)
continue
if os.path.isdir(arg):
newfiles = listFiles(arg, recurse=1, return_folders=1)
files.extend(newfiles)
return [f for f in files if not os.path.islink(f)]
def pieces(seq, num=2):
"""Break up the *seq* into *num* tuples"""
start = 0
while 1:
item = seq[start:start + num]
if not len(item):
break
yield item
start += num
def exception_to_str(s=None):
if six.PY3:
sh = io.StringIO()
else:
sh = io.BytesIO()
if s is not None:
print(s, file=sh)
traceback.print_exc(file=sh)
return sh.getvalue()
def allequal(seq):
"""
Return *True* if all elements of *seq* compare equal. If *seq* is
0 or 1 length, return *True*
"""
if len(seq) < 2:
return True
val = seq[0]
for i in xrange(1, len(seq)):
thisval = seq[i]
if thisval != val:
return False
return True
def alltrue(seq):
"""
Return *True* if all elements of *seq* evaluate to *True*. If
*seq* is empty, return *False*.
"""
if not len(seq):
return False
for val in seq:
if not val:
return False
return True
def onetrue(seq):
"""
Return *True* if one element of *seq* is *True*. If *seq* is
empty, return *False*.
"""
if not len(seq):
return False
for val in seq:
if val:
return True
return False
def allpairs(x):
"""
return all possible pairs in sequence *x*
Condensed by Alex Martelli from this thread_ on c.l.python
.. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
"""
return [(s, f) for i, f in enumerate(x) for s in x[i + 1:]]
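# e.g. allpairs([1, 2, 3]) -> [(2, 1), (3, 1), (3, 2)]; note the later element
# of each pair comes first, matching the comprehension above.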
class maxdict(dict):
"""
A dictionary with a maximum size; this doesn't override all the
relevant methods to constrain the size, just setitem, so use with
caution
"""
def __init__(self, maxsize):
dict.__init__(self)
self.maxsize = maxsize
self._killkeys = []
def __setitem__(self, k, v):
if k not in self:
if len(self) >= self.maxsize:
del self[self._killkeys[0]]
del self._killkeys[0]
self._killkeys.append(k)
dict.__setitem__(self, k, v)
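# Illustrative eviction behaviour (hypothetical): with d = maxdict(2), setting
# d['a'], d['b'] and then d['c'] deletes the oldest key 'a', leaving only
# 'b' and 'c' in the dict.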
class Stack(object):
"""
Implement a stack where elements can be pushed on and you can move
back and forth. But no pop. Should mimic home / back / forward
in a browser
"""
def __init__(self, default=None):
self.clear()
self._default = default
def __call__(self):
"""return the current element, or None"""
if not len(self._elements):
return self._default
else:
return self._elements[self._pos]
def __len__(self):
return self._elements.__len__()
def __getitem__(self, ind):
return self._elements.__getitem__(ind)
def forward(self):
"""move the position forward and return the current element"""
n = len(self._elements)
if self._pos < n - 1:
self._pos += 1
return self()
def back(self):
"""move the position back and return the current element"""
if self._pos > 0:
self._pos -= 1
return self()
def push(self, o):
"""
push object onto stack at current position - all elements
occurring later than the current position are discarded
"""
self._elements = self._elements[:self._pos + 1]
self._elements.append(o)
self._pos = len(self._elements) - 1
return self()
def home(self):
"""push the first element onto the top of the stack"""
if not len(self._elements):
return
self.push(self._elements[0])
return self()
def empty(self):
return len(self._elements) == 0
def clear(self):
"""empty the stack"""
self._pos = -1
self._elements = []
def bubble(self, o):
"""
raise *o* to the top of the stack and return *o*. *o* must be
in the stack
"""
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
bubbles = []
for thiso in old:
if thiso == o:
bubbles.append(thiso)
else:
self.push(thiso)
for thiso in bubbles:
self.push(o)
return o
def remove(self, o):
'remove element *o* from the stack'
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
for thiso in old:
if thiso == o:
continue
else:
self.push(thiso)
def popall(seq):
'empty a list'
for i in xrange(len(seq)):
seq.pop()
def finddir(o, match, case=False):
"""
return all attributes of *o* which match string in match. if case
is True require an exact case match.
"""
if case:
names = [(name, name) for name in dir(o) if is_string_like(name)]
else:
names = [(name.lower(), name) for name in dir(o)
if is_string_like(name)]
match = match.lower()
return [orig for name, orig in names if name.find(match) >= 0]
def reverse_dict(d):
"""reverse the dictionary -- may lose data if values are not unique!"""
return dict([(v, k) for k, v in six.iteritems(d)])
def restrict_dict(d, keys):
"""
Return a dictionary that contains those keys that appear in both
d and keys, with values from d.
"""
return dict([(k, v) for (k, v) in six.iteritems(d) if k in keys])
def report_memory(i=0): # argument may go away
"""return the memory consumed by process"""
from matplotlib.compat.subprocess import Popen, PIPE
pid = os.getpid()
if sys.platform == 'sunos5':
try:
a2 = Popen(str('ps -p %d -o osz') % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Sun OS only if "
"the 'ps' program is found")
mem = int(a2[-1].strip())
elif sys.platform.startswith('linux'):
try:
a2 = Popen(str('ps -p %d -o rss,sz') % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Linux only if "
"the 'ps' program is found")
mem = int(a2[1].split()[1])
elif sys.platform.startswith('darwin'):
try:
a2 = Popen(str('ps -p %d -o rss,vsz') % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Mac OS only if "
"the 'ps' program is found")
mem = int(a2[1].split()[0])
elif sys.platform.startswith('win'):
try:
a2 = Popen([str("tasklist"), "/nh", "/fi", "pid eq %d" % pid],
stdout=PIPE).stdout.read()
except OSError:
raise NotImplementedError(
"report_memory works on Windows only if "
"the 'tasklist' program is found")
mem = int(a2.strip().split()[-2].replace(',', ''))
else:
raise NotImplementedError(
"We don't have a memory monitor for %s" % sys.platform)
return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
def safezip(*args):
"""make sure *args* are equal len before zipping"""
Nx = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != Nx:
raise ValueError(_safezip_msg % (Nx, i + 1, len(arg)))
return list(zip(*args))
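# Sketch of safezip behaviour with made-up data: equal lengths zip as
# usual, unequal lengths raise immediately instead of silently truncating.
def _safezip_example():
    assert safezip([1, 2], [3, 4]) == [(1, 3), (2, 4)]
    try:
        safezip([1, 2], [3])
    except ValueError:
        pass                     # mismatched lengths are rejected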
def issubclass_safe(x, klass):
"""return issubclass(x, klass) and return False on a TypeError"""
try:
return issubclass(x, klass)
except TypeError:
return False
def safe_masked_invalid(x, copy=False):
x = np.array(x, subok=True, copy=copy)
if not x.dtype.isnative:
# Note that the argument to `byteswap` is 'inplace',
# thus if we have already made a copy, do the byteswap in
# place, else make a copy with the byte order swapped.
# Be explicit that we are swapping the byte order of the dtype
x = x.byteswap(copy).newbyteorder('S')
try:
xm = np.ma.masked_invalid(x, copy=False)
xm.shrink_mask()
except TypeError:
return x
return xm
class MemoryMonitor(object):
def __init__(self, nmax=20000):
self._nmax = nmax
self._mem = np.zeros((self._nmax,), np.int32)
self.clear()
def clear(self):
self._n = 0
self._overflow = False
def __call__(self):
mem = report_memory()
if self._n < self._nmax:
self._mem[self._n] = mem
self._n += 1
else:
self._overflow = True
return mem
def report(self, segments=4):
n = self._n
segments = min(n, segments)
dn = int(n / segments)
ii = list(xrange(0, n, dn))
ii[-1] = n - 1
print()
print('memory report: i, mem, dmem, dmem/nloops')
print(0, self._mem[0])
for i in range(1, len(ii)):
di = ii[i] - ii[i - 1]
if di == 0:
continue
dm = self._mem[ii[i]] - self._mem[ii[i - 1]]
print('%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
dm, dm / float(di)))
if self._overflow:
print("Warning: array size was too small for the number of calls.")
def xy(self, i0=0, isub=1):
x = np.arange(i0, self._n, isub)
return x, self._mem[i0:self._n:isub]
def plot(self, i0=0, isub=1, fig=None):
if fig is None:
from .pylab import figure
fig = figure()
ax = fig.add_subplot(111)
ax.plot(*self.xy(i0, isub))
fig.canvas.draw()
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
"""
*objects*
A list of objects to find cycles in. It is often useful to
pass in gc.garbage to find the cycles that are preventing some
objects from being garbage collected.
*outstream*
The stream for output.
*show_progress*
If True, print the number of objects reached as they are found.
"""
import gc
from types import FrameType
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % str(type(step)))
if isinstance(step, dict):
for key, val in six.iteritems(step):
if val is next:
outstream.write("[%s]" % repr(key))
break
if key is next:
outstream.write("[key] = %s" % repr(val))
break
elif isinstance(step, list):
outstream.write("[%d]" % step.index(next))
elif isinstance(step, tuple):
outstream.write("( tuple )")
else:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + [obj])
for obj in objects:
outstream.write("Examining: %r\n" % (obj,))
recurse(obj, obj, {}, [])
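# Typical invocation, following the docstring's own suggestion: collect
# first, then inspect whatever the garbage collector could not free.
#
#   import gc
#   gc.collect()
#   print_cycles(gc.garbage)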
class Grouper(object):
"""
This class provides a lightweight way to group arbitrary objects
together into disjoint sets when a full-blown graph data structure
would be overkill.
Objects can be joined using :meth:`join`, tested for connectedness
    using :meth:`joined`, and all disjoint sets can be retrieved by
using the object as an iterator.
The objects being joined must be hashable and weak-referenceable.
For example:
>>> from matplotlib.cbook import Grouper
>>> class Foo(object):
... def __init__(self, s):
... self.s = s
... def __repr__(self):
... return self.s
...
>>> a, b, c, d, e, f = [Foo(x) for x in 'abcdef']
>>> grp = Grouper()
>>> grp.join(a, b)
>>> grp.join(b, c)
>>> grp.join(d, e)
>>> sorted(map(tuple, grp))
[(a, b, c), (d, e)]
>>> grp.joined(a, b)
True
>>> grp.joined(a, c)
True
>>> grp.joined(a, d)
False
"""
def __init__(self, init=()):
mapping = self._mapping = {}
for x in init:
mapping[ref(x)] = [ref(x)]
def __contains__(self, item):
return ref(item) in self._mapping
def clean(self):
"""
Clean dead weak references from the dictionary
"""
mapping = self._mapping
to_drop = [key for key in mapping if key() is None]
for key in to_drop:
val = mapping.pop(key)
val.remove(key)
def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more
arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(ref(a), [ref(a)])
for arg in args:
set_b = mapping.get(ref(arg))
if set_b is None:
set_a.append(ref(arg))
mapping[ref(arg)] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
self.clean()
def joined(self, a, b):
"""
Returns True if *a* and *b* are members of the same set.
"""
self.clean()
mapping = self._mapping
try:
return mapping[ref(a)] is mapping[ref(b)]
except KeyError:
return False
def remove(self, a):
self.clean()
mapping = self._mapping
seta = mapping.pop(ref(a), None)
if seta is not None:
seta.remove(ref(a))
def __iter__(self):
"""
Iterate over each of the disjoint sets as a list.
The iterator is invalid if interleaved with calls to join().
"""
self.clean()
class Token:
pass
token = Token()
# Mark each group as we come across if by appending a token,
# and don't yield it twice
for group in six.itervalues(self._mapping):
if not group[-1] is token:
yield [x() for x in group]
group.append(token)
# Cleanup the tokens
for group in six.itervalues(self._mapping):
if group[-1] is token:
del group[-1]
def get_siblings(self, a):
"""
Returns all of the items joined with *a*, including itself.
"""
self.clean()
siblings = self._mapping.get(ref(a), [ref(a)])
return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
if steps == 1:
return a
steps = int(np.floor(steps))
new_length = ((len(a) - 1) * steps) + 1
new_shape = list(a.shape)
new_shape[0] = new_length
result = np.zeros(new_shape, a.dtype)
result[0] = a[0]
a0 = a[0:-1]
a1 = a[1:]
delta = ((a1 - a0) / steps)
for i in range(1, steps):
result[i::steps] = delta * i + a0
result[steps::steps] = a1
return result
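# Worked example with arbitrary numbers: up-sampling two points by a
# factor of two inserts the linear midpoint between them.
def _interpolation_example():
    out = simple_linear_interpolation(np.array([0., 2.]), 2)
    assert np.allclose(out, [0., 1., 2.])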
def recursive_remove(path):
if os.path.isdir(path):
for fname in (glob.glob(os.path.join(path, '*')) +
glob.glob(os.path.join(path, '.*'))):
if os.path.isdir(fname):
recursive_remove(fname)
os.removedirs(fname)
else:
os.remove(fname)
#os.removedirs(path)
else:
os.remove(path)
def delete_masked_points(*args):
"""
Find all masked and/or non-finite points in a set of arguments,
and return the arguments with only the unmasked points remaining.
Arguments can be in any of 5 categories:
1) 1-D masked arrays
2) 1-D ndarrays
3) ndarrays with more than one dimension
4) other non-string iterables
5) anything else
The first argument must be in one of the first four categories;
any argument with a length differing from that of the first
argument (and hence anything in category 5) then will be
passed through unchanged.
Masks are obtained from all arguments of the correct length
in categories 1, 2, and 4; a point is bad if masked in a masked
array or if it is a nan or inf. No attempt is made to
extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
does not yield a Boolean array.
All input arguments that are not passed unchanged are returned
as ndarrays after removing the points or rows corresponding to
masks in any of the arguments.
A vastly simpler version of this function was originally
written as a helper for Axes.scatter().
"""
if not len(args):
return ()
if (is_string_like(args[0]) or not iterable(args[0])):
raise ValueError("First argument must be a sequence")
nrecs = len(args[0])
margs = []
seqlist = [False] * len(args)
for i, x in enumerate(args):
if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
seqlist[i] = True
if ma.isMA(x):
if x.ndim > 1:
raise ValueError("Masked arrays must be 1-D")
else:
x = np.asarray(x)
margs.append(x)
masks = [] # list of masks that are True where good
for i, x in enumerate(margs):
if seqlist[i]:
if x.ndim > 1:
continue # Don't try to get nan locations unless 1-D.
if ma.isMA(x):
masks.append(~ma.getmaskarray(x)) # invert the mask
xd = x.data
else:
xd = x
try:
mask = np.isfinite(xd)
if isinstance(mask, np.ndarray):
masks.append(mask)
except: # Fixme: put in tuple of possible exceptions?
pass
if len(masks):
mask = reduce(np.logical_and, masks)
igood = mask.nonzero()[0]
if len(igood) < nrecs:
for i, x in enumerate(margs):
if seqlist[i]:
margs[i] = x.take(igood, axis=0)
for i, x in enumerate(margs):
if seqlist[i] and ma.isMA(x):
margs[i] = x.filled()
return margs
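# Illustrative call with made-up arrays: rows holding a NaN or a masked
# value in either argument are dropped from both return values.
def _delete_masked_points_example():
    x = np.array([1., 2., np.nan, 4.])
    y = np.ma.array([10., 20., 30., 40.], mask=[0, 0, 0, 1])
    xc, yc = delete_masked_points(x, y)
    assert np.array_equal(xc, [1., 2.]) and np.array_equal(yc, [10., 20.])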
def boxplot_stats(X, whis=1.5, bootstrap=None, labels=None,
autorange=False):
"""
Returns list of dictionaries of statistics used to draw a series
of box and whisker plots. The `Returns` section enumerates the
required keys of the dictionary. Users can skip this function and
pass a user-defined set of dictionaries to the new `axes.bxp` method
instead of relying on MPL to do the calculations.
Parameters
----------
X : array-like
Data that will be represented in the boxplots. Should have 2 or
fewer dimensions.
whis : float, string, or sequence (default = 1.5)
As a float, determines the reach of the whiskers past the first
        and third quartiles (e.g., Q3 + whis*IQR, IQR = interquartile
        range, Q3-Q1). Beyond the whiskers, data are considered outliers
        and are plotted as individual points. This can be set to an
ascending sequence of percentile (e.g., [5, 95]) to set the
whiskers at specific percentiles of the data. Finally, `whis`
can be the string ``'range'`` to force the whiskers to the
minimum and maximum of the data. In the edge case that the 25th
and 75th percentiles are equivalent, `whis` can be automatically
set to ``'range'`` via the `autorange` option.
bootstrap : int, optional
Number of times the confidence intervals around the median
should be bootstrapped (percentile method).
labels : array-like, optional
Labels for each dataset. Length must be compatible with
dimensions of `X`.
autorange : bool, optional (False)
When `True` and the data are distributed such that the 25th and
75th percentiles are equal, ``whis`` is set to ``'range'`` such
that the whisker ends are at the minimum and maximum of the
data.
Returns
-------
bxpstats : list of dict
A list of dictionaries containing the results for each column
of data. Keys of each dictionary are the following:
======== ===================================
Key Value Description
======== ===================================
label tick label for the boxplot
      mean     arithmetic mean value
med 50th percentile
q1 first quartile (25th percentile)
q3 third quartile (75th percentile)
cilo lower notch around the median
cihi upper notch around the median
whislo end of the lower whisker
whishi end of the upper whisker
fliers outliers
======== ===================================
Notes
-----
Non-bootstrapping approach to confidence interval uses Gaussian-
based asymptotic approximation:
.. math::
\mathrm{med} \pm 1.57 \\times \\frac{\mathrm{iqr}}{\sqrt{N}}
General approach from:
McGill, R., Tukey, J.W., and Larsen, W.A. (1978) "Variations of
Boxplots", The American Statistician, 32:12-16.
"""
def _bootstrap_median(data, N=5000):
# determine 95% confidence intervals of the median
M = len(data)
percentiles = [2.5, 97.5]
ii = np.random.randint(M, size=(N, M))
        bsData = data[ii]
estimate = np.median(bsData, axis=1, overwrite_input=True)
CI = np.percentile(estimate, percentiles)
return CI
def _compute_conf_interval(data, med, iqr, bootstrap):
if bootstrap is not None:
# Do a bootstrap estimate of notch locations.
# get conf. intervals around median
CI = _bootstrap_median(data, N=bootstrap)
notch_min = CI[0]
notch_max = CI[1]
else:
N = len(data)
notch_min = med - 1.57 * iqr / np.sqrt(N)
notch_max = med + 1.57 * iqr / np.sqrt(N)
return notch_min, notch_max
# output is a list of dicts
bxpstats = []
# convert X to a list of lists
X = _reshape_2D(X)
ncols = len(X)
if labels is None:
labels = repeat(None)
elif len(labels) != ncols:
raise ValueError("Dimensions of labels and X must be compatible")
input_whis = whis
for ii, (x, label) in enumerate(zip(X, labels), start=0):
# empty dict
stats = {}
if label is not None:
stats['label'] = label
# restore whis to the input values in case it got changed in the loop
whis = input_whis
# note tricksyness, append up here and then mutate below
bxpstats.append(stats)
# if empty, bail
if len(x) == 0:
stats['fliers'] = np.array([])
stats['mean'] = np.nan
stats['med'] = np.nan
stats['q1'] = np.nan
stats['q3'] = np.nan
stats['cilo'] = np.nan
stats['cihi'] = np.nan
stats['whislo'] = np.nan
stats['whishi'] = np.nan
stats['med'] = np.nan
continue
# up-convert to an array, just to be safe
x = np.asarray(x)
# arithmetic mean
stats['mean'] = np.mean(x)
# medians and quartiles
q1, med, q3 = np.percentile(x, [25, 50, 75])
# interquartile range
stats['iqr'] = q3 - q1
if stats['iqr'] == 0 and autorange:
whis = 'range'
# conf. interval around median
stats['cilo'], stats['cihi'] = _compute_conf_interval(
x, med, stats['iqr'], bootstrap
)
# lowest/highest non-outliers
if np.isscalar(whis):
if np.isreal(whis):
loval = q1 - whis * stats['iqr']
hival = q3 + whis * stats['iqr']
elif whis in ['range', 'limit', 'limits', 'min/max']:
loval = np.min(x)
hival = np.max(x)
else:
whismsg = ('whis must be a float, valid string, or '
'list of percentiles')
raise ValueError(whismsg)
else:
loval = np.percentile(x, whis[0])
hival = np.percentile(x, whis[1])
# get high extreme
wiskhi = np.compress(x <= hival, x)
if len(wiskhi) == 0 or np.max(wiskhi) < q3:
stats['whishi'] = q3
else:
stats['whishi'] = np.max(wiskhi)
# get low extreme
wisklo = np.compress(x >= loval, x)
if len(wisklo) == 0 or np.min(wisklo) > q1:
stats['whislo'] = q1
else:
stats['whislo'] = np.min(wisklo)
# compute a single array of outliers
stats['fliers'] = np.hstack([
np.compress(x < stats['whislo'], x),
np.compress(x > stats['whishi'], x)
])
# add in the remaining stats
stats['q1'], stats['med'], stats['q3'] = q1, med, q3
return bxpstats
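# Hedged usage sketch (random data, so only the shape of the result is
# shown): the per-column dictionaries can be handed straight to Axes.bxp.
#
#   data = np.random.lognormal(size=(50, 2))
#   stats = boxplot_stats(data, whis=1.5, labels=['a', 'b'])
#   stats[0]['med'], stats[0]['q1'], stats[0]['q3']   # summary for column 'a'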
# FIXME I don't think this is used anywhere
def unmasked_index_ranges(mask, compressed=True):
"""
Find index ranges where *mask* is *False*.
*mask* will be flattened if it is not already 1-D.
Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
indices for slices of the compressed :class:`numpy.ndarray`
corresponding to each of *N* uninterrupted runs of unmasked
values. If optional argument *compressed* is *False*, it returns
the start and stop indices into the original :class:`numpy.ndarray`,
not the compressed :class:`numpy.ndarray`. Returns *None* if there
are no unmasked values.
Example::
y = ma.array(np.arange(5), mask = [0,0,1,0,0])
ii = unmasked_index_ranges(ma.getmaskarray(y))
# returns array [[0,2,] [2,4,]]
y.compressed()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
# returns array [[0, 2], [3, 5]]
y.filled()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
Prior to the transforms refactoring, this was used to support
masked arrays in Line2D.
"""
mask = mask.reshape(mask.size)
m = np.concatenate(((1,), mask, (1,)))
indices = np.arange(len(mask) + 1)
mdif = m[1:] - m[:-1]
i0 = np.compress(mdif == -1, indices)
i1 = np.compress(mdif == 1, indices)
assert len(i0) == len(i1)
if len(i1) == 0:
return None # Maybe this should be np.zeros((0,2), dtype=int)
if not compressed:
return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=1)
seglengths = i1 - i0
breakpoints = np.cumsum(seglengths)
ic0 = np.concatenate(((0,), breakpoints[:-1]))
ic1 = breakpoints
return np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=1)
# a dict to cross-map linestyle arguments
_linestyles = [('-', 'solid'),
('--', 'dashed'),
('-.', 'dashdot'),
(':', 'dotted')]
ls_mapper = dict(_linestyles)
# The ls_mapper maps short codes for line style to their full name used
# by backends
# The reverse mapper is for mapping full names to short ones
ls_mapper_r = dict([(ls[1], ls[0]) for ls in _linestyles])
def align_iterators(func, *iterables):
"""
This generator takes a bunch of iterables that are ordered by func
It sends out ordered tuples::
(func(row), [rows from all iterators matching func(row)])
It is used by :func:`matplotlib.mlab.recs_join` to join record arrays
"""
class myiter:
def __init__(self, it):
self.it = it
self.key = self.value = None
self.iternext()
def iternext(self):
try:
self.value = next(self.it)
self.key = func(self.value)
except StopIteration:
self.value = self.key = None
def __call__(self, key):
retval = None
if key == self.key:
retval = self.value
self.iternext()
elif self.key and key > self.key:
raise ValueError("Iterator has been left behind")
return retval
# This can be made more efficient by not computing the minimum key for each
# iteration
iters = [myiter(it) for it in iterables]
minvals = minkey = True
while True:
minvals = ([_f for _f in [it.key for it in iters] if _f])
if minvals:
minkey = min(minvals)
yield (minkey, [it(minkey) for it in iters])
else:
break
def is_math_text(s):
# Did we find an even number of non-escaped dollar signs?
# If so, treat is as math text.
try:
s = six.text_type(s)
except UnicodeDecodeError:
raise ValueError(
"matplotlib display text must have all code points < 128 or use "
"Unicode strings")
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
return even_dollars
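# Two quick checks of the dollar-sign heuristic; the strings are arbitrary.
def _is_math_text_example():
    assert is_math_text(r'size $\alpha$')      # two unescaped dollars
    assert not is_math_text('costs $5')        # a single, unpaired dollar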
def _check_1d(x):
'''
Converts a sequence of less than 1 dimension, to an array of 1
dimension; leaves everything else untouched.
'''
if not hasattr(x, 'shape') or len(x.shape) < 1:
return np.atleast_1d(x)
else:
try:
x[:, None]
return x
except (IndexError, TypeError):
return np.atleast_1d(x)
def _reshape_2D(X):
"""
Converts a non-empty list or an ndarray of two or fewer dimensions
into a list of iterable objects so that in
for v in _reshape_2D(X):
v is iterable and can be used to instantiate a 1D array.
"""
if hasattr(X, 'shape'):
# one item
if len(X.shape) == 1:
if hasattr(X[0], 'shape'):
X = list(X)
else:
X = [X, ]
# several items
elif len(X.shape) == 2:
nrows, ncols = X.shape
if nrows == 1:
X = [X]
elif ncols == 1:
X = [X.ravel()]
else:
X = [X[:, i] for i in xrange(ncols)]
else:
raise ValueError("input `X` must have 2 or fewer dimensions")
if not hasattr(X[0], '__len__'):
X = [X]
else:
X = [np.ravel(x) for x in X]
return X
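# Behaviour sketch: a 2-D array becomes a list of its columns, while a
# plain 1-D sequence is wrapped so callers always see a list of datasets.
def _reshape_2D_example():
    cols = _reshape_2D(np.arange(12).reshape(4, 3))
    assert len(cols) == 3 and np.array_equal(cols[0], [0, 3, 6, 9])
    assert _reshape_2D([1, 2, 3]) == [[1, 2, 3]]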
def violin_stats(X, method, points=100):
"""
Returns a list of dictionaries of data which can be used to draw a series
of violin plots. See the `Returns` section below to view the required keys
of the dictionary. Users can skip this function and pass a user-defined set
    of dictionaries to the `axes.violin` method instead of using MPL to do the
calculations.
Parameters
----------
X : array-like
Sample data that will be used to produce the gaussian kernel density
estimates. Must have 2 or fewer dimensions.
method : callable
The method used to calculate the kernel density estimate for each
column of data. When called via `method(v, coords)`, it should
return a vector of the values of the KDE evaluated at the values
specified in coords.
points : scalar, default = 100
Defines the number of points to evaluate each of the gaussian kernel
density estimates at.
Returns
-------
A list of dictionaries containing the results for each column of data.
The dictionaries contain at least the following:
- coords: A list of scalars containing the coordinates this particular
kernel density estimate was evaluated at.
- vals: A list of scalars containing the values of the kernel density
estimate at each of the coordinates given in `coords`.
- mean: The mean value for this column of data.
- median: The median value for this column of data.
- min: The minimum value for this column of data.
- max: The maximum value for this column of data.
"""
# List of dictionaries describing each of the violins.
vpstats = []
# Want X to be a list of data sequences
X = _reshape_2D(X)
for x in X:
# Dictionary of results for this distribution
stats = {}
# Calculate basic stats for the distribution
min_val = np.min(x)
max_val = np.max(x)
# Evaluate the kernel density estimate
coords = np.linspace(min_val, max_val, points)
stats['vals'] = method(x, coords)
stats['coords'] = coords
# Store additional statistics for this distribution
stats['mean'] = np.mean(x)
stats['median'] = np.median(x)
stats['min'] = min_val
stats['max'] = max_val
# Append to output
vpstats.append(stats)
return vpstats
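# Hedged sketch: any callable evaluating a density estimate at the given
# coordinates will do; scipy's gaussian_kde (assumed available) is a
# common choice.
#
#   from scipy.stats import gaussian_kde
#   data = np.random.normal(size=(100, 3))
#   vp = violin_stats(data, lambda v, coords: gaussian_kde(v)(coords))
#   vp[0]['coords'], vp[0]['vals']   # KDE evaluated for the first column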
class _NestedClassGetter(object):
# recipe from http://stackoverflow.com/a/11493777/741316
"""
When called with the containing class as the first argument,
and the name of the nested class as the second argument,
returns an instance of the nested class.
"""
def __call__(self, containing_class, class_name):
nested_class = getattr(containing_class, class_name)
# make an instance of a simple object (this one will do), for which we
# can change the __class__ later on.
nested_instance = _NestedClassGetter()
# set the class of the instance, the __init__ will never be called on
# the class but the original state will be set later on by pickle.
nested_instance.__class__ = nested_class
return nested_instance
class _InstanceMethodPickler(object):
"""
Pickle cannot handle instancemethod saving. _InstanceMethodPickler
provides a solution to this.
"""
def __init__(self, instancemethod):
"""Takes an instancemethod as its only argument."""
if six.PY3:
self.parent_obj = instancemethod.__self__
self.instancemethod_name = instancemethod.__func__.__name__
else:
self.parent_obj = instancemethod.im_self
self.instancemethod_name = instancemethod.im_func.__name__
def get_instancemethod(self):
return getattr(self.parent_obj, self.instancemethod_name)
def _step_validation(x, *args):
"""
Helper function of `pts_to_*step` functions
This function does all of the normalization required to the
input and generate the template for output
"""
args = tuple(np.asanyarray(y) for y in args)
x = np.asanyarray(x)
if x.ndim != 1:
raise ValueError("x must be 1 dimensional")
if len(args) == 0:
raise ValueError("At least one Y value must be passed")
return np.vstack((x, ) + args)
def pts_to_prestep(x, *args):
"""
    Convert continuous line to pre-steps
    Given a set of N points convert to 2 N - 1 points
    which when connected linearly give a step function
    which changes values at the beginning of the intervals.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
        will be length ``2N - 1``
Examples
--------
>> x_s, y1_s, y2_s = pts_to_prestep(x, y1, y2)
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = np.zeros((vertices.shape[0], 2 * len(x) - 1), np.float)
# do the to step conversion logic
steps[0, 0::2], steps[0, 1::2] = vertices[0, :], vertices[0, :-1]
steps[1:, 0::2], steps[1:, 1:-1:2] = vertices[1:, :], vertices[1:, 1:]
# convert 2D array back to tuple
return tuple(steps)
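# Worked example with arbitrary numbers: three points become five, and the
# y value jumps at the start of each interval.
def _pts_to_prestep_example():
    xs, ys = pts_to_prestep([1, 2, 3], [10, 20, 30])
    assert np.array_equal(xs, [1, 1, 2, 2, 3])
    assert np.array_equal(ys, [10, 20, 20, 30, 30])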
def pts_to_poststep(x, *args):
"""
    Convert continuous line to post-steps
    Given a set of N points convert to 2 N - 1 points
    which when connected linearly give a step function
    which changes values at the end of the intervals.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
        will be length ``2N - 1``
Examples
--------
    >> x_s, y1_s, y2_s = pts_to_poststep(x, y1, y2)
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = ma.zeros((vertices.shape[0], 2 * len(x) - 1), np.float)
# do the to step conversion logic
steps[0, ::2], steps[0, 1:-1:2] = vertices[0, :], vertices[0, 1:]
steps[1:, 0::2], steps[1:, 1::2] = vertices[1:, :], vertices[1:, :-1]
# convert 2D array back to tuple
return tuple(steps)
def pts_to_midstep(x, *args):
"""
    Convert continuous line to mid-steps
    Given a set of N points convert to 2 N points
    which when connected linearly give a step function
    which changes values at the middle of the intervals.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
        will be length ``2N``
Examples
--------
    >> x_s, y1_s, y2_s = pts_to_midstep(x, y1, y2)
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = ma.zeros((vertices.shape[0], 2 * len(x)), np.float)
steps[0, 1:-1:2] = 0.5 * (vertices[0, :-1] + vertices[0, 1:])
steps[0, 2::2] = 0.5 * (vertices[0, :-1] + vertices[0, 1:])
steps[0, 0] = vertices[0, 0]
steps[0, -1] = vertices[0, -1]
steps[1:, 0::2], steps[1:, 1::2] = vertices[1:, :], vertices[1:, :]
# convert 2D array back to tuple
return tuple(steps)
STEP_LOOKUP_MAP = {'pre': pts_to_prestep,
'post': pts_to_poststep,
'mid': pts_to_midstep,
'step-pre': pts_to_prestep,
'step-post': pts_to_poststep,
'step-mid': pts_to_midstep}
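# Dispatch sketch: drawing code can look the converter up by drawstyle
# name instead of branching explicitly, e.g.
#
#   xs, ys = STEP_LOOKUP_MAP['post']([1, 2, 3], [10, 20, 30])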
def index_of(y):
"""
A helper function to get the index of an input to plot
against if x values are not explicitly given.
Tries to get `y.index` (works if this is a pd.Series), if that
fails, return np.arange(y.shape[0]).
This will be extended in the future to deal with more types of
labeled data.
Parameters
----------
y : scalar or array-like
The proposed y-value
Returns
-------
x, y : ndarray
The x and y values to plot.
"""
try:
return y.index.values, y.values
except AttributeError:
y = np.atleast_1d(y)
return np.arange(y.shape[0], dtype=float), y
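# Sketch: with a plain sequence the x values are simply 0..N-1; a pandas
# Series (if one is passed) would contribute its own index instead.
def _index_of_example():
    x, y = index_of([5., 7., 9.])
    assert np.array_equal(x, [0., 1., 2.]) and np.array_equal(y, [5., 7., 9.])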
def safe_first_element(obj):
if isinstance(obj, collections.Iterator):
# needed to accept `array.flat` as input.
# np.flatiter reports as an instance of collections.Iterator
# but can still be indexed via [].
# This has the side effect of re-setting the iterator, but
# that is acceptable.
try:
return obj[0]
except TypeError:
pass
raise RuntimeError("matplotlib does not support generators "
"as input")
return next(iter(obj))
def normalize_kwargs(kw, alias_mapping=None, required=(), forbidden=(),
allowed=None):
"""Helper function to normalize kwarg inputs
The order they are resolved are:
1. aliasing
2. required
3. forbidden
4. allowed
This order means that only the canonical names need appear in
`allowed`, `forbidden`, `required`
Parameters
----------
alias_mapping, dict, optional
A mapping between a canonical name to a list of
aliases, in order of precedence from lowest to highest.
If the canonical value is not in the list it is assumed to have
the highest priority.
required : iterable, optional
A tuple of fields that must be in kwargs.
forbidden : iterable, optional
A list of keys which may not be in kwargs
allowed : tuple, optional
A tuple of allowed fields. If this not None, then raise if
`kw` contains any keys not in the union of `required`
and `allowed`. To allow only the required fields pass in
``()`` for `allowed`
Raises
------
TypeError
To match what python raises if invalid args/kwargs are passed to
a callable.
"""
# deal with default value of alias_mapping
if alias_mapping is None:
alias_mapping = dict()
# make a local so we can pop
kw = dict(kw)
# output dictionary
ret = dict()
# hit all alias mappings
for canonical, alias_list in six.iteritems(alias_mapping):
# the alias lists are ordered from lowest to highest priority
# so we know to use the last value in this list
tmp = []
seen = []
for a in alias_list:
try:
tmp.append(kw.pop(a))
seen.append(a)
except KeyError:
pass
# if canonical is not in the alias_list assume highest priority
if canonical not in alias_list:
try:
tmp.append(kw.pop(canonical))
seen.append(canonical)
except KeyError:
pass
# if we found anything in this set of aliases put it in the return
# dict
if tmp:
ret[canonical] = tmp[-1]
if len(tmp) > 1:
warnings.warn("Saw kwargs {seen!r} which are all aliases for "
"{canon!r}. Kept value from {used!r}".format(
seen=seen, canon=canonical, used=seen[-1]))
# at this point we know that all keys which are aliased are removed, update
# the return dictionary from the cleaned local copy of the input
ret.update(kw)
fail_keys = [k for k in required if k not in ret]
if fail_keys:
raise TypeError("The required keys {keys!r} "
"are not in kwargs".format(keys=fail_keys))
fail_keys = [k for k in forbidden if k in ret]
if fail_keys:
raise TypeError("The forbidden keys {keys!r} "
"are in kwargs".format(keys=fail_keys))
if allowed is not None:
allowed_set = set(required) | set(allowed)
fail_keys = [k for k in ret if k not in allowed_set]
if fail_keys:
raise TypeError("kwargs contains {keys!r} which are not in "
"the required {req!r} or "
"allowed {allow!r} keys".format(
keys=fail_keys, req=required,
allow=allowed))
return ret
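# Hedged example: 'c' is declared a lower-priority alias of 'color', so
# only the canonical key survives (both appearing would trigger a warning).
def _normalize_kwargs_example():
    out = normalize_kwargs({'c': 'red', 'lw': 2},
                           alias_mapping={'color': ['c']})
    assert out == {'color': 'red', 'lw': 2}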
def get_label(y, default_name):
try:
return y.name
except AttributeError:
return default_name
# Numpy > 1.6.x deprecates putmask in favor of the new copyto.
# So long as we support versions 1.6.x and less, we need the
# following local version of putmask. We choose to make a
# local version of putmask rather than of copyto because the
# latter includes more functionality than the former. Therefore
# it is easy to make a local version that gives full putmask
# behavior, but duplicating the full copyto behavior would be
# more difficult.
try:
np.copyto
except AttributeError:
_putmask = np.putmask
else:
def _putmask(a, mask, values):
return np.copyto(a, values, where=mask)
_lockstr = """\
LOCKERROR: matplotlib is trying to acquire the lock
{!r}
and has failed. This may be due to another process holding this
lock. If you are sure no other matplotlib process is running try
removing these folders and trying again.
"""
class Locked(object):
"""
Context manager to handle locks.
Based on code from conda.
(c) 2012-2013 Continuum Analytics, Inc. / https://www.continuum.io/
All Rights Reserved
conda is distributed under the terms of the BSD 3-clause license.
Consult LICENSE_CONDA or https://opensource.org/licenses/BSD-3-Clause.
"""
LOCKFN = '.matplotlib_lock'
class TimeoutError(RuntimeError):
pass
def __init__(self, path):
self.path = path
self.end = "-" + str(os.getpid())
self.lock_path = os.path.join(self.path, self.LOCKFN + self.end)
self.pattern = os.path.join(self.path, self.LOCKFN + '-*')
self.remove = True
def __enter__(self):
retries = 50
sleeptime = 0.1
while retries:
files = glob.glob(self.pattern)
if files and not files[0].endswith(self.end):
time.sleep(sleeptime)
retries -= 1
else:
break
else:
err_str = _lockstr.format(self.pattern)
raise self.TimeoutError(err_str)
if not files:
try:
os.makedirs(self.lock_path)
except OSError:
pass
else: # PID lock already here --- someone else will remove it.
self.remove = False
def __exit__(self, exc_type, exc_value, traceback):
if self.remove:
for path in self.lock_path, self.path:
try:
os.rmdir(path)
except OSError:
pass
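# Usage sketch for the Locked context manager; the directory name below is
# hypothetical.
#
#   with Locked('/tmp/mpl-cache'):
#       pass   # touch files under the directory without racing other processes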
|
andyraib/data-storage
|
python_scripts/env/lib/python3.6/site-packages/matplotlib/cbook.py
|
Python
|
apache-2.0
| 83,371
|
[
"Gaussian"
] |
00f6c922beb07699ef8cd317587fcde7b448d93771d3cfb14ee96b8cda155279
|
from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, scf
from pyscf.nao import gw as gw_c
class KnowValues(unittest.TestCase):
def test_gw(self):
""" This is GW """
mol = gto.M( verbose = 0, atom = '''H 0.0 0.0 -0.3707; H 0.0 0.0 0.3707''', basis = 'cc-pvdz', )
gto_mf = scf.RHF(mol)
gto_mf.kernel()
#print('gto_mf.mo_energy:', gto_mf.mo_energy)
gw = gw_c(mf=gto_mf, gto=mol, verbosity=0,)
gw.kernel_gw()
self.assertAlmostEqual(gw.mo_energy_gw[0,0,0], -0.59709476270318296)
self.assertAlmostEqual(gw.mo_energy_gw[0,0,1], 0.19071318743971943)
if __name__ == "__main__": unittest.main()
|
gkc1000/pyscf
|
pyscf/nao/test/test_0060_gw_h2_ae.py
|
Python
|
apache-2.0
| 687
|
[
"PySCF"
] |
76cb3f3666594db83f97c1d1c8411d17bbd264a159986b6614b14adf3a029c4c
|
"""
SystemLoggingHandler is the implementation of the Logging service
in the DISET framework.
The following methods are available in the Service interface::
addMessages()
"""
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.FrameworkSystem.private.logging.Message import tupleToMessage
from DIRAC.FrameworkSystem.DB.SystemLoggingDB import SystemLoggingDB
__RCSID__ = "$Id$"
# This is a global instance of the SystemLoggingDB class
gLogDB = False
def initializeSystemLoggingHandler( serviceInfo ):
""" Check that we can connect to the DB and that the tables are properly created or updated
"""
global gLogDB
gLogDB = SystemLoggingDB()
res = gLogDB._connect()
if not res['OK']:
return res
return S_OK()
class SystemLoggingHandler( RequestHandler ):
""" This is server
"""
def __addMessage( self, messageObject, site, nodeFQDN ):
"""
This is the function that actually adds the Message to
the log Database
"""
credentials = self.getRemoteCredentials()
if credentials.has_key( 'DN' ):
userDN = credentials['DN']
else:
userDN = 'unknown'
if credentials.has_key( 'group' ):
userGroup = credentials['group']
else:
userGroup = 'unknown'
remoteAddress = self.getRemoteAddress()[0]
return gLogDB.insertMessage( messageObject, site, nodeFQDN, userDN, userGroup, remoteAddress )
types_addMessages = [ list, basestring, basestring ]
#A normal exported function (begins with export_)
def export_addMessages( self, messagesList, site, nodeFQDN ):
"""
This is the interface to the service
Inputs:
msgList contains a list of Message Objects.
Outputs:
S_OK if no exception was raised
S_ERROR if an exception was raised
"""
for messageTuple in messagesList:
messageObject = tupleToMessage( messageTuple )
result = self.__addMessage( messageObject, site, nodeFQDN )
if not result['OK']:
gLogger.error( 'The Log Message could not be inserted into the DB',
'because: "%s"' % result['Message'] )
return S_ERROR( result['Message'] )
return S_OK()
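# Hedged client-side sketch (the service path below is the conventional
# Framework/SystemLogging endpoint; adjust it to the actual installation,
# and messageTuples is a placeholder for serialised Message objects):
#
# from DIRAC.Core.DISET.RPCClient import RPCClient
# client = RPCClient( 'Framework/SystemLogging' )
# res = client.addMessages( messageTuples, siteName, nodeFQDN )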
|
Andrew-McNab-UK/DIRAC
|
FrameworkSystem/Service/SystemLoggingHandler.py
|
Python
|
gpl-3.0
| 2,278
|
[
"DIRAC"
] |
eb8abca3f81452429690730d9ef6131193177828a414e33e931951aa226fe6e6
|