| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12-1.05M | stringlengths 5-86 | stringlengths 4-191 | stringclasses (1 value) | stringclasses (15 values) | int32 12-1.05M | listlengths 1-23 | stringlengths 64-64 |
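A minimal sketch (all values hypothetical) of how one row of this table maps to a Python dict, following the columns above:

    row = {
        "text": "<full source file contents>",   # the code shown in each row below
        "repo_name": "owner/repository",
        "path": "path/to/file.py",
        "language": "Python",
        "license": "mit",
        "size": 4096,                            # file size in bytes
        "keyword": ["Gaussian"],                 # matched keyword list
        "text_hash": "<64-character hex digest of the text>",
    }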
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# test_generation.py
# This file is part of the NNGT module
# Distributed as free software, in the hope that it will be useful, under the
# terms of the GNU General Public License.
"""
Test the connect group and type methods.
"""
import pytest
import numpy as np
import nngt
import nngt.generation as ng
@pytest.mark.mpi_skip
def test_fixed():
''' Fixed degree with type '''
pop = nngt.NeuralPop.exc_and_inhib(1000)
igroup = pop["inhibitory"]
egroup = pop["excitatory"]
net = nngt.Network(population=pop)
deg_e = 50
ng.connect_neural_types(net, 1, [-1, 1], graph_model="fixed_degree",
degree=deg_e, degree_type="out-degree")
deg_i = 100
ng.connect_neural_types(net, -1, [-1, 1], graph_model="fixed_degree",
degree=deg_i, degree_type="out-degree")
edeg = net.get_degrees("out", nodes=egroup.ids)
ideg = net.get_degrees("out", nodes=igroup.ids)
assert np.all(edeg == deg_e)
assert np.all(ideg == deg_i)
edeg = net.get_degrees("in", nodes=egroup.ids)
ideg = net.get_degrees("in", nodes=igroup.ids)
avg_deg = (deg_e*egroup.size + deg_i*igroup.size) / pop.size
std_deg = np.sqrt(avg_deg)
assert avg_deg - std_deg < np.average(edeg) < avg_deg + std_deg
assert avg_deg - std_deg < np.average(ideg) < avg_deg + std_deg
@pytest.mark.mpi_skip
def test_gaussian():
''' Gaussian degree with groups '''
pop = nngt.NeuralPop.exc_and_inhib(1000)
igroup = pop["inhibitory"]
egroup = pop["excitatory"]
net = nngt.Network(population=pop)
avg_e = 50
std_e = 5
ng.connect_groups(net, egroup, [igroup, egroup],
graph_model="gaussian_degree", avg=avg_e, std=std_e,
degree_type="out-degree")
avg_i = 100
std_i = 5
ng.connect_groups(net, igroup, [igroup, egroup],
graph_model="gaussian_degree", avg=avg_i, std=std_i,
degree_type="out-degree")
edeg = net.get_degrees("out", nodes=egroup.ids)
ideg = net.get_degrees("out", nodes=igroup.ids)
# call only on root process (for mpi) and not if using distributed backend
if nngt.on_master_process() and nngt.get_config("backend") != "nngt":
assert avg_e - std_e < np.average(edeg) < avg_e + std_e
assert avg_i - std_i < np.average(ideg) < avg_i + std_i
elif nngt.get_config("mpi") and nngt.get_config("backend") == "nngt":
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
num_mpi = comm.Get_size()
edeg = comm.gather(edeg[rank::num_mpi], root=0)
ideg = comm.gather(ideg[rank::num_mpi], root=0)
if nngt.on_master_process():
assert avg_e - std_e < np.average(edeg) < avg_e + std_e
assert avg_i - std_i < np.average(ideg) < avg_i + std_i
edeg = net.get_degrees("in", nodes=egroup.ids)
ideg = net.get_degrees("in", nodes=igroup.ids)
avg_deg = (avg_e*egroup.size + avg_i*igroup.size) / pop.size
std_deg = np.sqrt(avg_deg)
# call only on root process (for mpi) unless using distributed backend
if nngt.on_master_process() and nngt.get_config("backend") != "nngt":
assert avg_deg - std_deg < np.average(edeg) < avg_deg + std_deg
assert avg_deg - std_deg < np.average(ideg) < avg_deg + std_deg
elif nngt.get_config("mpi") and nngt.get_config("backend") == "nngt":
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
num_mpi = comm.Get_size()
edeg = comm.gather(edeg, root=0)
ideg = comm.gather(ideg, root=0)
if nngt.on_master_process():
edeg = np.sum(edeg, axis=0)
ideg = np.sum(ideg, axis=0)
assert avg_deg - std_deg < np.average(edeg) < avg_deg + std_deg
assert avg_deg - std_deg < np.average(ideg) < avg_deg + std_deg
def test_group_vs_type():
''' Gaussian degree with groups and types '''
# first with groups
nngt.seed(0)
pop = nngt.NeuralPop.exc_and_inhib(1000)
igroup = pop["inhibitory"]
egroup = pop["excitatory"]
net1 = nngt.Network(population=pop)
all_groups = list(pop.keys()) # necessary to have same order as types
avg_e = 50
std_e = 5
ng.connect_groups(net1, egroup, all_groups, graph_model="gaussian_degree",
avg=avg_e, std=std_e, degree_type="out-degree")
avg_i = 100
std_i = 5
ng.connect_groups(net1, igroup, all_groups, graph_model="gaussian_degree",
avg=avg_i, std=std_i, degree_type="out-degree")
# then with types
nngt.seed(0)
pop = nngt.NeuralPop.exc_and_inhib(1000)
igroup = pop["inhibitory"]
egroup = pop["excitatory"]
net2 = nngt.Network(population=pop)
avg_e = 50
std_e = 5
ng.connect_neural_types(net2, 1, [-1, 1], graph_model="gaussian_degree",
avg=avg_e, std=std_e, degree_type="out-degree")
avg_i = 100
std_i = 5
ng.connect_neural_types(net2, -1, [-1, 1], graph_model="gaussian_degree",
avg=avg_i, std=std_i, degree_type="out-degree")
# call only on root process (for mpi) unless using distributed backend
if nngt.on_master_process() or nngt.get_config("backend") == "nngt":
# check that both networks are equals
assert np.all(net1.get_degrees() == net2.get_degrees())
if __name__ == "__main__":
import os
if os.environ.get("MPI"):
nngt.set_config("mpi", True)
if os.environ.get("GL", ""):
nngt.set_config("backend", os.environ["GL"])
if not nngt.get_config("mpi"):
test_fixed()
test_gaussian()
test_group_vs_type()
| Silmathoron/NNGT | testing/test_connect_group_type.py | Python | gpl-3.0 | 5,795 | ["Gaussian"] | 281f1ba098fbfeb058378a5af9bff3bb053a6ad3c55ea25a80fb77347716e14b |
"""Splinter plugin for pytest.
Provides an easy interface to the browser from your tests via the `browser` fixture,
which is an instance of the splinter Browser class.
"""
import functools # pragma: no cover
try:
from httplib import HTTPException
except ImportError:
from http.client import HTTPException
import mimetypes # pragma: no cover
import os.path
import re
import pytest # pragma: no cover
import splinter # pragma: no cover
from _pytest import junitxml
from _pytest.tmpdir import tmpdir
from selenium.webdriver.support import wait
from .webdriver_patches import patch_webdriver # pragma: no cover
from .splinter_patches import patch_webdriverelement # pragma: no cover
import logging
LOGGER = logging.getLogger(__name__)
NAME_RE = re.compile(r'[\W]')
def _visit(self, url):
"""Override splinter's visit to avoid unnecessary checks and add wait_until instead."""
self.__dict__.pop('status_code', None)
self.driver.get(url)
self.wait_for_condition(self.visit_condition, timeout=self.visit_condition_timeout)
def _wait_for_condition(self, condition=None, timeout=None, poll_frequency=0.5, ignored_exceptions=None):
"""Wait for given javascript condition."""
condition = functools.partial(condition or self.visit_condition, self)
timeout = timeout or self.visit_condition_timeout
return wait.WebDriverWait(
self.driver, timeout, poll_frequency=poll_frequency, ignored_exceptions=ignored_exceptions
).until(
lambda browser: condition()
)
def _get_status_code(self):
"""Lazy status code get."""
inst_status_code = self.__dict__.get('status_code')
if inst_status_code:
return inst_status_code
self.connect(self.url)
return self.status_code
def _set_status_code(self, value):
"""Lazy status code set."""
self.__dict__['status_code'] = value
def Browser(*args, **kwargs):
"""Emulate splinter's Browser."""
visit_condition = kwargs.pop('visit_condition')
visit_condition_timeout = kwargs.pop('visit_condition_timeout')
browser = splinter.Browser(*args, **kwargs)
browser.wait_for_condition = functools.partial(_wait_for_condition, browser)
if hasattr(browser, 'driver'):
browser.visit_condition = visit_condition
browser.visit_condition_timeout = visit_condition_timeout
browser.visit = functools.partial(_visit, browser)
browser.__class__.status_code = property(_get_status_code, _set_status_code)
browser.__splinter_browser__ = True
return browser
@pytest.fixture(scope='session') # pragma: no cover
def splinter_close_browser():
"""Close browser fixture."""
return True
@pytest.fixture(scope='session') # pragma: no cover
def splinter_webdriver(request):
"""Webdriver fixture."""
return request.config.option.splinter_webdriver
@pytest.fixture(scope='session') # pragma: no cover
def splinter_remote_url(request):
"""Remote webdriver url.
:return: URL of remote webdriver.
"""
return request.config.option.splinter_remote_url
@pytest.fixture(scope='session') # pragma: no cover
def splinter_selenium_socket_timeout(request):
"""Internal Selenium socket timeout (communication between webdriver and the browser).
:return: Seconds.
"""
return request.config.option.splinter_webdriver_socket_timeout
@pytest.fixture(scope='session') # pragma: no cover
def splinter_selenium_implicit_wait(request):
"""Selenium implicit wait timeout.
:return: Seconds.
"""
return request.config.option.splinter_webdriver_implicit_wait
@pytest.fixture(scope='session') # pragma: no cover
def splinter_selenium_speed(request):
"""Selenium speed.
:return: Seconds.
"""
return request.config.option.splinter_webdriver_speed
@pytest.fixture(scope='session') # pragma: no cover
def splinter_browser_load_condition():
"""The condition that has to be `True` to assume that the page is fully loaded.
One example is to wait for jQuery, then the condition could be::
@pytest.fixture
def splinter_browser_load_condition():
def condition(browser):
return browser.evaluate_script('typeof $ === "undefined" || !$.active')
return condition
"""
return lambda browser: True
@pytest.fixture(scope='session') # pragma: no cover
def splinter_browser_load_timeout():
"""The timeout in seconds in which the page is expected to be fully loaded."""
return 10
@pytest.fixture(scope='session') # pragma: no cover
def splinter_file_download_dir(session_tmpdir):
"""Browser file download directory."""
return session_tmpdir.ensure('splinter', 'download', dir=True).strpath
@pytest.fixture(scope='session') # pragma: no cover
def splinter_download_file_types():
"""Browser file types to download. Comma-separated."""
return ','.join(mimetypes.types_map.values())
@pytest.fixture(scope='session')
def splinter_firefox_profile_preferences():
"""Firefox profile preferences."""
return {
'browser.cache.memory.enable': False,
'browser.sessionhistory.max_total_viewers': 0,
'network.http.pipelining': True,
'network.http.pipelining.maxrequests': 8
}
@pytest.fixture(scope='session')
def splinter_firefox_profile_directory():
"""Firefox profile directory."""
return os.path.join(os.path.dirname(__file__), 'profiles', 'firefox')
@pytest.fixture(scope='session')
def splinter_driver_kwargs():
"""Webdriver kwargs."""
return {}
@pytest.fixture(scope='session')
def splinter_window_size():
"""Browser window size. (width, height)."""
return (1366, 768)
@pytest.fixture(scope='session')
def splinter_session_scoped_browser(request):
"""Flag to keep single browser per test session."""
return request.config.option.splinter_session_scoped_browser == 'true'
@pytest.fixture(scope='session')
def splinter_make_screenshot_on_failure(request):
"""Flag to make browser screenshot on test failure."""
return request.config.option.splinter_make_screenshot_on_failure == 'true'
@pytest.fixture(scope='session') # pragma: no cover
def splinter_screenshot_dir(request):
"""Browser screenshot directory."""
return os.path.abspath(request.config.option.splinter_screenshot_dir)
@pytest.fixture(scope='session')
def splinter_webdriver_executable(request):
"""Webdriver executable directory."""
executable = request.config.option.splinter_webdriver_executable
return os.path.abspath(executable) if executable else None
@pytest.fixture(scope='session')
def browser_pool(request, splinter_close_browser):
"""Browser 'pool' to emulate session scope but with possibility to recreate browser."""
pool = {}
def fin():
for browser in pool.values():
try:
browser.quit()
except (IOError, OSError):
pass
if splinter_close_browser:
request.addfinalizer(fin)
return pool
@pytest.fixture(scope='session')
def browser_patches(splinter_selenium_socket_timeout):
"""Browser monkey patches."""
patch_webdriver(splinter_selenium_socket_timeout)
patch_webdriverelement()
@pytest.fixture(scope='session')
def session_tmpdir(request):
"""pytest tmpdir which is session-scoped."""
return tmpdir(request)
@pytest.fixture(scope='session')
def splinter_browser_class(request):
"""Browser class to use for browser instance creation."""
return Browser
def get_args(driver=None,
download_dir=None,
download_ftypes=None,
firefox_pref=None,
firefox_prof_dir=None,
remote_url=None,
executable=None,
driver_kwargs=None):
"""Construct arguments to be passed to webdriver on initialization."""
kwargs = {}
if driver == 'firefox':
kwargs['profile_preferences'] = dict({
'browser.download.folderList': 2,
'browser.download.manager.showWhenStarting': False,
'browser.download.dir': download_dir,
'browser.helperApps.neverAsk.saveToDisk': download_ftypes,
'browser.helperApps.alwaysAsk.force': False,
'pdfjs.disabled': True, # disable internal ff pdf viewer to allow auto pdf download
}, **firefox_pref)
kwargs['profile'] = firefox_prof_dir
elif driver == 'remote':
kwargs['url'] = remote_url
elif driver in ('phantomjs', 'chrome'):
if executable:
kwargs['executable_path'] = executable
if driver_kwargs:
kwargs.update(driver_kwargs)
return kwargs
@pytest.fixture(scope='session')
def browser_instance_getter(
browser_patches,
splinter_session_scoped_browser,
splinter_browser_load_condition,
splinter_browser_load_timeout,
splinter_download_file_types,
splinter_driver_kwargs,
splinter_file_download_dir,
splinter_firefox_profile_preferences,
splinter_firefox_profile_directory,
splinter_make_screenshot_on_failure,
splinter_remote_url,
splinter_screenshot_dir,
splinter_selenium_implicit_wait,
splinter_selenium_socket_timeout,
splinter_selenium_speed,
splinter_webdriver,
splinter_webdriver_executable,
splinter_window_size,
splinter_browser_class,
session_tmpdir,
browser_pool,
):
"""Splinter browser instance getter. To be used for getting of plugin.Browser's instances.
    :return: function(parent). Each call returns a new instance of the plugin.Browser class.
"""
def get_browser():
kwargs = get_args(driver=splinter_webdriver,
download_dir=splinter_file_download_dir,
download_ftypes=splinter_download_file_types,
firefox_pref=splinter_firefox_profile_preferences,
firefox_prof_dir=splinter_firefox_profile_directory,
remote_url=splinter_remote_url,
executable=splinter_webdriver_executable,
driver_kwargs=splinter_driver_kwargs)
return splinter_browser_class(
splinter_webdriver, visit_condition=splinter_browser_load_condition,
visit_condition_timeout=splinter_browser_load_timeout,
wait_time=splinter_selenium_implicit_wait, **kwargs
)
def prepare_browser(request, parent):
browser_key = id(parent)
browser = browser_pool.get(browser_key)
if not splinter_session_scoped_browser:
browser = get_browser()
if splinter_close_browser:
request.addfinalizer(browser.quit)
elif not browser:
browser = browser_pool[browser_key] = get_browser()
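        # verify the pooled browser still matches the requested webdriver and is usable;
        # recreate it if the check fails or the browser has died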
try:
if splinter_webdriver not in browser.driver_name.lower():
raise IOError('webdriver does not match')
if hasattr(browser, 'driver'):
browser.driver.implicitly_wait(splinter_selenium_implicit_wait)
browser.driver.set_speed(splinter_selenium_speed)
if splinter_window_size:
browser.driver.set_window_size(*splinter_window_size)
browser.cookies.delete()
if hasattr(browser, 'driver'):
browser.visit_condition = splinter_browser_load_condition
browser.visit_condition_timeout = splinter_browser_load_timeout
browser.visit('about:blank')
except (IOError, HTTPException):
# we lost browser, try to restore the justice
try:
browser.quit()
except Exception: # NOQA
pass
browser = browser_pool[browser_key] = get_browser()
prepare_browser(request, parent)
return browser
return prepare_browser
@pytest.yield_fixture(autouse=True)
def browser_screenshot(request, splinter_screenshot_dir, splinter_make_screenshot_on_failure, session_tmpdir):
"""Make browser screenshot on test failure."""
yield
for name, value in request._funcargs.items():
if hasattr(value, '__splinter_browser__'):
browser = value
if splinter_make_screenshot_on_failure and request.node.splinter_failure:
slaveoutput = getattr(request.config, 'slaveoutput', None)
names = junitxml.mangle_testnames(request.node.nodeid.split("::"))
classname = '.'.join(names[:-1])
screenshot_dir = os.path.join(splinter_screenshot_dir, classname)
screenshot_file_name = '{0}-{1}.png'.format(
names[-1][:128 - len(name) - 5], name)
if not slaveoutput:
if not os.path.exists(screenshot_dir):
os.makedirs(screenshot_dir)
else:
screenshot_dir = session_tmpdir.ensure('screenshots', dir=True).strpath
screenshot_path = os.path.join(screenshot_dir, screenshot_file_name)
LOGGER.info('Saving screenshot to %s', screenshot_path)
try:
browser.driver.save_screenshot(screenshot_path)
with open(screenshot_path) as fd:
if slaveoutput is not None:
slaveoutput.setdefault('screenshots', []).append({
'class_name': classname,
'file_name': screenshot_file_name,
'content': fd.read()
})
except Exception as e: # NOQA
request.config.warn('SPL504', "Could not save screenshot: {0}".format(e))
@pytest.mark.tryfirst
def pytest_runtest_makereport(item, call, __multicall__):
"""Assign the report to the item for futher usage."""
rep = __multicall__.execute()
if rep.outcome != 'passed':
item.splinter_failure = rep
else:
item.splinter_failure = None
return rep
@pytest.fixture
def browser(request, browser_instance_getter):
"""Browser fixture."""
return browser_instance_getter(request, browser)
@pytest.fixture(scope='session')
def session_browser(request, browser_instance_getter):
"""Session scoped browser fixture."""
return browser_instance_getter(request, session_browser)
class SplinterXdistPlugin(object):
"""Plugin class to defer pytest-xdist hook handler."""
def pytest_testnodedown(self, node, error):
"""Copy screenshots back from remote nodes to have them on the master."""
config_screenshot_dir = splinter_screenshot_dir(node)
for screenshot in getattr(node, 'slaveoutput', {}).get('screenshots', []):
screenshot_dir = os.path.join(config_screenshot_dir, screenshot['class_name'])
if not os.path.exists(screenshot_dir):
os.makedirs(screenshot_dir)
with open(os.path.join(screenshot_dir, screenshot['file_name']), 'w') as fd:
fd.write(screenshot['content'])
def pytest_configure(config):
"""Register pytest-splinter's deferred plugin."""
if config.pluginmanager.getplugin('xdist'):
config.pluginmanager.register(SplinterXdistPlugin())
def pytest_addoption(parser): # pragma: no cover
"""Pytest hook to add custom command line option(s)."""
group = parser.getgroup("splinter", "splinter integration for browser testing")
group.addoption(
"--splinter-webdriver",
help="pytest-splinter webdriver", type="choice", choices=list(splinter.browser._DRIVERS.keys()),
dest='splinter_webdriver', metavar="DRIVER", default='firefox')
group.addoption(
"--splinter-remote-url",
help="pytest-splinter remote webdriver url ", metavar="URL", dest='splinter_remote_url', default=None)
group.addoption(
"--splinter-implicit-wait",
help="pytest-splinter selenium implicit wait, seconds", type="int",
dest='splinter_webdriver_implicit_wait', metavar="SECONDS", default=5)
group.addoption(
"--splinter-speed",
help="pytest-splinter selenium speed, seconds", type="int",
dest='splinter_webdriver_speed', metavar="SECONDS", default=0)
group.addoption(
"--splinter-socket-timeout",
help="pytest-splinter socket timeout, seconds", type="int",
dest='splinter_webdriver_socket_timeout', metavar="SECONDS", default=120)
group.addoption(
"--splinter-session-scoped-browser",
help="pytest-splinter should use a single browser instance per test session. Defaults to true.", action="store",
dest='splinter_session_scoped_browser', metavar="false|true", type="choice", choices=['false', 'true'],
default='true')
group.addoption(
"--splinter-make-screenshot-on-failure",
help="pytest-splinter should take browser screenshots on test failure. Defaults to true.", action="store",
dest='splinter_make_screenshot_on_failure', metavar="false|true", type="choice", choices=['false', 'true'],
default='true')
group.addoption(
"--splinter-screenshot-dir",
help="pytest-splinter browser screenshot directory. Defaults to the current directory.", action="store",
dest='splinter_screenshot_dir', metavar="DIR", default='.')
group.addoption(
"--splinter-webdriver-executable",
help="pytest-splinter webdrive executable path. Defaults to unspecified in which case it is taken from PATH",
action="store",
dest='splinter_webdriver_executable', metavar="DIR", default='')
| pelme/pytest-splinter | pytest_splinter/plugin.py | Python | mit | 17,698 | ["VisIt"] | 5eaa0d8ea0a47359a4f7fbd951131e96bd4dff045ce1f9e376fdec4657965eef |
import numpy as np
from astropy.table import Table
from astropy.io import fits
import astropy.io.fits as ts
import matplotlib.pyplot as plt
import matplotlib
import pickle
import os
import time
from os.path import isfile, join
from os import listdir
from TheCannon import dataset, apogee
from astropy.coordinates import SkyCoord
from astropy.time import Time
import AnniesLasso_2 as tc
fail = 0
class make_table():
def read_data(self):
pkl_file = open('/Users/caojunzhi/Desktop/NYU/Laboratory/task 2016.8.1-12.23/My codes/AnniesLasso_v2/dr13_rc_fits.pkl', 'rb')
path_fits = pickle.load(pkl_file)
pkl_file.close()
pkl_file = open('/Users/caojunzhi/Desktop/NYU/Laboratory/task 2016.8.1-12.23/My codes/AnniesLasso_v2/dr13_rc_ori.pkl', 'rb')
path_ori = pickle.load(pkl_file)
pkl_file.close()
N = len(path_fits)
print(N)
star_name = []
star_visit = []
BJD = []
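        # dummy first row/slice for the arrays stacked inside the loop below;
        # they are stripped off again after the loop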
inf_labels = np.array([0,0,0])
chi_inf = []
chi_mix = []
parameters= np.array([0,1,0])
un_cov = np.zeros((3, 3))
fiber= []
VBARY = []
SHIFT = []
SNR = []
for i in range(0,N):
try:
print("reading star %d %.2f %%" % (i + 1, (i + 1) / N * 100))
star = fits.open(path_fits[i])
dat = Table.read(path_ori[i])
ni = len(star[1].data)
star_name_i = path_fits[i].replace(".fits", "")
star_name_i = star_name_i.replace("/Users/caojunzhi/Desktop/Data/dr13_red_clump/", "")
SNR_i = []
star_name_ii = []
for j in range(0, ni - 2):
star_name_ii = np.append(star_name_ii,star_name_i)
SNR_i = np.append(SNR_i, star[0].header["SNR"])
star_visit_i = np.array(dat[0]["FILE"])
chi_inf_i = star[9].data[2:ni]
chi_mix_i = star[10].data[2:ni]
BJD_i = star[13].data
fiber_i = star[12].data
VBARY_i = star[11].data
SHIFT_i = star[1].data[2:ni]
inf_labels_i = star[8].data[2:ni, :]
parameters_i = star[0].data[2:ni, :]
un_cov_i = star[3].data[:, :, 2:ni]
except IndexError:
print("This one fails")
except OSError:
print("This one fails")
else:
SNR = np.append(SNR,SNR_i)
star_visit = np.append(star_visit, star_visit_i)
star_name = np.append(star_name,star_name_ii)
chi_inf = np.append(chi_inf, chi_inf_i)
chi_mix = np.append(chi_mix, chi_mix_i)
BJD = np.append(BJD, BJD_i)
fiber = np.append(fiber, fiber_i)
VBARY = np.append(VBARY, VBARY_i)
SHIFT = np.append(SHIFT, SHIFT_i)
inf_labels = np.vstack((inf_labels, inf_labels_i))
parameters = np.vstack((parameters, parameters_i))
un_cov = np.dstack((un_cov, un_cov_i))
n_visit = len(un_cov[0,0,:])
star_name = np.array(star_name)
self.star_name = star_name
SNR = np.array(SNR)
self.SNR =SNR
star_visit = np.array(star_visit)
star_visit = star_visit.ravel()
self.star_visit = star_visit
chi_inf = np.array(chi_inf)
chi_inf = chi_inf.ravel()
self.chi_inf = chi_inf
chi_mix = np.array(chi_mix)
chi_mix = chi_mix.ravel()
self.chi_mix = chi_mix
BJD = np.array(BJD).ravel()
self.BJD = BJD
fiber = np.array(fiber).ravel()
self.fiber = fiber
VBARY = np.array(VBARY).ravel()
self.VBARY = VBARY
SHIFT = np.array(SHIFT).ravel()
self.SHIFT =SHIFT
inf_labels = inf_labels[1:n_visit,:]
self.inf_labels = inf_labels
parameters = parameters[1:n_visit,:]
self.parameters = parameters
un_cov = un_cov[:,:,1:n_visit]
self.un_cov = un_cov
print("check shape")
print(n_visit - 1)
print(star_name.shape,SNR.shape,star_visit.shape,chi_inf.shape,chi_mix.shape)
print(BJD.shape,fiber.shape,inf_labels.shape,parameters.shape,un_cov.shape,VBARY.shape,SHIFT.shape)
    # Output path of the generated table is set below:
def write_table(self):
# save them in the header
path = "/Users/caojunzhi/Downloads/upload_20170322/red_clump_dr13.fits"
self.table_path = path
prihdr = fits.Header()
prihdr['COMMENT'] = "id MJD HJD VELIO VELIOUN Shift ShiftUN Shiftabs ShiftabsUN"
prihdu = fits.PrimaryHDU(data=self.un_cov,header=prihdr)
# Table list
col1 = fits.Column(name='APOGEEID', format="25A", array=self.star_name)
col2 = fits.Column(name='VISIT', format="25A", array=self.star_visit)
col3 = fits.Column(name='BJD', format="E", array= self.BJD)
col4 = fits.Column(name='TEFF', format='E', array=self.inf_labels[:,0])
col5 = fits.Column(name='LOGG', format="E", array=self.inf_labels[:,1])
col6 = fits.Column(name='FEH', format='E', array=self.inf_labels[:,2])
col7 = fits.Column(name='A', format="E", array=self.parameters[:,0])
col8 = fits.Column(name='B', format="E", array=self.parameters[:,1])
col9 = fits.Column(name='C', format="E", array=self.parameters[:,2])
col10 = fits.Column(name='CHIINF', format="E", array=self.chi_inf)
col11 = fits.Column(name='CHIMIX', format="E", array=self.chi_mix)
col12 = fits.Column(name='VBARY', format="E", array=self.VBARY)
col13 = fits.Column(name='VSHIFT', format="E", array=self.SHIFT/1000)
col14 = fits.Column(name='FIBER', format="E", array=self.fiber)
col15 = fits.Column(name='SNR', format="E", array=self.SNR)
cols = fits.ColDefs(
[col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12, col13, col14, col15])
tbhdu = fits.BinTableHDU.from_columns(cols)
thdulist = fits.HDUList([prihdu, tbhdu])
print("saving table")
thdulist.writeto(path, clobber=True)
thdulist.close()
def check(self,path):
star = fits.open(path)
table = Table.read(path)
self.star = star
self.table = table
print(table[0])
print(star[0].data.shape)
# construct table:
start_time = time.time()
model = make_table()
model.read_data()
model.write_table()
path = "/Users/caojunzhi/Downloads/upload_20170322/red_clump_dr13.fits"
#model.check(path)
print("The number of fails %d"%fail)
stop_time = time.time()
print("The time we use is %.2f"%(stop_time-start_time))
| peraktong/Cannon-Experiment | DR13_red_clump/0323_make_table_red_clump.py | Python | mit | 7,098 | ["VisIt"] | ad32e3d78f416100525ba08eabdb00f10f6812fb5dc98028e4cb66116f24e6cd |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2002 Gary Shao
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from fontstyle import FontStyle
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
log = logging.getLogger(".paragraphstyle")
#-------------------------------------------------------------------------
#
# Paragraph alignment
#
#-------------------------------------------------------------------------
PARA_ALIGN_CENTER = 0
PARA_ALIGN_LEFT = 1
PARA_ALIGN_RIGHT = 2
PARA_ALIGN_JUSTIFY = 3
#------------------------------------------------------------------------
#
# ParagraphStyle
#
#------------------------------------------------------------------------
class ParagraphStyle(object):
"""
Defines the characteristics of a paragraph. The characteristics are:
font (a FontStyle instance), right margin, left margin, first indent,
top margin, bottom margin, alignment, level, top border, bottom border,
right border, left border, padding, and background color.
"""
def __init__(self, source=None):
"""
@param source: if not None, then the ParagraphStyle is created
using the values of the source instead of the default values.
"""
if source:
self.font = FontStyle(source.font)
self.rmargin = source.rmargin
self.lmargin = source.lmargin
self.first_indent = source.first_indent
self.tmargin = source.tmargin
self.bmargin = source.bmargin
self.align = source.align
self.level = source.level
self.top_border = source.top_border
self.bottom_border = source.bottom_border
self.right_border = source.right_border
self.left_border = source.left_border
self.pad = source.pad
self.bgcolor = source.bgcolor
self.description = source.description
self.tabs = source.tabs
else:
self.font = FontStyle()
self.rmargin = 0
self.lmargin = 0
self.tmargin = 0
self.bmargin = 0
self.first_indent = 0
self.align = PARA_ALIGN_LEFT
self.level = 0
self.top_border = 0
self.bottom_border = 0
self.right_border = 0
self.left_border = 0
self.pad = 0
self.bgcolor = (255, 255, 255)
self.description = ""
self.tabs = []
def set_description(self, text):
"""
        Set the description of the paragraph
"""
self.description = text
def get_description(self):
"""
        Return the description of the paragraph
"""
return self.description
def set(self, rmargin=None, lmargin=None, first_indent=None,
tmargin=None, bmargin=None, align=None,
tborder=None, bborder=None, rborder=None, lborder=None,
pad=None, bgcolor=None, font=None):
"""
Allows the values of the object to be set.
@param rmargin: right indent in centimeters
@param lmargin: left indent in centimeters
@param first_indent: first line indent in centimeters
@param tmargin: space above paragraph in centimeters
@param bmargin: space below paragraph in centimeters
@param align: alignment type (PARA_ALIGN_LEFT, PARA_ALIGN_RIGHT, PARA_ALIGN_CENTER, or PARA_ALIGN_JUSTIFY)
@param tborder: non zero indicates that a top border should be used
@param bborder: non zero indicates that a bottom border should be used
@param rborder: non zero indicates that a right border should be used
@param lborder: non zero indicates that a left border should be used
@param pad: padding in centimeters
@param bgcolor: background color of the paragraph as an RGB tuple.
@param font: FontStyle instance that defines the font
"""
if font is not None:
self.font = FontStyle(font)
if pad is not None:
self.set_padding(pad)
if tborder is not None:
self.set_top_border(tborder)
if bborder is not None:
self.set_bottom_border(bborder)
if rborder is not None:
self.set_right_border(rborder)
if lborder is not None:
self.set_left_border(lborder)
if bgcolor is not None:
self.set_background_color(bgcolor)
if align is not None:
self.set_alignment(align)
if rmargin is not None:
self.set_right_margin(rmargin)
if lmargin is not None:
self.set_left_margin(lmargin)
if first_indent is not None:
self.set_first_indent(first_indent)
if tmargin is not None:
self.set_top_margin(tmargin)
if bmargin is not None:
self.set_bottom_margin(bmargin)
def set_header_level(self, level):
"""
Set the header level for the paragraph. This is useful for
numbered paragraphs. A value of 1 indicates a header level
format of X, a value of two implies X.X, etc. A value of zero
means no header level.
"""
self.level = level
def get_header_level(self):
"Return the header level of the paragraph"
return self.level
def set_font(self, font):
"""
Set the font style of the paragraph.
@param font: FontStyle object containing the font definition to use.
"""
self.font = FontStyle(font)
def get_font(self):
"Return the FontStyle of the paragraph"
return self.font
def set_padding(self, val):
"""
Set the paragraph padding in centimeters
@param val: floating point value indicating the padding in centimeters
"""
self.pad = val
def get_padding(self):
"""Return a the padding of the paragraph"""
return self.pad
def set_top_border(self, val):
"""
Set the presence or absence of top border.
@param val: True indicates a border should be used, False indicates
no border.
"""
self.top_border = val
def get_top_border(self):
"Return 1 if a top border is specified"
return self.top_border
def set_bottom_border(self, val):
"""
Set the presence or absence of bottom border.
@param val: True indicates a border should be used, False
indicates no border.
"""
self.bottom_border = val
def get_bottom_border(self):
"Return 1 if a bottom border is specified"
return self.bottom_border
def set_left_border(self, val):
"""
Set the presence or absence of left border.
@param val: True indicates a border should be used, False
indicates no border.
"""
self.left_border = val
def get_left_border(self):
"Return 1 if a left border is specified"
return self.left_border
def set_right_border(self, val):
"""
        Set the presence or absence of right border.
@param val: True indicates a border should be used, False
indicates no border.
"""
self.right_border = val
def get_right_border(self):
"Return 1 if a right border is specified"
return self.right_border
def get_background_color(self):
"""
Return a tuple indicating the RGB components of the background
color
"""
return self.bgcolor
def set_background_color(self, color):
"""
Set the background color of the paragraph.
@param color: tuple representing the RGB components of a color
(0,0,0) to (255,255,255)
"""
self.bgcolor = color
def set_alignment(self, align):
"""
Set the paragraph alignment.
@param align: PARA_ALIGN_LEFT, PARA_ALIGN_RIGHT, PARA_ALIGN_CENTER,
or PARA_ALIGN_JUSTIFY
"""
self.align = align
def get_alignment(self):
"Return the alignment of the paragraph"
return self.align
def get_alignment_text(self):
"""
        Return a text string representing the alignment, either 'left',
'right', 'center', or 'justify'
"""
if self.align == PARA_ALIGN_LEFT:
return "left"
elif self.align == PARA_ALIGN_CENTER:
return "center"
elif self.align == PARA_ALIGN_RIGHT:
return "right"
elif self.align == PARA_ALIGN_JUSTIFY:
return "justify"
return "unknown"
def set_left_margin(self, value):
"sets the left indent in centimeters"
self.lmargin = value
def set_right_margin(self, value):
"sets the right indent in centimeters"
self.rmargin = value
def set_first_indent(self, value):
"sets the first line indent in centimeters"
self.first_indent = value
def set_top_margin(self, value):
"sets the space above paragraph in centimeters"
self.tmargin = value
def set_bottom_margin(self, value):
"sets the space below paragraph in centimeters"
self.bmargin = value
def get_left_margin(self):
"returns the left indent in centimeters"
return self.lmargin
def get_right_margin(self):
"returns the right indent in centimeters"
return self.rmargin
def get_first_indent(self):
"returns the first line indent in centimeters"
return self.first_indent
def get_top_margin(self):
"returns the space above paragraph in centimeters"
return self.tmargin
def get_bottom_margin(self):
"returns the space below paragraph in centimeters"
return self.bmargin
def set_tabs(self, tab_stops):
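        "Set the tab stops of the paragraph"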
assert isinstance(tab_stops, list)
self.tabs = tab_stops
def get_tabs(self):
return self.tabs
| arunkgupta/gramps | gramps/gen/plug/docgen/paragraphstyle.py | Python | gpl-2.0 | 11,375 | ["Brian"] | ece7775f10d255bc231476efac2abb396d08f313b1c233f0762987fd33a68606 |
import matplotlib.pyplot as pl
from astroML.plotting.tools import draw_ellipse
def fake_data_plot(x_true, y_true, x, y, samples=None, mus=None, Vs=None):
"""
Two column plot of fake data in xd-demo.ipynb
"""
fig = pl.figure(figsize=(5, 3.75))
fig.subplots_adjust(left=0.1, right=0.95,
bottom=0.1, top=0.95,
wspace=0.02, hspace=0.02)
    if samples is None:
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax3 = ax4 = None
xcheck=(None, None)
else:
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax3.scatter(samples[:, 0], samples[:, 1], s=4, lw=0, c='k')
ax4 = fig.add_subplot(224)
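        # draw a 2-sigma ellipse for each recovered Gaussian component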
for i in range(mus.shape[0]):
draw_ellipse(mus[i], Vs[i], scales=[2], ax=ax4,
ec='k', fc='gray', alpha=0.2)
xcheck=(0, 1)
ax1.scatter(x_true, y_true, s=4, lw=0, c='k')
ax2.scatter(x, y, s=4, lw=0, c='k')
titles = ["True Distribution", "Noisy Distribution",
"Extreme Deconvolution\n resampling",
"Extreme Deconvolution\n cluster locations"]
ax = [ax1, ax2, ax3, ax4]
for i in range(4):
if ax[i] is not None:
ax[i].set_xlim(-1, 13)
ax[i].set_ylim(-6, 16)
ax[i].xaxis.set_major_locator(pl.MultipleLocator(4))
ax[i].yaxis.set_major_locator(pl.MultipleLocator(5))
ax[i].text(0.05, 0.95, titles[i],
ha='left', va='top', transform=ax[i].transAxes)
if i in xcheck:
ax[i].xaxis.set_major_formatter(pl.NullFormatter())
else:
ax[i].set_xlabel('$x$')
if i in (1, 3):
ax[i].yaxis.set_major_formatter(pl.NullFormatter())
else:
ax[i].set_ylabel('$y$')
pl.show()
def sdss_description_plot(x, fs=4, bins=64, tol=0.175):
"""
Generate a plot to describe what the SDSS data looks like.
"""
fig = pl.figure(figsize=(2 * fs, 2 * fs))
fig.subplots_adjust(left=0.1, right=0.95,
bottom=0.1, top=0.95,
wspace=0.25, hspace=0.25)
pl.subplot(221)
pl.hist(x[:, 0], bins=bins, alpha=0.3, color='k', normed=True)
pl.xlim(16, 25)
pl.xlabel('r magnitude')
pl.ylabel('Relative Frequency')
pl.subplot(222)
pl.plot(x[:, 0], x[:, -2], '.k', alpha=0.3, ms=1)
pl.plot([17, 23], [tol, tol], 'r', lw=2)
pl.xlim(17, 23)
pl.ylim(-0.1, 0.4)
pl.xlabel('r magnitude')
pl.ylabel('\'star\' - \'galaxy\' mag.')
ax = pl.subplot(223)
ind = x[:, -1] < 0.25 * tol
pl.plot(x[ind, 2], x[ind, 3], '.k', alpha=0.3, ms=1)
ax.text(0.5, 0.95, '\'Stars\'', ha='center', va='top',
transform=ax.transAxes, fontsize=16)
pl.xlim(-1, 3)
pl.ylim(-1, 3)
pl.xlabel('g - r magnitude')
pl.ylabel('r - i magnitude')
ax = pl.subplot(224)
ind = x[:, -1] > 2 * tol
pl.plot(x[ind, 2], x[ind, 3], '.k', alpha=0.3, ms=1)
ax.text(0.5, 0.95, '\'Galaxies\'', ha='center', va='top',
transform=ax.transAxes, fontsize=16)
pl.xlim(-1, 3)
pl.ylim(-1, 3)
pl.xlabel('g - r magnitude')
pl.ylabel('r - i magnitude')
pl.show()
def sdss_results_plot(x, samples, mus, Vs, fs=4, ind=[0, -2], tol=0.175):
"""
Plot the results of the XD run.
"""
fig = pl.figure(figsize=(3 * fs, fs))
fig.subplots_adjust(left=0.1, right=0.95,
bottom=0.1, top=0.95,
wspace=0.25, hspace=0.25)
pl.subplot(131)
pl.plot(x[:, 0], x[:, -2], '.k', alpha=0.5, ms=1)
pl.plot([17, 23], [tol, tol], 'r', lw=2)
pl.xlim(17, 23)
pl.ylim(-0.1, 0.4)
pl.xlabel('r magnitude')
pl.ylabel('\'star\' - \'galaxy\' mag.')
ax = pl.subplot(132)
for i in range(mus.shape[0]):
draw_ellipse(mus[i, ind], Vs[i, ind][:, ind], scales=[2], ax=ax,
ec='k', fc='gray', alpha=0.2)
pl.xlim(17, 23)
pl.ylim(-0.1, 0.4)
pl.xlabel('r magnitude')
pl.subplot(133)
pl.plot(samples[:, 0], samples[:, -2], '.k', alpha=0.5, ms=1)
pl.plot([17, 23], [tol, tol], 'r', lw=2)
pl.xlim(17, 23)
pl.ylim(-0.1, 0.4)
pl.xlabel('r magnitude')
pl.show()
| rossfadely/xd-demo | demo_utils.py | Python | mit | 4,411 | ["Galaxy"] | ddff77ad80a9d18ef519e11e258d0d092e3647955cb08c7ba0815e3887c2b548 |
# Copyright (C) 2016
# Jakub Krajniak (jkrajniak at gmail.com)
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***********************************************
espressopp.integrator.LangevinThermostatOnGroup
***********************************************
Thermalize particles in the ParticleGroup only.
.. function:: espressopp.integrator.LangevinThermostatOnGroup(system, particle_group)
:param system: The system object.
:type system: espressopp.System
:param particle_group: The particle group.
:type particle_group: espressopp.ParticleGroup
Example
###########
>>> pg = espressopp.ParticleGroup(system.storage)
>>> for pid in range(10):
>>> pg.add(pid)
>>> thermostat = espressopp.integrator.LangevinThermostatOnGroup(system, pg)
>>> thermostat.temperature = 1.0
>>> thermostat.gamma = 1.0
>>> integrator.addExtension(thermostat)
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_LangevinThermostatOnGroup
class LangevinThermostatOnGroupLocal(ExtensionLocal, integrator_LangevinThermostatOnGroup):
def __init__(self, system, particle_group):
if pmi.workerIsActive():
cxxinit(self, integrator_LangevinThermostatOnGroup, system, particle_group)
if pmi.isController :
class LangevinThermostatOnGroup(Extension):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.integrator.LangevinThermostatOnGroupLocal',
pmiproperty = [ 'gamma', 'temperature']
)
| hidekb/espressopp | src/integrator/LangevinThermostatOnGroup.py | Python | gpl-3.0 | 2,236 | ["ESPResSo"] | 8abe9003f15962f6794d62c86863ca89d0158f3f9dc48e2c87f7a20f3ea606d3 |
# coding=utf-8
from __future__ import absolute_import
from flask import url_for
from flask_login import current_user
import pytest
from firefly.six import unicode
from firefly.models.user import User
from firefly.views.api.consts import OK
@pytest.mark.usefixtures('client_class')
class TestUser:
def setup(self):
self.users = []
for x in range(3):
self.users.append(
User.create_user(
username='user' + str(x),
password='user' + str(x),
email='user' + str(x) + '@a.com'
)
)
    def login(self, user_no):
        form = {
            'email': 'user%d@a.com' % user_no,
            'password': 'user%d' % user_no
        }
rv = self.client.post(
url_for('home.login'), data=form,
follow_redirects=True
)
assert current_user.is_authenticated()
assert url_for('security.logout') in rv.data
def test_user_settings(self):
LOCATION = 'Beijing'
WEBSITE = 'http://firefly.dev'
GITHUB_ID = 'firefly'
self.login(0)
url = url_for('user.settings')
assert self.users[0].location is None
assert self.users[0].website is None
assert self.users[0].github_id is None
form = {
'location': LOCATION,
'website': WEBSITE,
'github_id': GITHUB_ID
}
rv = self.client.post(url, data=form)
assert rv.status_code == 302
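        # re-fetch the user from the database to check the persisted settings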
user = User.objects.filter(id=self.users[0].id).first()
assert user
assert user.location == LOCATION
assert user.website == WEBSITE
assert user.github_id == GITHUB_ID
def test_follow_user_api(self):
# test follow
self.login(0)
assert self.users[0].following == []
url = url_for('api.followuserapi', id=self.users[1].id)
rv = self.client.put(url, buffered=True)
assert rv.status_code == 202
assert rv.json['status'] == OK
self.users[0].reload()
self.users[1].reload()
assert self.users[0].following == [self.users[1]]
assert self.users[1].follower == [self.users[0]]
# test unfollow
url = url_for('api.followuserapi', id=self.users[1].id)
rv = self.client.delete(url, buffered=True)
assert rv.status_code == 204
self.users[0].reload()
self.users[1].reload()
assert self.users[0].following == []
assert self.users[1].follower == []
def test_block_user_api(self):
# test block
self.login(0)
assert self.users[0].blocked_user_id == []
url = url_for('api.blockuserapi', id=self.users[1].id)
rv = self.client.put(url, buffered=True)
assert rv.status_code == 202
assert rv.json['status'] == OK
self.users[0].reload()
assert self.users[0].blocked_user_id == [unicode(self.users[1].id)]
# test unblock
url = url_for('api.blockuserapi', id=self.users[1].id)
rv = self.client.delete(url, buffered=True)
assert rv.status_code == 204
self.users[0].reload()
assert self.users[0].following == []
| matrixorz/firefly | tests/test_user.py | Python | mit | 3,213 | ["Firefly"] | a95f66203808fb830efbd0aa26e5cb9b87bf1492cd286a4b03529a62489d030c |
"""
generate toy datasets
based on code by Mark Rogers
"""
from math import sin, pi
from random import gauss
import numpy
import datafunc
##
# Class generators:
##
def sineClass(xlim=[0,1], ylim=[0,1], n=20, sigma = 0.04) :
"""
Generates a 2-D noisy sine wave
Parameters:
xlim - list of length 2 that delimits the x value range
ylim - list of length 2 that delimits the y value range
n - number of data points
Note: for use with PyML demo2d, only use x and y values
between -1 and 1
"""
minx = min(xlim)
dx = float(max(xlim)-minx)/n
yrange = max(ylim)-min(ylim)
miny = min(ylim)
gamma = float(yrange)/2.0
X = []
for i in xrange(n) :
xval = i*dx
newx = minx + xval + gauss(0,sigma)
newy = miny + gamma*sin(xval*pi*2) + gauss(0,sigma)
X.append([newx, newy])
return X
def multivariate_normal(mu, sigma=0.1, n=20) :
"""
a wrapper around numpy's random.multivariate_normal function
Generates data from a Gaussian distribution with mean mu
and standard deviation sigma
Parameters:
mu - mean
sigma - variance (either a float, list or square matrix)
n - number of points to generate
Note: for use with PyML demo2d, only use mu1 and mu2
values that keep populations between -1 and 1
"""
dim = len(mu)
if type(sigma) == type(1.0) or type(sigma) == type(1) :
sigma = numpy.diag([sigma] * dim)
else :
sigma = numpy.array(sigma)
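        # a 1-D sigma is treated as per-dimension variances, a 2-D sigma as a full covariance matrix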
if sigma.ndim == 1 :
sigma = numpy.diag(sigma)
else :
assert sigma.shape[0] == sigma.shape[1]
return numpy.random.multivariate_normal(mu, sigma, n)
def gaussianData(mu, sigma, n) :
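    # one Gaussian cloud per class; returns the points as a labelled PyML VectorDataSet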
numClasses = len(mu)
if len(sigma) == 1 :
sigma = [sigma for i in range(numClasses)]
if len(n) == 1 :
n = [n for i in range(numClasses)]
Y = []
for i in range(numClasses) :
Y.extend([str(i) for j in range(n[i])])
X = []
for i in range(numClasses) :
print mu[i], sigma[i], n[i]
X.extend(multivariate_normal(mu[i], sigma[i], n[i]).tolist())
return datafunc.VectorDataSet(X, L = Y)
def noisyData() :
"""
Creates two populations, usually linearly-separable, but with
vastly different variance. Simulates a problem where one
population has significantly more noise than another. Data are
output in a CSV format suitable for creating a PyML VectorDataSet
(labelsColumn=1).
"""
pid = 0
for label in [-1,1] :
if label < 0 :
X,Y = gaussCloud(-0.5, 0.0, sigma=0.05, n=20)
else :
X,Y = gaussCloud(0.3, 0.0, sigma=0.25, n=20)
for i in xrange(len(X)) :
pid += 1
print "%(p)d,%(l)d,%(x)f,%(y)f" % {'p':pid, 'l':label, 'x':X[i], 'y':Y[i]}
def sineData(n = 30) :
"""
Uses sine-wave populations to create two class populations that
meander close to each other. Data are output in a CSV format
suitable for creating a PyML VectorDataSet (labelsColumn=1).
"""
pid = 0
lim = 0.8
X = []
Y = []
for label in [-1,1] :
if label > 0 :
X.extend(sineClass([-lim,lim], [0, 0.6], n))
else :
X.extend(sineClass([-lim,lim], [-0.4, 0.2], n))
Y.extend([str(label) for i in range(n)])
return datafunc.VectorDataSet(X, L = Y)
def separableData() :
"""
Creates two linearly-separable populations, one centered
at (-.5,0) and the other at (0.5,0). Data are output in
a CSV format suitable for creating a PyML VectorDataSet
(labelsColumn=1).
"""
pid = 0
for label in [-1,1] :
if label < 0 :
X,Y = gaussCloud(-0.5, 0.0, sigma=0.2, n=20)
else :
X,Y = gaussCloud(0.5, 0.0, sigma=0.2, n=20)
for i in xrange(len(X)) :
pid += 1
print "%(p)d,%(l)d,%(x)f,%(y)f" % {'p':pid, 'l':label, 'x':X[i], 'y':Y[i]}
## Main:
USAGE = """
Usage: python generate.py type
Where 'type' is one of:
l - two similar, linearly-separable populations
n - two linearly-separable populations, one with more
noise than the other
s - two populations generated by sine waves (with some noise)
"""
if __name__ == '__main__' :
import sys
if len(sys.argv) != 2 :
print USAGE
sys.exit(1)
type = sys.argv[1]
if type == 'l' :
separableData()
elif type == 'n' :
noisyData()
elif type == 's' :
        sineData()  # sine-wave generator defined above (no curvyData exists in this module)
else :
print "Unrecognized data generation type:", type
| cathywu/Sentiment-Analysis | PyML-0.7.9/PyML/datagen/toydata.py | Python | gpl-2.0 | 4,824 | ["Gaussian"] | e4ccf4ec48044ea2a372c99f4a958838b8cfb2f415c81b5ae0974ebb80c7d361 |
../../../../../../../share/pyshared/orca/scripts/apps/pidgin/__init__.py
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/pidgin/__init__.py | Python | gpl-3.0 | 72 | ["ORCA"] | 967fa4d0fdcc728035cbef4a341b9a7491e90c09eeeae824a8a932799730e14a |
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
#
from collections import OrderedDict
import torch
import torchvision.models
import torchvision.models.resnet
from torch import nn as nn
import nupic.research
import nupic.research.frameworks.pytorch.models.resnets
from nupic.research.frameworks.pytorch.restore_utils import load_state_from_checkpoint
from nupic.torch.compatibility import upgrade_to_masked_sparseweights
def init_resnet50_batch_norm(model):
"""
Initialize ResNet50 batch norm modules
See https://arxiv.org/pdf/1706.02677.pdf
:param model: Resnet 50 model
"""
for m in model.modules():
if isinstance(m, torchvision.models.resnet.BasicBlock):
# initialized the last BatchNorm in each BasicBlock to 0
m.bn2.weight = nn.Parameter(torch.zeros_like(m.bn2.weight))
elif isinstance(m, torchvision.models.resnet.Bottleneck):
# initialized the last BatchNorm in each Bottleneck to 0
m.bn3.weight = nn.Parameter(torch.zeros_like(m.bn3.weight))
elif isinstance(m, (
nupic.research.frameworks.pytorch.models.resnets.BasicBlock,
nupic.research.frameworks.pytorch.models.resnets.Bottleneck
)):
            # initialize the last BatchNorm in each block to 0
*_, last_bn = filter(lambda x: isinstance(x, nn.BatchNorm2d),
m.regular_path)
last_bn.weight.data.zero_()
elif isinstance(m, nn.Linear):
            # initialize linear layer weights from a Gaussian distribution
m.weight.data.normal_(0, 0.01)
def get_compatible_state_dict(state_dict, model):
"""
Make sure checkpoint is compatible with model
"""
# Copy metadata attribute inserted into the OrderedDict by pytorch serializer
metadata = getattr(state_dict, "_metadata", None)
state_dict = upgrade_to_masked_sparseweights(state_dict)
if model.state_dict().keys() != state_dict.keys():
state_dict = OrderedDict(
zip(model.state_dict().keys(), state_dict.values()))
# Restore pytorch serializer metadata
if metadata is not None:
state_dict._metadata = metadata
return state_dict
def create_model(model_class, model_args, init_batch_norm, device=None,
checkpoint_file=None, load_checkpoint_args=None):
"""
Create imagenet experiment model with option to load state from checkpoint
:param model_class:
The model class. Must inherit from torch.nn.Module
:param model_args:
The model constructor arguments
:param init_batch_norm:
Whether or not to initialize batch norm modules
:param device:
Model device
:param checkpoint_file:
Optional checkpoint file to load model state
:param load_checkpoint_args:
Additional args for load_state_from_checkpoint
:return: Configured model
"""
model = model_class(**model_args)
if init_batch_norm:
init_resnet50_batch_norm(model)
if device is not None:
model.to(device)
if checkpoint_file is not None:
restore_checkpoint(model, checkpoint_file, load_checkpoint_args, device)
return model
def restore_checkpoint(model, checkpoint_file, load_checkpoint_args, device=None):
"""Load model parameters from checkpoint"""
load_ckpt_args = load_checkpoint_args or {}
load_ckpt_args.setdefault("state_dict_transform", get_compatible_state_dict)
load_state_from_checkpoint(model, checkpoint_file, device, **load_ckpt_args)
| mrcslws/nupic.research | src/nupic/research/frameworks/vernon/network_utils.py | Python | agpl-3.0 | 4,364 | ["Gaussian"] | 381fa8aad53f337325f714fdb9e7c9a0f55e33e92429e9892dbc40345f35d0d7 |
"""
Executes a set of implementations as a program.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _
import os, sys
from logging import info, debug
from string import Template
from zeroinstall.injector.model import SafeException, EnvironmentBinding, ExecutableBinding, Command, Dependency
from zeroinstall.injector import namespaces, qdom
from zeroinstall.support import basedir
def do_env_binding(binding, path):
"""Update this process's environment by applying the binding.
@param binding: the binding to apply
@type binding: L{model.EnvironmentBinding}
@param path: the selected implementation
@type path: str"""
if binding.insert is not None and path is None:
# Skip insert bindings for package implementations
debug("not setting %s as we selected a package implementation", binding.name)
return
os.environ[binding.name] = binding.get_value(path,
os.environ.get(binding.name, None))
info("%s=%s", binding.name, os.environ[binding.name])
def test_selections(selections, prog_args, dry_run, main):
"""Run the program in a child process, collecting stdout and stderr.
@return: the output produced by the process
@since: 0.27
"""
import tempfile
output = tempfile.TemporaryFile(prefix = '0launch-test')
try:
child = os.fork()
if child == 0:
# We are the child
try:
try:
os.dup2(output.fileno(), 1)
os.dup2(output.fileno(), 2)
execute_selections(selections, prog_args, dry_run, main)
except:
import traceback
traceback.print_exc()
finally:
sys.stdout.flush()
sys.stderr.flush()
os._exit(1)
info(_("Waiting for test process to finish..."))
pid, status = os.waitpid(child, 0)
assert pid == child
output.seek(0)
results = output.read()
if status != 0:
results += _("Error from child process: exit code = %d") % status
finally:
output.close()
return results
def _process_args(args, element):
"""Append each <arg> under <element> to args, performing $-expansion."""
for child in element.childNodes:
if child.uri == namespaces.XMLNS_IFACE and child.name == 'arg':
args.append(Template(child.content).substitute(os.environ))
class Setup(object):
"""@since: 1.2"""
stores = None
selections = None
_exec_bindings = None
_checked_runenv = False
def __init__(self, stores, selections):
"""@param stores: where to find cached implementations
@type stores: L{zerostore.Stores}"""
self.stores = stores
self.selections = selections
def build_command(self, command_iface, command_name, user_command = None):
"""Create a list of strings to be passed to exec to run the <command>s in the selections.
@param command_iface: the interface of the program being run
@type command_iface: str
@param command_name: the name of the command being run
@type command_name: str
@param user_command: a custom command to use instead
@type user_command: L{model.Command}
@return: the argument list
@rtype: [str]"""
assert command_name or user_command
prog_args = []
sels = self.selections.selections
while command_name or user_command:
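            # follow the chain of <runner>s: each command may itself be run by another
            # command, whose arguments are prepended in the next iteration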
command_sel = sels[command_iface]
if user_command is None:
command = command_sel.get_command(command_name)
else:
command = user_command
user_command = None
command_args = []
# Add extra arguments for runner
runner = command.get_runner()
if runner:
command_iface = runner.interface
command_name = runner.command
_process_args(command_args, runner.qdom)
else:
command_iface = None
command_name = None
# Add main program path
command_path = command.path
if command_path is not None:
if command_sel.id.startswith('package:'):
prog_path = command_path
else:
if command_path.startswith('/'):
raise SafeException(_("Command path must be relative, but '%s' starts with '/'!") %
command_path)
prog_path = os.path.join(command_sel.get_path(self.stores), command_path)
assert prog_path is not None
if not os.path.exists(prog_path):
raise SafeException(_("File '%(program_path)s' does not exist.\n"
"(implementation '%(implementation_id)s' + program '%(main)s')") %
{'program_path': prog_path, 'implementation_id': command_sel.id,
'main': command_path})
command_args.append(prog_path)
# Add extra arguments for program
_process_args(command_args, command.qdom)
prog_args = command_args + prog_args
# Each command is run by the next, but the last one is run by exec, and we
# need a path for that.
if command.path is None:
raise SafeException("Missing 'path' attribute on <command>")
return prog_args
def prepare_env(self):
"""Do all the environment bindings in the selections (setting os.environ)."""
self._exec_bindings = []
def _do_bindings(impl, bindings, iface):
for b in bindings:
self.do_binding(impl, b, iface)
def _do_deps(deps):
for dep in deps:
dep_impl = sels.get(dep.interface, None)
if dep_impl is None:
assert dep.importance != Dependency.Essential, dep
else:
_do_bindings(dep_impl, dep.bindings, dep.interface)
sels = self.selections.selections
for selection in sels.values():
_do_bindings(selection, selection.bindings, selection.interface)
_do_deps(selection.dependencies)
# Process commands' dependencies' bindings too
for command in selection.get_commands().values():
_do_bindings(selection, command.bindings, selection.interface)
_do_deps(command.requires)
# Do these after <environment>s, because they may do $-expansion
for binding, iface in self._exec_bindings:
self.do_exec_binding(binding, iface)
self._exec_bindings = None
def do_binding(self, impl, binding, iface):
"""Called by L{prepare_env} for each binding.
Sub-classes may wish to override this.
@param impl: the selected implementation
@type impl: L{selections.Selection}
@param binding: the binding to be processed
@type binding: L{model.Binding}
@param iface: the interface containing impl
@type iface: L{model.Interface}
"""
if isinstance(binding, EnvironmentBinding):
if impl.id.startswith('package:'):
path = None # (but still do the binding, e.g. for values)
else:
path = impl.get_path(self.stores)
do_env_binding(binding, path)
elif isinstance(binding, ExecutableBinding):
if isinstance(iface, Dependency):
import warnings
warnings.warn("Pass an interface URI instead", DeprecationWarning, 2)
iface = iface.interface
self._exec_bindings.append((binding, iface))
def do_exec_binding(self, binding, iface):
assert iface is not None
name = binding.name
if '/' in name or name.startswith('.') or "'" in name:
raise SafeException("Invalid <executable> name '%s'" % name)
exec_dir = basedir.save_cache_path(namespaces.config_site, namespaces.config_prog, 'executables', name)
exec_path = os.path.join(exec_dir, name)
if not self._checked_runenv:
self._check_runenv()
if not os.path.exists(exec_path):
# Symlink ~/.cache/0install.net/injector/executables/$name/$name to runenv.py
os.symlink('../../runenv.py', exec_path)
os.chmod(exec_dir, 0o500)
if binding.in_path:
path = os.environ["PATH"] = exec_dir + os.pathsep + os.environ["PATH"]
info("PATH=%s", path)
else:
os.environ[name] = exec_path
info("%s=%s", name, exec_path)
import json
args = self.build_command(iface, binding.command)
os.environ["0install-runenv-" + name] = json.dumps(args)
def _check_runenv(self):
# Create the runenv.py helper script under ~/.cache if missing or out-of-date
main_dir = basedir.save_cache_path(namespaces.config_site, namespaces.config_prog)
runenv = os.path.join(main_dir, 'runenv.py')
expected_contents = "#!%s\nfrom zeroinstall.injector import _runenv; _runenv.main()\n" % sys.executable
actual_contents = None
if os.path.exists(runenv):
with open(runenv) as s:
actual_contents = s.read()
if actual_contents != expected_contents:
import tempfile
tmp = tempfile.NamedTemporaryFile('w', dir = main_dir, delete = False)
info("Updating %s", runenv)
tmp.write(expected_contents)
tmp.close()
os.chmod(tmp.name, 0o555)
os.rename(tmp.name, runenv)
self._checked_runenv = True
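# Illustrative sketch (not part of the original module): do_exec_binding() above
# stores the command's argv as JSON in an environment variable; a helper such as
# runenv.py can recover it with a plain json.loads().  The variable name
# "0install-runenv-demo" and the argv below are hypothetical.
def _demo_runenv_roundtrip():
    import json
    import os
    os.environ['0install-runenv-demo'] = json.dumps(['/usr/bin/env', 'python'])
    return json.loads(os.environ['0install-runenv-demo'])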
def execute_selections(selections, prog_args, dry_run = False, main = None, wrapper = None, stores = None):
"""Execute program. On success, doesn't return. On failure, raises an Exception.
Returns normally only for a successful dry run.
@param selections: the selected versions
@type selections: L{selections.Selections}
@param prog_args: arguments to pass to the program
@type prog_args: [str]
@param dry_run: if True, just print a message about what would have happened
@type dry_run: bool
@param main: the name of the binary to run, or None to use the default
@type main: str
@param wrapper: a command to use to actually run the binary, or None to run the binary directly
@type wrapper: str
@since: 0.27
@precondition: All implementations are in the cache.
"""
#assert stores is not None
if stores is None:
from zeroinstall import zerostore
stores = zerostore.Stores()
setup = Setup(stores, selections)
commands = selections.commands
if main is not None:
# Replace first command with user's input
if main.startswith('/'):
main = main[1:] # User specified a path relative to the package root
else:
old_path = commands[0].path
assert old_path, "Can't use a relative replacement main when there is no original one!"
main = os.path.join(os.path.dirname(old_path), main) # User main is relative to command's name
# Copy all child nodes (e.g. <runner>) except for the arguments
user_command_element = qdom.Element(namespaces.XMLNS_IFACE, 'command', {'path': main})
if commands:
for child in commands[0].qdom.childNodes:
if child.uri == namespaces.XMLNS_IFACE and child.name == 'arg':
continue
user_command_element.childNodes.append(child)
user_command = Command(user_command_element, None)
else:
user_command = None
setup.prepare_env()
prog_args = setup.build_command(selections.interface, selections.command, user_command) + prog_args
if wrapper:
prog_args = ['/bin/sh', '-c', wrapper + ' "$@"', '-'] + list(prog_args)
if dry_run:
print(_("Would execute: %s") % ' '.join(prog_args))
else:
info(_("Executing: %s"), prog_args)
sys.stdout.flush()
sys.stderr.flush()
try:
os.execv(prog_args[0], prog_args)
except OSError as ex:
raise SafeException(_("Failed to run '%(program_path)s': %(exception)s") % {'program_path': prog_args[0], 'exception': str(ex)})
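# Illustrative sketch (not part of the original module): how the wrapper argv built
# by execute_selections() behaves.  'echo' stands in for a real wrapper command, so
# the example runs anywhere with /bin/sh; the program arguments are hypothetical.
def _demo_wrapper_invocation():
    import subprocess
    prog_args = ['/bin/true', '--version']
    argv = ['/bin/sh', '-c', 'echo' + ' "$@"', '-'] + prog_args
    # /bin/sh substitutes prog_args for "$@", so echo prints them verbatim.
    return subprocess.check_output(argv)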
|
dabrahams/zeroinstall
|
zeroinstall/injector/run.py
|
Python
|
lgpl-2.1
| 10,692
|
[
"VisIt"
] |
cc31877c03f259509260ed32d3042cc521eb72b6259d5386b2469a18d696faaf
|
#!/usr/bin/python
from stemming.porter2 import stem
import DictionaryServices, re, operator
#ignore_list = set(['the', 'noun', 'adjective', 'the', 'for', 'not', 'when', 'shall', 'will', 'and', 'with', 'brit', 'informal', 'but', 'that', 'from', 'verb', 'their', 'derivatives', 'adverb', 'late', 'latin', 'origin'])
start = "love"
max_depth = 100
min_word_len = 3
most_freq_cutoff = 100
visited = set([])
freqs = dict()
stems = dict()
unigram_freqs = dict()
def visit(word, depth):
if depth > max_depth:
return
if word in visited:
return
word_stem = stem(word)
visited.add(word)
if not word in freqs:
freqs[word] = dict()
stems[word] = dict()
text = DictionaryServices.DCSCopyTextDefinition(None, word, (0, len(word)))
if not text or len(text) == 0:
return
# We don't care about any of the origin/etymology data, so remove it
text = text.split('ORIGIN')[0]
# Remove any punctuation, weird characters, etc.
filtered_text = re.sub(r'[\W\d]+', ' ', text).lower()
words = filtered_text.split()
for w in words:
w_stem = stem(w)
if w != word and len(w) >= min_word_len and w_stem != word_stem:
if not w in freqs:
freqs[w] = dict()
stems[w] = dict()
if w_stem not in stems[word]:
freqs[word][w] = 1 if w not in freqs[word] else freqs[word][w] + 1
stems[word][w_stem] = w
else:
same_stem = stems[word][w_stem]
freqs[word][same_stem] = freqs[word][same_stem] + 1
if word_stem not in stems[w]:
freqs[w][word] = 1 if word not in freqs[w] else freqs[w][word] + 1
stems[w][word_stem] = word
else:
same_stem = stems[w][word_stem]
freqs[w][same_stem] = freqs[w][same_stem] + 1
unigram_freqs[w] = 1 if w not in unigram_freqs else unigram_freqs[w] + 1
visit(w, depth + 1)
visit(start, 1)
# Find the words that appeared most frequently. They're probably shit.
ordered_by_freq = map(lambda x: x[0], sorted(unigram_freqs.iteritems(), key=operator.itemgetter(1), reverse=True))
shit_words = ordered_by_freq[0:most_freq_cutoff]
ordered_by_freq = ordered_by_freq[most_freq_cutoff:]
# Remove shit words
for shit_word in shit_words:
del freqs[shit_word]
for w in freqs:
if shit_word in freqs[w]:
del freqs[w][shit_word]
# We only care about words that appear reasonably often... say, the top 80% of what's left
words_to_use = ordered_by_freq[0:int(len(ordered_by_freq)*0.8)]
sorted_words = sorted(words_to_use)
for word in sorted_words:
if len(freqs[word]) >= 5:
x = sorted(freqs[word].iteritems(), key=operator.itemgetter(1), reverse=True)
sorted_matches = map(lambda x: x[0], x)
print word + ': ' + ', '.join(sorted_matches[0:6])
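# Illustrative sketch (not part of the original script): the sort-by-frequency idiom
# used above, shown on a toy dict so it runs without DictionaryServices (Python 2).
def _demo_sort_by_freq():
    toy = {'love': 5, 'affection': 3, 'fondness': 1}
    ordered = map(lambda x: x[0],
                  sorted(toy.iteritems(), key=operator.itemgetter(1), reverse=True))
    return ordered  # ['love', 'affection', 'fondness']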
|
jbowens/taboo
|
wordgen/osx-dict/explore.py
|
Python
|
mit
| 2,913
|
[
"VisIt"
] |
1637311aff571844d88443dab0d81f4b5bfe2bc2d1682d323cdced5c23b8de8c
|
########################################################################
# File : ModuleFactory.py
# Author : Stuart Paterson
########################################################################
""" The Module Factory instantiates a given Module based on a given input
string and set of arguments to be passed. This allows for VO specific
module utilities to be used in various contexts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gLogger
class ModuleFactory(object):
#############################################################################
def __init__(self):
"""Standard constructor"""
self.log = gLogger
#############################################################################
def getModule(self, importString, argumentsDict):
"""This method returns the Module instance given the import string and
arguments dictionary.
"""
try:
moduleName = importString.split(".")[-1]
modulePath = importString.replace(".%s" % (moduleName), "")
importModule = __import__("%s.%s" % (modulePath, moduleName), globals(), locals(), [moduleName])
except Exception as x:
msg = "ModuleFactory could not import %s.%s" % (modulePath, moduleName)
self.log.warn(x)
self.log.warn(msg)
return S_ERROR(msg)
try:
# FIXME: should we use imp module?
moduleStr = "importModule.%s(argumentsDict)" % (moduleName)
moduleInstance = eval(moduleStr)
except Exception as x:
msg = "ModuleFactory could not instantiate %s()" % (moduleName)
self.log.warn(x)
self.log.warn(msg)
return S_ERROR(msg)
return S_OK(moduleInstance)
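# Illustrative sketch (not part of the original module): the same dynamic
# import-and-instantiate idea as getModule(), shown with a standard-library class
# and getattr() instead of eval() so it runs outside DIRAC.  The import string
# "collections.OrderedDict" is only a stand-in for a VO-specific module path.
def _demoDynamicInstantiation():
    import importlib
    importString = 'collections.OrderedDict'
    modulePath, className = importString.rsplit('.', 1)
    module = importlib.import_module(modulePath)
    cls = getattr(module, className)
    return cls()  # instantiate, much as getModule() does for the imported class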
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
ic-hep/DIRAC
|
src/DIRAC/Core/Utilities/ModuleFactory.py
|
Python
|
gpl-3.0
| 1,990
|
[
"DIRAC"
] |
2a60f92d4bf57fd47833301506c283ab11d8cbd8271fbaa248a777a8e52cfead
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Copyright (C) 2020 Stoq Tecnologia <http://www.stoq.com.br>
# All rights reserved
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., or visit: http://www.gnu.org/.
#
# Author(s): Stoq Team <dev@stoq.com.br>
#
import logging
import datetime
from uuid import UUID
from flask import abort, make_response, jsonify, request
from storm.expr import And, Join, LeftJoin, Ne, Or, Select
from storm.info import ClassAlias
from stoqlib.domain.fiscal import Invoice, CfopData
from stoqlib.domain.overrides import ProductBranchOverride
from stoqlib.domain.payment.card import CreditCardData, CreditProvider
from stoqlib.domain.payment.method import PaymentMethod
from stoqlib.domain.payment.payment import Payment
from stoqlib.domain.payment.group import PaymentGroup
from stoqlib.domain.person import (Branch, Company, Individual, LoginUser, SalesPerson,
Person, ClientCategory, Client)
from stoqlib.domain.product import Product
from stoqlib.domain.profile import UserProfile
from stoqlib.domain.system import TransactionEntry
from stoqlib.domain.sale import Sale, SaleItem
from stoqlib.domain.sellable import Sellable, SellableCategory
from stoqlib.domain.station import BranchStation
from stoqlib.domain.taxes import InvoiceItemIcms
from stoqlib.lib.configparser import get_config
from stoqlib.lib.formatters import raw_document
from stoqlib.lib.parameters import sysparam
from stoqserver.api.decorators import store_provider, b1food_login_required, info_logger
from stoqserver.lib.baseresource import BaseResource
log = logging.getLogger(__name__)
global b1food_token
# just a random token until we have a domain to persist this.
def generate_b1food_token(size=128):
import string
import random
chars = string.ascii_uppercase + string.ascii_lowercase + string.digits
return ''.join(random.choice(chars) for _ in range(size))
b1food_token = generate_b1food_token()
def _check_required_params(data, required_params):
for param in required_params:
if param not in data:
message = 'Missing parameter \'%s\'' % param
log.error(message)
abort(400, message)
def _parse_request_list(request_list):
return_list = []
if request_list:
request_list = request_list.replace('[', '').replace(']', '')
return_list = request_list.split(',')
return return_list
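# Illustrative sketch (not part of the original module): _parse_request_list() turns
# the query-string form "[id1,id2]" into a plain list of strings, and an absent
# parameter into an empty list.
def _demo_parse_request_list():
    assert _parse_request_list('[id1,id2]') == ['id1', 'id2']
    assert _parse_request_list(None) == []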
def _check_if_uuid(branch_ids):
for branch_id in branch_ids:
try:
UUID(branch_id)
except ValueError:
message = 'os IDs das lojas devem ser do tipo UUID'
log.error(message)
abort(400, message)
def _get_category_info(sellable):
return {
'idGrupo': sellable.category and sellable.category.id,
'codigo': sellable.category and sellable.category.id,
'descricao': sellable.category and sellable.category.description,
'idGrupoPai': sellable.category and sellable.category.category_id,
'dataAlteracao': sellable.category and
sellable.category.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
'ativo': True,
}
def _get_network_info():
# We do not have this info in the database, so until we do we get it from the config
config = get_config()
return {
'id': config.get('B1Food', 'network_id') or '',
'name': config.get('B1Food', 'network_name') or '',
}
def _get_client_category_code(client_category):
return client_category and client_category.id
def _get_sellable_code(sellable):
# FIXME: it must be just code
# return sellable.code or sellable.barcode
return sellable.code
def _get_station_code(station):
return station.id
def _get_station_nickname(station):
return station.name
def _get_person_names(person):
name = person.name
if ' ' in name:
firstname, lastname = name.split(' ', maxsplit=1)
else:
firstname = name
lastname = '.'
return {
'primeiroNome': firstname,
'segundoNome': None,
'sobrenome': lastname,
'apelido': firstname,
}
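# Illustrative sketch (not part of the original module): how _get_person_names()
# splits a full name into the B1Food name fields.  FakePerson is a stand-in for the
# stoqlib Person domain object, carrying only the attribute the helper reads.
def _demo_get_person_names():
    from collections import namedtuple
    FakePerson = namedtuple('FakePerson', ['name'])
    names = _get_person_names(FakePerson(name='Maria da Silva'))
    return names['primeiroNome'], names['sobrenome']  # ('Maria', 'da Silva')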
card_type_map = {
'credit': 'CREDITO',
'debit': 'DEBITO'
}
# FIXME: Include the values for the following providers:
# PGTO ONLINE RIOMAR
# TRANSFERENCIA PICPAY
# TRANSFERENCIA PIX
card_name_map = {
'CREDITO IFOOD': 'PGTO ONLINE IFOOD',
'CREDITO RAPPI': 'PGTO ONLINE ZAPPAY'
}
method_name_map = {
'bill': 'BOLETO',
'check': 'CHEQUE',
'credit': 'CREDITO',
'deposit': 'DEPOSITO',
'money': 'DINHEIRO',
'multiple': 'MULTIPLO',
'online': 'ONLINE',
'store_credit': 'CARTAO DA LOJA',
'trade': 'TROCA'
}
def _get_card_name(card_type, provider_short_name):
card_name = ' '.join([card_type_map[card_type], provider_short_name]).upper()
return card_name_map.get(card_name, card_name)
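# Illustrative sketch (not part of the original module): _get_card_name() joins the
# translated card type with the provider's short name and only remaps the few
# combinations listed in card_name_map.  The provider names below are hypothetical.
def _demo_get_card_name():
    assert _get_card_name('credit', 'VISA') == 'CREDITO VISA'
    assert _get_card_name('credit', 'ifood') == 'PGTO ONLINE IFOOD'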
def _get_card_description(card_type, provider_short_name):
return _get_card_name(card_type, provider_short_name)
def _get_payment_method_name(payment_method_name):
return method_name_map[payment_method_name]
def _get_payment_method_code(payment_method):
return payment_method.id
def _get_payment_method_description(payment_method):
return _get_payment_method_name(payment_method.method_name)
def _get_payment_method_with_provider_code(card_type, provider):
return (card_type + '_' + provider.short_name.replace(" ", "")).lower()
def _get_credit_provider_description(credit_provider):
return credit_provider.short_name
def _get_payments_info(payments_list, login_user, sale):
payments = []
for payment in payments_list:
if payment.method.method_name == 'card':
card_type = payment.card_data.card_type
provider = payment.card_data.provider
payments.append({
'id': _get_payment_method_with_provider_code(card_type, provider),
'codigo': _get_payment_method_with_provider_code(card_type, provider),
'nome': _get_card_name(payment.card_data.card_type,
payment.card_data.provider.short_name),
'descricao': _get_card_description(payment.card_data.card_type,
payment.card_data.provider.short_name),
'valor': float(payment.base_value or 0),
'troco': float(payment.base_value - payment.value),
'valorRecebido': float(payment.value or 0),
'idAtendente': login_user.id,
'codAtendente': login_user.username,
'nomeAtendente': sale.salesperson.person.name,
})
continue
payments.append({
'id': payment.method.id,
'codigo': _get_payment_method_code(payment.method),
'nome': _get_payment_method_name(payment.method.method_name),
'descricao': _get_payment_method_description(payment.method),
'valor': float(payment.base_value or 0),
'troco': float(payment.base_value - payment.value),
'valorRecebido': float(payment.value or 0),
'idAtendente': login_user.id,
'codAtendente': login_user.username,
'nomeAtendente': sale.salesperson.person.name,
})
return payments
class B1foodLoginResource(BaseResource):
method_decorators = [info_logger]
routes = ['/b1food/oauth/authenticate']
def get(self):
data = request.args
if 'client_id' not in data:
abort(400, 'Missing client_id')
client_id = data['client_id']
config = get_config()
config_client_id = config.get("B1Food", "client_id") or ""
access_token = config.get("B1Food", "access_token") or ""
if client_id != config_client_id and config_client_id != "":
log.error('Login failed for client_id %s', client_id)
abort(403, 'Login failed for client_id {}'.format(client_id))
return make_response(jsonify({
'token_type': 'Bearer',
'expires_in': -1,
'access_token': access_token
}), 200)
class IncomeCenterResource(BaseResource):
method_decorators = [b1food_login_required, info_logger]
routes = ['/b1food/terceiros/restful/centrosrenda']
def get(self):
return []
class B1FoodSaleItemResource(BaseResource):
method_decorators = [b1food_login_required, store_provider, info_logger]
routes = ['/b1food/terceiros/restful/itemvenda']
def get(self, store):
data = request.args
required_params = ['dtinicio', 'dtfim']
_check_required_params(data, required_params)
initial_date = datetime.datetime.strptime(data['dtinicio'], '%Y-%m-%d')
end_date = datetime.datetime.combine(datetime.datetime.strptime(
data['dtfim'], '%Y-%m-%d').date(), datetime.time.max)
request_branches = data.get('lojas')
request_documents = data.get('consumidores')
request_invoice_keys = data.get('operacaocupom')
branch_ids = _parse_request_list(request_branches)
_check_if_uuid(branch_ids)
documents = _parse_request_list(request_documents)
invoice_keys = _parse_request_list(request_invoice_keys)
if data.get('usarDtMov') and data.get('usarDtMov') == '1':
clauses = [Sale.confirm_date >= initial_date, Sale.confirm_date <= end_date]
else:
clauses = [Sale.open_date >= initial_date, Sale.open_date <= end_date]
ClientPerson = ClassAlias(Person, 'person_client')
ClientIndividual = ClassAlias(Individual, 'individual_client')
ClientCompany = ClassAlias(Company, 'company_client')
SalesPersonPerson = ClassAlias(Person, 'person_sales_person')
SalesPersonIndividual = ClassAlias(Individual, 'individual_sales_person')
tables = [
Sale,
Join(Branch, Sale.branch_id == Branch.id),
LeftJoin(Client, Client.id == Sale.client_id),
LeftJoin(Person, Person.id == Client.person_id),
Join(BranchStation, Sale.station_id == BranchStation.id),
LeftJoin(ClientPerson, Client.person_id == ClientPerson.id),
LeftJoin(ClientIndividual, Client.person_id == ClientIndividual.person_id),
LeftJoin(Individual, Client.person_id == Individual.person_id),
LeftJoin(ClientCompany, Client.person_id == ClientCompany.person_id),
LeftJoin(Company, Client.person_id == Company.person_id),
LeftJoin(SalesPerson, SalesPerson.id == Sale.salesperson_id),
LeftJoin(SalesPersonPerson, SalesPerson.person_id == SalesPersonPerson.id),
LeftJoin(SalesPersonIndividual,
SalesPerson.person_id == SalesPersonIndividual.person_id),
Join(LoginUser, LoginUser.person_id == SalesPerson.person_id),
Join(Invoice, Sale.invoice_id == Invoice.id),
]
sale_item_tables = [
SaleItem,
Join(Sellable, SaleItem.sellable_id == Sellable.id),
Join(Product, SaleItem.sellable_id == Product.id),
LeftJoin(SellableCategory, Sellable.category_id == SellableCategory.id),
LeftJoin(TransactionEntry, SellableCategory.te_id == TransactionEntry.id)
]
sale_objs = (Sale, ClientCompany, ClientIndividual, LoginUser, Branch, BranchStation,
Client, ClientPerson, SalesPerson, SalesPersonPerson)
sale_items_objs = (SaleItem, Sellable, SellableCategory, Product, TransactionEntry)
if len(branch_ids) > 0:
clauses.append(Branch.id.is_in(branch_ids))
if len(documents) > 0:
clauses.append(Or(Individual.cpf.is_in(documents), Company.cnpj.is_in(documents)))
if len(invoice_keys) > 0:
clauses.append(Invoice.key.is_in(invoice_keys))
if data.get('cancelados') and data.get('cancelados') == '0':
clauses.append(Sale.status != Sale.STATUS_CANCELLED)
if data.get('cancelados') and data.get('cancelados') == '1':
clauses.append(Sale.status == Sale.STATUS_CANCELLED)
data = list(store.using(*tables).find(sale_objs, And(*clauses)))
sale_ids = [i[0].id for i in data]
sale_items = list(store.using(*sale_item_tables).find(sale_items_objs,
SaleItem.sale_id.is_in(sale_ids)))
sales = {}
for item in sale_items:
sales.setdefault(item[0].sale_id, [])
sales[item[0].sale_id].append(item[0])
response = []
for row in data:
sale, company, individual, login_user = row[:4]
for item in sales[sale.id]:
discount = item.item_discount
sellable = item.sellable
station = sale.station
salesperson = sale.salesperson
cpf = individual and individual.cpf
cnpj = company and company.cnpj
document = cpf or cnpj or ''
if cpf:
document_type = 'CPF'
elif cnpj:
document_type = 'CNPJ'
else:
document_type = ''
network = _get_network_info()
res_item = {
'idItemVenda': item.id,
'valorUnitario': float(item.base_price),
'valorBruto': float(item.base_price * item.quantity),
'valorUnitarioLiquido': float(item.price),
'valorLiquido': float(item.price * item.quantity),
'idOrigem': None,
'codOrigem': None,
'desconto': float(discount),
'acrescimo': 0,
'maquinaId': station.id,
'nomeMaquina': station.name,
'maquinaCod': _get_station_code(station),
'quantidade': float(item.quantity),
'redeId': network['id'],
'lojaId': sale.branch.id,
'idMaterial': sellable.id,
'codMaterial': _get_sellable_code(sellable),
'descricao': sellable.description,
'grupo': _get_category_info(sellable),
'operacaoId': sale.id,
'atendenteId': login_user.id,
'atendenteCod': login_user.username,
'atendenteNome': salesperson.person.name,
'isTaxa': False,
'isRepique': False,
'isGorjeta': False,
'isEntrega': False, # FIXME maybe should be true if external order
'consumidores': [{
'documento': raw_document(document),
'tipo': document_type
}],
'cancelado': sale.status == Sale.STATUS_CANCELLED,
'dtLancamento': sale.confirm_date.strftime('%Y-%m-%d'),
'horaLancamento': sale.confirm_date.strftime('%H:%M'),
'tipoDescontoId': sale.client_category and sale.client_category.id,
'tipoDescontoCod': _get_client_category_code(sale.client_category),
'tipoDescontoNome': sale.client_category and sale.client_category.name,
}
response.append(res_item)
return response
class B1FoodSellableResource(BaseResource):
method_decorators = [b1food_login_required, store_provider, info_logger]
routes = ['/b1food/terceiros/restful/material']
def _get_response_item(self, sellable, branch_id):
res_item = {
'idMaterial': sellable.id,
'codigo': _get_sellable_code(sellable),
'descricao': sellable.description,
'unidade': sellable.unit and sellable.unit.description,
'dataAlteracao': sellable.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
'ativo': sellable.status == Sellable.STATUS_AVAILABLE,
'redeId': self.network['id'],
'lojaId': branch_id,
'isTaxa': False,
'isRepique': False,
'isGorjeta': False,
'isEntrega': False,
'grupo': _get_category_info(sellable)
}
return res_item
def get(self, store):
data = request.args
request_available = data.get('ativo')
request_branches = data.get('lojas')
branch_ids = _parse_request_list(request_branches)
database_branches = store.find(Branch)
database_branch_ids = [branch.id for branch in database_branches]
not_found_ids = [branch_id for branch_id in branch_ids
if branch_id not in database_branch_ids]
if not_found_ids:
message = 'Branch(es) %s not found' % not_found_ids
log.error(message)
abort(404, message)
delivery = sysparam.get_object(store, 'DELIVERY_SERVICE')
if request_available == '1':
sellables = Sellable.get_available_sellables(store)
elif request_available == '0':
# We want to exclude unavailable sellables and the ones
# that are not products, e.g. "Entrega"
sellables = store.find(Sellable, And(Ne(Sellable.id, delivery.sellable.id),
Ne(Sellable.description, 'Entrega'),
Ne(Sellable.status, Sellable.STATUS_AVAILABLE)))
else:
sellables = store.find(Sellable, And(Ne(Sellable.id, delivery.sellable.id),
Ne(Sellable.description, 'Entrega')))
self.network = _get_network_info()
response = []
if not branch_ids:
for sellable in sellables:
res_item = self._get_response_item(sellable, branch_id=None)
response.append(res_item)
return response
query = ProductBranchOverride.branch_id.is_in(branch_ids)
pbos = store.find(ProductBranchOverride, query)
branch_sellable_ids = {}
for pbo in pbos:
branch_sellable_ids.setdefault(pbo.branch_id, [])
branch_sellable_ids[pbo.branch_id].append(pbo.product_id)
for branch_id in branch_ids:
if not branch_sellable_ids.get(branch_id):
branch_sellables = sellables
else:
sellable_ids = branch_sellable_ids[branch_id]
branch_sellables = [sellable for sellable in sellables
if sellable.id in sellable_ids]
for sellable in branch_sellables:
res_item = self._get_response_item(sellable, branch_id=branch_id)
response.append(res_item)
return response
class B1FoodPaymentsResource(BaseResource):
method_decorators = [b1food_login_required, store_provider, info_logger]
routes = ['/b1food/terceiros/restful/movimentocaixa']
def _get_payments_sum(self, payments):
# FIXME We opted not to use sale.get_total_paid() to prevent extra queries.
# We reimplemented that private method without out-payments, purchases and
# renegotiations. For now we raise an exception, hoping that our clients do
# not use those features
out_payments = [p for p in payments if p.payment_type == Payment.TYPE_OUT]
if len(out_payments) > 0:
raise Exception("Inconsistent database, please contact support.")
in_payments = [p for p in payments if p.payment_type == Payment.TYPE_IN]
return sum([payment.value for payment in in_payments])
def get(self, store):
data = request.args
required_params = ['dtinicio', 'dtfim']
_check_required_params(data, required_params)
initial_date = datetime.datetime.strptime(data['dtinicio'], '%Y-%m-%d')
end_date = datetime.datetime.combine(datetime.datetime.strptime(
data['dtfim'], '%Y-%m-%d').date(), datetime.time.max)
request_branches = data.get('lojas')
request_documents = data.get('consumidores')
request_invoice_keys = data.get('operacaocupom')
branch_ids = _parse_request_list(request_branches)
_check_if_uuid(branch_ids)
documents = _parse_request_list(request_documents)
invoice_keys = _parse_request_list(request_invoice_keys)
clauses = [Sale.confirm_date >= initial_date, Sale.confirm_date <= end_date]
ClientPerson = ClassAlias(Person, 'person_client')
ClientIndividual = ClassAlias(Individual, 'individual_client')
ClientCompany = ClassAlias(Company, 'company_client')
SalesPersonPerson = ClassAlias(Person, 'person_sales_person')
SalesPersonIndividual = ClassAlias(Individual, 'individual_sales_person')
tables = [
Sale,
Join(Branch, Sale.branch_id == Branch.id),
LeftJoin(Client, Client.id == Sale.client_id),
LeftJoin(Person, Person.id == Client.person_id),
Join(BranchStation, Sale.station_id == BranchStation.id),
LeftJoin(ClientPerson, Client.person_id == ClientPerson.id),
LeftJoin(ClientIndividual, Client.person_id == ClientIndividual.person_id),
LeftJoin(Individual, Client.person_id == Individual.person_id),
LeftJoin(ClientCompany, Client.person_id == ClientCompany.person_id),
LeftJoin(Company, Client.person_id == Company.person_id),
LeftJoin(SalesPerson, SalesPerson.id == Sale.salesperson_id),
LeftJoin(SalesPersonPerson, SalesPerson.person_id == SalesPersonPerson.id),
LeftJoin(SalesPersonIndividual,
SalesPerson.person_id == SalesPersonIndividual.person_id),
Join(LoginUser, LoginUser.person_id == SalesPerson.person_id),
Join(PaymentGroup, PaymentGroup.id == Sale.group_id),
Join(Invoice, Sale.invoice_id == Invoice.id),
]
payment_tables = [
Payment,
Join(PaymentMethod, Payment.method_id == PaymentMethod.id),
Join(PaymentGroup, Payment.group_id == PaymentGroup.id),
]
sale_objs = (Sale, ClientCompany, ClientIndividual, LoginUser, Branch, PaymentGroup,
BranchStation, Client, ClientPerson, SalesPerson, SalesPersonPerson)
payment_objs = (Payment, PaymentMethod, PaymentGroup)
if len(branch_ids) > 0:
clauses.append(Branch.id.is_in(branch_ids))
if len(documents) > 0:
clauses.append(Or(Individual.cpf.is_in(documents), Company.cnpj.is_in(documents)))
if len(invoice_keys) > 0:
clauses.append(Invoice.key.is_in(invoice_keys))
if data.get('cancelados') and data.get('cancelados') == '0':
clauses.append(Sale.status != Sale.STATUS_CANCELLED)
if data.get('cancelados') and data.get('cancelados') == '1':
clauses.append(Sale.status == Sale.STATUS_CANCELLED)
data = list(store.using(*tables).find(sale_objs, And(*clauses)))
group_ids = [i[0].group_id for i in data]
payments_list = list(store.using(*payment_tables).find(payment_objs,
Payment.group_id.is_in(group_ids)))
sale_payments = {}
for payment in payments_list:
sale_payments.setdefault(payment[0].group_id, [])
sale_payments[payment[0].group_id].append(payment[0])
response = []
for row in data:
sale, company, individual, login_user, branch, group = row[:6]
cpf = individual and individual.cpf
cnpj = company and company.cnpj
document = cpf or cnpj or ''
if cpf:
document_type = 'CPF'
elif cnpj:
document_type = 'CNPJ'
else:
document_type = ''
network = _get_network_info()
payment_methods = _get_payments_info(sale_payments[sale.group_id], login_user, sale)
change = sum(payment['troco'] for payment in payment_methods)
res_item = {
'idMovimentoCaixa': sale.id,
'redeId': network['id'],
'rede': network['name'],
'lojaId': branch.id,
'loja': branch.name,
'hora': sale.confirm_date.strftime('%H'),
'cancelado': sale.status == Sale.STATUS_CANCELLED,
'idAtendente': login_user.id,
'codAtendente': login_user.username,
'nomeAtendente': sale.salesperson.person.name,
'vlDesconto': float(sale.discount_value),
'vlAcrescimo': float(sale.surcharge_value),
'vlTotalReceber': float(sale.total_amount),
'vlTotalRecebido': float(self._get_payments_sum(sale_payments[sale.group_id])),
'vlTrocoFormasPagto': change,
'vlServicoRecebido': 0,
'vlRepique': 0,
'vlTaxaEntrega': 0,
'numPessoas': 1,
'operacaoId': sale.id,
'maquinaId': sale.station.id,
'nomeMaquina': sale.station.name,
'maquinaCod': _get_station_code(sale.station),
'maquinaPortaFiscal': None,
'meiosPagamento': payment_methods,
'consumidores': [{
'documento': raw_document(document),
'tipo': document_type,
}],
# FIXME B1Food expects this date to be the same as the emission date;
# we want the emission date of nfe_data for this field
# https://gitlab.com/stoqtech/private/stoq-plugin-nfe/-/issues/111
'dataContabil': sale.confirm_date.strftime('%Y-%m-%d %H:%M:%S -0300'),
'periodoId': None,
'periodoCod': None,
'periodoNome': None,
'centroRendaId': None,
'centroRendaCod': None,
'centroRendaNome': None,
}
response.append(res_item)
return response
class B1FoodPaymentMethodResource(BaseResource):
method_decorators = [b1food_login_required, store_provider, info_logger]
routes = ['/b1food/terceiros/restful/meio-pagamento']
def get(self, store):
data = request.args
request_is_active = data.get('ativo')
payment_methods = store.find(PaymentMethod)
card_payment_method = payment_methods.find(method_name='card').one()
if request_is_active == '1':
payment_methods = PaymentMethod.get_active_methods(store)
if request_is_active == '0':
payment_methods = payment_methods.find(is_active=False)
network = _get_network_info()
response = []
for payment_method in payment_methods:
# PaymentMethod 'card' will be replaced by its corresponding CreditProviders
if payment_method == card_payment_method:
continue
res_item = {
'ativo': payment_method.is_active,
'id': payment_method.id,
'codigo': _get_payment_method_code(payment_method),
'nome': _get_payment_method_name(payment_method.method_name),
'redeId': network['id'],
'lojaId': None
}
response.append(res_item)
select = Select((CreditCardData.card_type, CreditCardData.provider_id),
distinct=True)
result = store.execute(select)
for item in result:
card_type = item[0]
provider_id = item[1]
provider = store.get(CreditProvider, provider_id)
is_provider_active = card_payment_method and card_payment_method.is_active \
and provider.visible
if request_is_active == '1' and not is_provider_active:
continue
if request_is_active == '0' and is_provider_active:
continue
res_item = {
'ativo': is_provider_active,
'id': _get_payment_method_with_provider_code(card_type, provider),
'codigo': _get_payment_method_with_provider_code(card_type, provider),
'nome': _get_card_name(card_type, provider.short_name),
'redeId': network['id'],
'lojaId': None
}
response.append(res_item)
return response
class B1FoodStationResource(BaseResource):
method_decorators = [b1food_login_required, store_provider, info_logger]
routes = ['/b1food/terceiros/restful/terminais']
def get(self, store):
data = request.args
request_branches = data.get('lojas')
active = data.get('ativo')
branch_ids = _parse_request_list(request_branches)
_check_if_uuid(branch_ids)
tables = [BranchStation]
clauses = []
if active is not None:
is_active = active == '1'
clauses.append(BranchStation.is_active == is_active)
if len(branch_ids) > 0:
tables.append(Join(Branch, BranchStation.branch_id == Branch.id))
clauses.append(Branch.id.is_in(branch_ids))
if len(clauses) > 0:
stations = store.using(*tables).find(BranchStation, And(*clauses))
else:
stations = store.using(*tables).find(BranchStation)
network = _get_network_info()
response = []
for station in stations:
response.append({
'ativo': station.is_active,
'id': station.id,
'codigo': station.id,
'nome': station.name,
'apelido': _get_station_nickname(station),
'portaFiscal': None,
'redeId': network['id'],
'lojaId': station.branch.id,
'dataAlteracao': station.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
'dataCriacao': station.te.te_time.strftime('%Y-%m-%d %H:%M:%S -0300'),
})
return response
class B1FoodReceiptsResource(BaseResource):
method_decorators = [b1food_login_required, store_provider, info_logger]
routes = ['/b1food/terceiros/restful/comprovante']
def get(self, store):
data = request.args
required_params = ['dtinicio', 'dtfim']
_check_required_params(data, required_params)
initial_date = datetime.datetime.strptime(data['dtinicio'], '%Y-%m-%d')
end_date = datetime.datetime.combine(datetime.datetime.strptime(
data['dtfim'], '%Y-%m-%d').date(), datetime.time.max)
request_branches = data.get('lojas')
request_documents = data.get('consumidores')
request_invoice_keys = data.get('operacaocupom')
branch_ids = _parse_request_list(request_branches)
_check_if_uuid(branch_ids)
documents = _parse_request_list(request_documents)
invoice_keys = _parse_request_list(request_invoice_keys)
if data.get('usarDtMov') and data.get('usarDtMov') == '1':
clauses = [Sale.confirm_date >= initial_date, Sale.confirm_date <= end_date]
else:
clauses = [Sale.open_date >= initial_date, Sale.open_date <= end_date]
ClientPerson = ClassAlias(Person, 'person_client')
ClientIndividual = ClassAlias(Individual, 'individual_client')
ClientCompany = ClassAlias(Company, 'company_client')
SalesPersonPerson = ClassAlias(Person, 'person_sales_person')
SalesPersonIndividual = ClassAlias(Individual, 'individual_sales_person')
tables = [
Sale,
Join(Branch, Sale.branch_id == Branch.id),
LeftJoin(Client, Client.id == Sale.client_id),
Join(Person, Person.id == Client.person_id),
Join(BranchStation, Sale.station_id == BranchStation.id),
LeftJoin(ClientPerson, Client.person_id == ClientPerson.id),
LeftJoin(ClientIndividual, Client.person_id == ClientIndividual.person_id),
LeftJoin(Individual, Client.person_id == Individual.person_id),
LeftJoin(ClientCompany, Client.person_id == ClientCompany.person_id),
LeftJoin(Company, Client.person_id == Company.person_id),
LeftJoin(SalesPerson, SalesPerson.id == Sale.salesperson_id),
LeftJoin(SalesPersonPerson, SalesPerson.person_id == SalesPersonPerson.id),
LeftJoin(SalesPersonIndividual,
SalesPerson.person_id == SalesPersonIndividual.person_id),
Join(LoginUser, LoginUser.person_id == SalesPerson.person_id),
Join(PaymentGroup, PaymentGroup.id == Sale.group_id),
Join(Invoice, Invoice.id == Sale.invoice_id),
]
sale_item_tables = [
SaleItem,
Join(Sellable, SaleItem.sellable_id == Sellable.id),
Join(Product, SaleItem.sellable_id == Product.id),
LeftJoin(SellableCategory, Sellable.category_id == SellableCategory.id),
LeftJoin(TransactionEntry, SellableCategory.te_id == TransactionEntry.id),
LeftJoin(InvoiceItemIcms, InvoiceItemIcms.id == SaleItem.icms_info_id),
LeftJoin(CfopData, CfopData.id == SaleItem.cfop_id)
]
payment_tables = [
Payment,
Join(PaymentMethod, Payment.method_id == PaymentMethod.id),
Join(PaymentGroup, Payment.group_id == PaymentGroup.id),
]
sale_objs = (Sale, ClientCompany, ClientIndividual, LoginUser, Invoice, Branch,
PaymentGroup, BranchStation, Client, ClientPerson, SalesPerson,
SalesPersonPerson)
sale_items_objs = (SaleItem, Sellable, SellableCategory, Product, CfopData,
TransactionEntry, InvoiceItemIcms)
payment_objs = (Payment, PaymentMethod, PaymentGroup)
if len(branch_ids) > 0:
clauses.append(Branch.id.is_in(branch_ids))
if len(documents) > 0:
clauses.append(Or(Individual.cpf.is_in(documents), Company.cnpj.is_in(documents)))
if len(invoice_keys) > 0:
clauses.append(Invoice.key.is_in(invoice_keys))
if data.get('cancelados') and data.get('cancelados') == '0':
clauses.append(Sale.status != Sale.STATUS_CANCELLED)
if data.get('cancelados') and data.get('cancelados') == '1':
clauses.append(Sale.status == Sale.STATUS_CANCELLED)
data = list(store.using(*tables).find(sale_objs, And(*clauses)))
sale_ids = [i[0].id for i in data]
group_ids = [i[0].group_id for i in data]
sale_items = list(store.using(*sale_item_tables).find(sale_items_objs,
SaleItem.sale_id.is_in(sale_ids)))
payments_list = list(store.using(*payment_tables).find(payment_objs,
Payment.group_id.is_in(group_ids)))
sale_payments = {}
for payment in payments_list:
sale_payments.setdefault(payment[0].group_id, [])
sale_payments[payment[0].group_id].append(payment[0])
sales = {}
for item in sale_items:
sales.setdefault(item[0].sale_id, [])
sales[item[0].sale_id].append(item[0])
response = []
for row in data:
sale, company, individual, login_user, invoice = row[:5]
items = []
for item in sales[sale.id]:
discount = item.item_discount
product = item.sellable.product
items.append({
'ordem': None,
'idMaterial': item.sellable.id,
'codigo': _get_sellable_code(item.sellable),
'descricao': item.sellable.description,
'quantidade': float(item.quantity),
'valorBruto': float(item.base_price * item.quantity),
'valorUnitario': float(item.base_price),
'valorUnitarioLiquido': float(item.price),
'valorLiquido': float(item.price * item.quantity),
'codNcm': product.ncm,
'idOrigem': None,
'codOrigem': None,
'cfop': str(item.cfop.code),
'desconto': float(max(discount, 0)),
'acrescimo': float(-1 * min(discount, 0)),
'cancelado': sale.status == Sale.STATUS_CANCELLED,
'maquinaId': sale.station.id,
'nomeMaquina': sale.station.name,
'maquinaCod': _get_station_code(sale.station),
'isTaxa': None,
'isRepique': None,
'isGorjeta': None,
'isEntrega': None,
})
payment_methods = _get_payments_info(sale_payments[sale.group_id], login_user, sale)
change = sum(payment['troco'] for payment in payment_methods)
res_item = {
'maquinaCod': _get_station_code(sale.station),
'nomeMaquina': sale.station.name,
'nfNumero': invoice.invoice_number,
'nfSerie': invoice.series,
'denominacao': invoice.mode,
'valor': float(sale.total_amount),
'maquinaId': sale.station.id,
'desconto': float(sale.discount_value or 0),
'acrescimo': float(sale.surcharge_value or 0),
'chaveNfe': invoice.key,
# FIXME B1Food expects this date to be the same as the emission date;
# we want the emission date of nfe_data for this field
# https://gitlab.com/stoqtech/private/stoq-plugin-nfe/-/issues/111
'dataContabil': sale.confirm_date.strftime('%Y-%m-%d'),
'dataEmissao': sale.confirm_date.strftime('%Y-%m-%d %H:%M:%S -0300'),
'idOperacao': sale.id,
'troco': change,
'pagamentos': float(sale.paid),
'dataMovimento': sale.confirm_date.strftime('%Y-%m-%d %H:%M:%S -0300'),
'cancelado': sale.status == Sale.STATUS_CANCELLED,
'detalhes': items,
'meios': payment_methods,
}
response.append(res_item)
return response
class B1FoodTillResource(BaseResource):
method_decorators = [b1food_login_required, store_provider, info_logger]
routes = ['/b1food/terceiros/restful/periodos']
def get(self, store):
return []
class B1FoodRolesResource(BaseResource):
method_decorators = [b1food_login_required, store_provider, info_logger]
routes = ['/b1food/terceiros/restful/cargos']
def get(self, store):
data = request.args
request_is_active = data.get('ativo')
# Since our domain does not have an is_active attribute for UserProfile,
# treat them all as active
if request_is_active == '0':
return []
profiles = store.find(UserProfile)
network = _get_network_info()
response = []
for profile in profiles:
response.append({
'ativo': True,
'id': profile.id,
'codigo': profile.id,
'dataCriacao': profile.te.te_time.strftime('%Y-%m-%d %H:%M:%S -0300'),
'dataAlteracao': profile.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
'nome': profile.name,
'redeId': network['id'],
'lojaId': None
})
return response
class B1FoodBranchResource(BaseResource):
method_decorators = [b1food_login_required, store_provider, info_logger]
routes = ['/b1food/terceiros/restful/rede-loja']
def get(self, store):
data = request.args
tables = [Branch]
query = None
active = data.get('ativo')
if active is not None:
is_active = active == '1'
query = Branch.is_active == is_active
if query:
branches = store.using(*tables).find(Branch, query)
else:
branches = store.using(*tables).find(Branch)
network = _get_network_info()
response = [{
'idRede': network['id'],
'nome': network['name'],
'ativo': True,
'idRedePai': None,
'lojas': [],
}]
for branch in branches:
response[0]['lojas'].append({
'idLoja': branch.id,
'nome': branch.name,
'ativo': branch.is_active
})
return response
class B1FoodDiscountCategoryResource(BaseResource):
method_decorators = [b1food_login_required, store_provider, info_logger]
routes = ['/b1food/terceiros/restful/tiposdescontos']
def get(self, store):
data = request.args
request_is_active = data.get('ativo')
# Since our domain does not have an is_active attribute for ClientCategory,
# treat them all as active
if request_is_active == '0':
return []
categories = store.find(ClientCategory)
network = _get_network_info()
response = []
for category in categories:
response.append({
'ativo': True,
'id': category.id,
'codigo': category.id,
'dataCriacao': category.te.te_time.strftime('%Y-%m-%d %H:%M:%S -0300'),
'dataAlteracao': category.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
'nome': category.name,
'redeId': network['id'],
'lojaId': None
})
return response
class B1FoodLoginUserResource(BaseResource):
method_decorators = [b1food_login_required, store_provider, info_logger]
routes = ['/b1food/terceiros/restful/funcionarios']
def get(self, store):
data = request.args
request_is_active = data.get('ativo')
users = store.find(LoginUser)
if request_is_active == '1':
users = LoginUser.get_active_users(store)
elif request_is_active == '0':
users = users.find(is_active=False)
network = _get_network_info()
response = []
for user in users:
person_names = _get_person_names(user.person)
profile = user.profile
response.append({
'id': user.id,
'codigo': user.username,
'dataCriacao': user.te.te_time.strftime('%Y-%m-%d %H:%M:%S -0300'),
'dataAlteracao': user.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
'primeiroNome': person_names['primeiroNome'],
'segundoNome': person_names['segundoNome'],
'sobrenome': person_names['sobrenome'],
'apelido': person_names['apelido'],
# FIXME: Assert every one has profile in database or
# create a default profile for all?
'idCargo': profile.id if profile else None,
'codCargo': profile.id if profile else None,
'nomeCargo': profile.name if profile else None,
'redeId': network['id'],
'lojaId': None,
'ativo': user.is_active,
})
return response
|
stoq/stoq-server
|
stoqserver/api/resources/b1food.py
|
Python
|
gpl-2.0
| 44,437
|
[
"VisIt"
] |
6ef829975bb8ebd62e8db5be7ec34afc1f0e3568891df42cba2320323cdf460d
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffycontam(RPackage):
"""structured corruption of cel file data to demonstrate QA
effectiveness."""
homepage = "https://www.bioconductor.org/packages/affyContam/"
url = "https://git.bioconductor.org/packages/affyContam"
version('1.34.0', git='https://git.bioconductor.org/packages/affyContam', commit='03529f26d059c19e069cdda358dbf7789b6d4c40')
depends_on('r@3.4.0:3.4.9', when=('@1.34.0'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-affy', type=('build', 'run'))
depends_on('r-affydata', type=('build', 'run'))
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-affycontam/package.py
|
Python
|
lgpl-2.1
| 1,837
|
[
"Bioconductor"
] |
c31cc4cb36bbbfffc858ef4002b7f9da931faea4936c45a3be74c9bbdae750ae
|
""" Hidden Markov model (HMM) with Gaussian observations
This example shows how a hidden Markov model can be implemented
as a dynamic Bayesian network.
The DBN is unrolled for N time-slices and inference/learning
is performed in the unrolled static network with conditional
PDFs shared across time slices.
x1 _ x2 ... xN
| | |
y1 y2 ... yN
"""
# Imports
import numpy as np
import models
import cpds
import inference
from gauss import Gauss
def test_ghmm():
"""
Testing: GHMM
"""
# Handles for readability.
inf = np.inf
# HMM parameters
# prior
pi = np.array([0.6, 0.4])
# state transition matrix
A = np.array([[0.7, 0.3],
[0.2, 0.8]])
# state emission probabilities
B = np.array([Gauss(mean=np.array([1.0, 2.0]),
cov=np.eye(2)),
Gauss(mean=np.array([0.0, -1.0]),
cov=np.eye(2))
])
# DBN
intra = np.array([[0,1],[0,0]]) # Intra-slice dependencies
inter = np.array([[1,0],[0,0]]) # Inter-slice dependencies
node_sizes = np.array([2,inf])
discrete_nodes = [0]
continuous_nodes = [1]
node_cpds = [cpds.TabularCPD(pi),
cpds.GaussianCPD(B),
cpds.TabularCPD(A)]
dbn = models.DBN(intra, inter, node_sizes, discrete_nodes,
continuous_nodes, node_cpds)
inference_engine = inference.JTreeUnrolledDBNInferenceEngine()
inference_engine.model = dbn
inference_engine.initialize(T=5)
dbn.inference_engine = inference_engine
# INFERENCE
evidence = [[None,[1.0,2.0]]
,[None,[3.0,4.0]],[None,[5.0,6.0]],[None,[7.0,8.0]],[None,[9.0,10.0]]]
dbn.enter_evidence(evidence)
print "Likelihood of single sample: %f"%dbn.sum_product()
# LEARNING
samples = [[[None, [-0.9094,-3.3056]],
[None, [ 2.7887, 2.3908]],
[None, [ 1.0203, 1.5940]],
[None, [-0.5349, 2.2214]],
[None, [-0.3745, 1.1607]]],
[[None, [ 0.7914, 2.7559]],
[None, [ 0.3757,-2.3454]],
[None, [ 2.4819, 2.0327]],
[None, [ 2.8705, 0.7910]],
[None, [ 0.2174, 1.2327]]]
]
print "\nEM parameter learning:"
dbn.learn_params_EM(samples,max_iter = 10)
print "\nPrior (pi):"
print dbn.node_cpds[0]
print "\nTransition matrx (A):"
print dbn.node_cpds[2]
print "\nEmission probabilities (B):"
print dbn.node_cpds[1]
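# Illustrative sketch (not part of the original test): sampling a state/observation
# sequence from the same HMM parameters used above, with plain numpy only (no
# models/cpds/inference dependencies).  The function name and seed are arbitrary.
def _demo_sample_ghmm(T=5, seed=0):
    rng = np.random.RandomState(seed)
    pi = np.array([0.6, 0.4])
    A = np.array([[0.7, 0.3],
                  [0.2, 0.8]])
    means = [np.array([1.0, 2.0]), np.array([0.0, -1.0])]
    states, obs = [], []
    state = rng.choice(2, p=pi)
    for _ in range(T):
        states.append(state)
        obs.append(rng.multivariate_normal(means[state], np.eye(2)))
        state = rng.choice(2, p=A[state])
    return states, obs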
|
bhrzslm/uncertainty-reasoning
|
my_engine/others/GrMPy/lib/GrMPy/Tests/test_ghmm.py
|
Python
|
mit
| 2,546
|
[
"Gaussian"
] |
d994bd64474e9566f10f60a73e118bbbebc1c1ceae9850832679b4cfc997b7b6
|
#!/usr/bin/env python
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/384')
from data_384 import Fmat_original
def data_to_mu(fvec, groups):
m,n = np.shape(fvec)
print m,n
mu = np.matrix(np.zeros((groups,n)))
DIVS = m/groups
for i in range(n):
index = 0
while (index < groups):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),i]
#if index == 1:
#print temp_fvec
mu[index,i] = round(scp.mean(temp_fvec),4)
index = index+1
return mu
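# Illustrative sketch (not part of the original script): data_to_mu() averages each
# column over contiguous blocks of rows, reducing an (m, n) matrix to (groups, n).
# The toy matrix below is hypothetical.
def _demo_data_to_mu():
    demo = np.matrix([[1.0, 10.0],
                      [3.0, 30.0],
                      [5.0, 50.0],
                      [7.0, 70.0]])
    # With groups=2, each block of 2 rows is averaged: [[2, 20], [6, 60]].
    return data_to_mu(demo, 2)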
if __name__ == '__main__':
Fmat_force = Fmat_original[0:121,0:140]
Fmat_area = Fmat_original[121:242,0:140]
groups = 30
test_trials = 7
m,n = np.shape(Fmat_force)
Fmat_f = data_to_mu(Fmat_force,groups)
Fmat_a = data_to_mu(Fmat_area,groups)
f_maxarr = [np.max(Fmat_f[:,0:35]), np.max(Fmat_f[:,35:70]), np.max(Fmat_f[:,70:105]), np.max(Fmat_f[:,105:140])]
f_minarr = [np.min(Fmat_f[:,0:35]), np.min(Fmat_f[:,35:70]), np.min(Fmat_f[:,70:105]), np.min(Fmat_f[:,105:140])]
a_maxarr = [np.max(Fmat_a[:,0:35]), np.max(Fmat_a[:,35:70]), np.max(Fmat_a[:,70:105]), np.max(Fmat_a[:,105:140])]
a_minarr = [np.min(Fmat_a[:,0:35]), np.min(Fmat_a[:,35:70]), np.min(Fmat_a[:,70:105]), np.min(Fmat_a[:,105:140])]
f = open('train.txt', 'w')
g = open('test.txt', 'w')
for i in range(n):
seqf = list(Fmat_f[0:groups,i].flat)
seqa = list(Fmat_a[0:groups,i].flat)
if i < (n/4):
for j in range(np.size(seqf)):
f_idx = int((seqf[j]-f_minarr[0])*groups/(f_maxarr[0]-f_minarr[0]))
a_idx = int((seqa[j]-a_minarr[0])*groups/(a_maxarr[0]-a_minarr[0]))
idstr = 'RF'+str(f_idx)+str(a_idx)
if i < (n/4 - test_trials):
f.write(str(seqf[j])+' '+str(seqa[j])+' '+idstr+'\n')
else:
g.write(str(seqf[j])+' '+str(seqa[j])+' '+idstr+'\n')
if i < (n/4 - test_trials):
f.write('\n')
else:
g.write('\n')
elif i >= (n/4) and i < (n/2):
for j in range(np.size(seqf)):
f_idx = int((seqf[j]-f_minarr[1])*groups/(f_maxarr[1]-f_minarr[1]))
a_idx = int((seqa[j]-a_minarr[1])*groups/(a_maxarr[1]-a_minarr[1]))
idstr = 'RM'+str(f_idx)+str(a_idx)
if i < (n/2 - test_trials):
f.write(str(seqf[j])+' '+str(seqa[j])+' '+idstr+'\n')
else:
g.write(str(seqf[j])+' '+str(seqa[j])+' '+idstr+'\n')
if i < (n/2 - test_trials):
f.write('\n')
else:
g.write('\n')
elif i >= (n/2) and i < 3*(n/4):
for j in range(np.size(seqf)):
f_idx = int((seqf[j]-f_minarr[2])*groups/(f_maxarr[2]-f_minarr[2]))
a_idx = int((seqa[j]-a_minarr[2])*groups/(a_maxarr[2]-a_minarr[2]))
idstr = 'SF'+str(f_idx)+str(a_idx)
if i < (3*(n/4) - test_trials):
f.write(str(seqf[j])+' '+str(seqa[j])+' '+idstr+'\n')
else:
g.write(str(seqf[j])+' '+str(seqa[j])+' '+idstr+'\n')
if i < (3*(n/4) - test_trials):
f.write('\n')
else:
g.write('\n')
else:
for j in range(np.size(seqf)):
f_idx = int((seqf[j]-f_minarr[3])*groups/(f_maxarr[3]-f_minarr[3]))
a_idx = int((seqa[j]-a_minarr[3])*groups/(a_maxarr[3]-a_minarr[3]))
idstr = 'SM'+str(f_idx)+str(a_idx)
if i < (n - test_trials):
f.write(str(seqf[j])+' '+str(seqa[j])+' '+idstr+'\n')
else:
g.write(str(seqf[j])+' '+str(seqa[j])+' '+idstr+'\n')
if i < (n - test_trials):
f.write('\n')
else:
g.write('\n')
|
tapomayukh/projects_in_python
|
classification/Classification_with_CRF/old_crfsuite_package/data_conversion.py
|
Python
|
mit
| 4,634
|
[
"Mayavi"
] |
01b63712cb9f059b394aded291be73f84381bf350972f533a469a7994e7f69dc
|
# -*- coding: utf-8 -*-
"""
generators.py: Trivial result generation functions for illustrating
tdda.referencetest
Source repository: http://github.com/tdda/tdda
License: MIT
Copyright (c) Stochastic Solutions Limited 2016
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import math
def generate_string():
"""
Returns an HTML string that should display identically to the result
stored in tdda/examples/reference/string_result, but which has
four (non-semantic) differences:
- The Copyright line is different
- The version number line is different
- It has a newline at the top, unlike the reference file.
- It is missing a newline at the end of the string, which the
reference file includes.
"""
version = '1.0.0'
copyright_year = '2016'
inc_gendate = False
date = datetime.datetime.now().strftime(' %Y-%m-%dT%H:%M:%S')
generation_date = date if inc_gendate else ''
spiral = generate_spiral()
return """<!DOCTYPE html>
<html>
<head>
<!--
Copyright (c) Stochastic Solutions, %(copyright_year)s
Version %(version)s
%(generation_date)s
-->
<meta charset="UTF-8"/>
<style type="text/css">
body {font-family: Palatino, "Palatino Linotype", Times;
text-align: center}
h1 {font-size: large; text-align: center;}
div {padding: 12px 0 12px 0;}
</style>
<title>
Python-Generated HTML Example for tdda.referencetest
</title>
</head>
<body>
<h1>Python-Generated HTML Example for tdda.referencetest</h1>
<div>
This page is generated by Python (as a string).
</div>
%(spiral)s
<div>
It's not terribly exciting.
But it will serve to illustrate tdda.referencetest.
</div>
</body>
</html>
""" % locals()
def generate_file(path):
html = '''<!DOCTYPE html>
<html>
<head>
<!--
Copyright (c) Stochastic Solutions, 2016
Version 1.0.0
-->
<meta charset="UTF-8"/>
<style type="text/css">
body {font-family: Palatino, "Palatino Linotype", Times;
text-align: center;}
h1 {font-size: large; text-align: center;}
div {padding-top: 12px; padding-bottom: 12px;}
</style>
<title>
Python-Generated HTML Example for tdda.referencetest
</title>
</head>
<body>
<h1>Python-Generated HTML Example for tdda.referencetest</h1>
<div>
This page is generated by Python (as a file).
</div>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="200" height="200">
<style type="text/css">text { font-family: Helvetica, Verdana, Sans;}</style>
<!-- TDDA Test Output Version 0.0.1
Copyright (c) Stochastic Solutions Limited 2016 -->
<rect x="0" y="0" width="96" height="96" style="fill:#C08080"/>
<rect x="105" y="0" width="96" height="96" style="fill:#F0C080"/>
<rect x="0" y="104" width="96" height="96" style="fill:#F0C080"/>
<rect x="104" y="104" width="96" height="96" style="fill:#C08080"/>
<text x="50" y="78" font-size="80" text-anchor="middle">T</text>
<text x="150" y="78" font-size="80" text-anchor="middle">D</text>
<text x="50" y="178" font-size="80" text-anchor="middle">D</text>
<text x="150" y="178" font-size="80" text-anchor="middle">A</text>
</svg>
<div>
It will serve to illustrate tdda.referencetest.
</div>
</body>
</html>
'''
with open(path, 'w') as f:
f.write(html)
def generate_spiral():
points = ' '.join('%d %d L' % (t / 20 * math.cos(t * 2 * math.pi / 100),
t / 20 * math.sin(t * 2 * math.pi / 100))
for t in range(1000))
colour = 'red'
return '''<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="100" height="100">
<g transform="translate(50,50)">
<path d="M 0 0 %(points)s z" stroke="%(colour)s" fill="none"/>
</g>
</svg>''' % locals()
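# Illustrative sketch (not part of the original examples): writing the spiral SVG
# fragment produced by generate_spiral() to a stand-alone file; the default path
# 'spiral_demo.svg' is hypothetical.
def generate_spiral_file(path='spiral_demo.svg'):
    with open(path, 'w') as f:
        f.write(generate_spiral())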
|
tdda/tdda
|
tdda/referencetest/examples/generators.py
|
Python
|
mit
| 4,047
|
[
"exciting"
] |
cfada21a802dc6dab252d3a355f2cd1ed6f697be5f0b0c1a6b641ae080ac824c
|
"""Joint variant calling with multiple samples: aka squaring off, or backfilling.
Handles the N+1 problem of variant calling by combining and recalling samples
previously called individually (or in smaller batches). Recalls at all positions found
variable in any of the input samples within each batch. Takes a general approach supporting
GATK's incremental joint discovery (http://www.broadinstitute.org/gatk/guide/article?id=3893)
and FreeBayes's N+1 approach (https://groups.google.com/d/msg/freebayes/-GK4zI6NsYY/Wpcp8nt_PVMJ)
as implemented in bcbio.variation.recall (https://github.com/chapmanb/bcbio.variation.recall).
"""
import collections
import math
import os
import pysam
import toolz as tz
from bcbio import broad, utils
from bcbio.bam import ref
from bcbio.distributed.split import grouped_parallel_split_combine
from bcbio.pipeline import config_utils, region
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import bamprep, gatkjoint, genotype, multi
SUPPORTED = {"general": ["freebayes", "platypus", "samtools"],
"gatk": ["gatk-haplotype"],
"gvcf": ["strelka2"],
"sentieon": ["haplotyper"]}
# ## CWL joint calling targets
def batch_for_jointvc(items):
batch_groups = collections.defaultdict(list)
for data in [utils.to_single_data(x) for x in items]:
vc = dd.get_variantcaller(data)
if genotype.is_joint(data):
batches = dd.get_batches(data) or dd.get_sample_name(data)
if not isinstance(batches, (list, tuple)):
batches = [batches]
else:
batches = [dd.get_sample_name(data)]
for b in batches:
data = utils.deepish_copy(data)
data["vrn_file_gvcf"] = data["vrn_file"]
batch_groups[(b, vc)].append(data)
return list(batch_groups.values())
def run_jointvc(items):
items = [utils.to_single_data(x) for x in items]
data = items[0]
if not dd.get_jointcaller(data):
data["config"]["algorithm"]["jointcaller"] = "%s-joint" % dd.get_variantcaller(data)
# GenomicsDBImport expects 1-based coordinates, so convert the 0-based region start accordingly.
chrom, coords = data["region"].split(":")
start, end = coords.split("-")
ready_region = "%s:%s-%s" % (chrom, int(start) + 1, end)
str_region = ready_region.replace(":", "_")
batches = dd.get_batches(data) or dd.get_sample_name(data)
if not isinstance(batches, (list, tuple)):
batches = [batches]
out_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "joint",
dd.get_variantcaller(data), str_region)),
"%s-%s-%s.vcf.gz" % (batches[0], dd.get_variantcaller(data), str_region))
joint_out = square_batch_region(data, ready_region, [], [d["vrn_file"] for d in items], out_file)[0]
data["vrn_file_region"] = joint_out["vrn_file"]
return data
def concat_batch_variantcalls_jointvc(items):
concat_out = genotype.concat_batch_variantcalls(items, region_block=False, skip_jointcheck=True)
return {"vrn_file_joint": concat_out["vrn_file"]}
def finalize_jointvc(items):
return [utils.to_single_data(x) for x in items]
def _get_callable_regions(data):
"""Retrieve regions to parallelize by from callable regions or chromosomes.
"""
import pybedtools
callable_files = data.get("callable_regions")
if callable_files:
assert len(callable_files) == 1
regions = [(r.chrom, int(r.start), int(r.stop)) for r in pybedtools.BedTool(callable_files[0])]
else:
work_bam = list(tz.take(1, filter(lambda x: x and x.endswith(".bam"), data["work_bams"])))
if work_bam:
with pysam.Samfile(work_bam[0], "rb") as pysam_bam:
regions = [(chrom, 0, length) for (chrom, length) in zip(pysam_bam.references,
pysam_bam.lengths)]
else:
regions = [(r.name, 0, r.size) for r in
ref.file_contigs(dd.get_ref_file(data), data["config"])]
return regions
def _split_by_callable_region(data):
"""Split by callable or variant regions.
We expect joint calling to be deep in numbers of samples per region, so prefer
splitting aggressively by regions.
"""
batch = tz.get_in(("metadata", "batch"), data)
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
name = batch if batch else tz.get_in(("rgnames", "sample"), data)
out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "joint", jointcaller, name))
utils.safe_makedir(os.path.join(out_dir, "inprep"))
parts = []
for feat in _get_callable_regions(data):
region_dir = utils.safe_makedir(os.path.join(out_dir, feat[0]))
region_prep_dir = os.path.join(region_dir, "inprep")
if not os.path.exists(region_prep_dir):
os.symlink(os.path.join(os.pardir, "inprep"), region_prep_dir)
region_outfile = os.path.join(region_dir, "%s-%s.vcf.gz" % (batch, region.to_safestr(feat)))
parts.append((feat, data["work_bams"], data["vrn_files"], region_outfile))
out_file = os.path.join(out_dir, "%s-joint.vcf.gz" % name)
return out_file, parts
def _is_jointcaller_compatible(data):
"""Match variant caller inputs to compatible joint callers.
"""
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
variantcaller = tz.get_in(("config", "algorithm", "variantcaller"), data)
if isinstance(variantcaller, (list, tuple)) and len(variantcaller) == 1:
variantcaller = variantcaller[0]
return jointcaller == "%s-joint" % variantcaller or not variantcaller
def square_off(samples, run_parallel):
"""Perform joint calling at all variants within a batch.
"""
to_process = []
extras = []
for data in [utils.to_single_data(x) for x in samples]:
added = False
if tz.get_in(("metadata", "batch"), data):
for add in genotype.handle_multiple_callers(data, "jointcaller", require_bam=False):
if _is_jointcaller_compatible(add):
added = True
to_process.append([add])
if not added:
extras.append([data])
processed = grouped_parallel_split_combine(to_process, _split_by_callable_region,
multi.group_batches_joint, run_parallel,
"square_batch_region", "concat_variant_files",
"vrn_file", ["region", "sam_ref", "config"])
return _combine_to_jointcaller(processed) + extras
def _combine_to_jointcaller(processed):
"""Add joint calling information to variants, while collapsing independent regions.
"""
by_vrn_file = collections.OrderedDict()
for data in (x[0] for x in processed):
key = (tz.get_in(("config", "algorithm", "jointcaller"), data), data["vrn_file"])
if key not in by_vrn_file:
by_vrn_file[key] = []
by_vrn_file[key].append(data)
out = []
for grouped_data in by_vrn_file.values():
cur = grouped_data[0]
out.append([cur])
return out
def want_gvcf(items):
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), items[0])
want_gvcf = any("gvcf" in dd.get_tools_on(d) for d in items)
return jointcaller or want_gvcf
def get_callers():
return ["%s-joint" % x for x in SUPPORTED["general"]] + \
["%s-merge" % x for x in SUPPORTED["general"]] + \
["%s-joint" % x for x in SUPPORTED["gatk"]] + \
["%s-joint" % x for x in SUPPORTED["gvcf"]] + \
["%s-joint" % x for x in SUPPORTED["sentieon"]]
def square_batch_region(data, region, bam_files, vrn_files, out_file):
"""Perform squaring of a batch in a supplied region, with input BAMs
"""
from bcbio.variation import sentieon, strelka2
if not utils.file_exists(out_file):
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
if jointcaller in ["%s-joint" % x for x in SUPPORTED["general"]]:
_square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "square")
elif jointcaller in ["%s-merge" % x for x in SUPPORTED["general"]]:
_square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "merge")
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gatk"]]:
gatkjoint.run_region(data, region, vrn_files, out_file)
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gvcf"]]:
strelka2.run_gvcfgenotyper(data, region, vrn_files, out_file)
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["sentieon"]]:
sentieon.run_gvcftyper(vrn_files, out_file, region, data)
else:
raise ValueError("Unexpected joint calling approach: %s." % jointcaller)
if region:
data["region"] = region
data = _fix_orig_vcf_refs(data)
data["vrn_file"] = out_file
return [data]
def _fix_orig_vcf_refs(data):
"""Supply references to initial variantcalls if run in addition to batching.
"""
variantcaller = tz.get_in(("config", "algorithm", "variantcaller"), data)
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
if variantcaller or jointcaller:
data["vrn_file_orig"] = data["vrn_file"]
for i, sub in enumerate(data.get("group_orig", [])):
sub_vrn = sub.pop("vrn_file", None)
if sub_vrn:
sub["vrn_file_orig"] = sub_vrn
data["group_orig"][i] = sub
return data
def _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file,
todo="square"):
"""Run squaring or merging analysis using bcbio.variation.recall.
"""
ref_file = tz.get_in(("reference", "fasta", "base"), data)
cores = tz.get_in(("config", "algorithm", "num_cores"), data, 1)
resources = config_utils.get_resources("bcbio-variation-recall", data["config"])
# adjust memory by cores but leave room for run program memory
memcores = int(math.ceil(float(cores) / 5.0))
jvm_opts = config_utils.adjust_opts(resources.get("jvm_opts", ["-Xms250m", "-Xmx2g"]),
{"algorithm": {"memory_adjust": {"direction": "increase",
"magnitude": memcores}}})
# Write unique VCFs and BAMs to input file
input_file = "%s-inputs.txt" % os.path.splitext(out_file)[0]
with open(input_file, "w") as out_handle:
out_handle.write("\n".join(sorted(list(set(vrn_files)))) + "\n")
if todo == "square":
out_handle.write("\n".join(sorted(list(set(bam_files)))) + "\n")
variantcaller = tz.get_in(("config", "algorithm", "jointcaller"), data).replace("-joint", "")
cmd = ["bcbio-variation-recall", todo] + jvm_opts + broad.get_default_jvm_opts() + \
["-c", cores, "-r", bamprep.region_to_gatk(region)]
if todo == "square":
cmd += ["--caller", variantcaller]
cmd += [out_file, ref_file, input_file]
bcbio_env = utils.get_bcbio_env()
cmd = " ".join(str(x) for x in cmd)
do.run(cmd, "%s in region: %s" % (cmd, bamprep.region_to_gatk(region)), env=bcbio_env)
return out_file
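# Illustrative sketch (not in the original module): the command assembled in
# _square_batch_bcbio_variation looks roughly like the following, with paths,
# core count and the extra JVM options standing in as placeholders:
#
#   bcbio-variation-recall square -Xms250m -Xmx2g -c 16 \
#       -r chr1:1-5000000 --caller freebayes \
#       batch1-out.vcf.gz ref.fa batch1-out-inputs.txt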
|
lbeltrame/bcbio-nextgen
|
bcbio/variation/joint.py
|
Python
|
mit
| 11,473
|
[
"pysam"
] |
e7c54e3c2bdcbeb2cf488f1a336a8a348877c68a3f04569ec1ee01665b7a4fcb
|
from cStringIO import StringIO
import numpy as np
import warnings
import xray
from xray.backends.common import AbstractWritableDataStore
from xray.conventions import (is_valid_nc3_name, coerce_nc3_dtype,
encode_cf_variable)
from xray.utils import Frozen
class ScipyDataStore(AbstractWritableDataStore):
"""Store for reading and writing data via scipy.io.netcdf.
This store has the advantage of being able to be initialized with a
StringIO object, allow for serialization without writing to disk.
It only supports the NetCDF3 file-format.
"""
def __init__(self, filename_or_obj, mode='r', mmap=None, version=1):
import scipy
if mode != 'r' and tuple(int(p) for p in scipy.__version__.split('.')[:2]) < (0, 13):  # compare numeric parts, not the version string
warnings.warn('scipy %s detected; '
'the minimal recommended version is 0.13. '
'Older versions of this library do not reliably '
'read and write files.'
% scipy.__version__, ImportWarning)
import scipy.io
# if filename is a NetCDF3 bytestring we store it in a StringIO
if (isinstance(filename_or_obj, basestring)
and filename_or_obj.startswith('CDF')):
# TODO: this check has the unfortunate side-effect that
# paths to files cannot start with 'CDF'.
filename_or_obj = StringIO(filename_or_obj)
self.ds = scipy.io.netcdf.netcdf_file(
filename_or_obj, mode=mode, mmap=mmap, version=version)
def open_store_variable(self, var):
return xray.Variable(var.dimensions, var.data, var._attributes)
@property
def attrs(self):
return Frozen(self.ds._attributes)
@property
def dimensions(self):
return Frozen(self.ds.dimensions)
def set_dimension(self, name, length):
if name in self.dimensions:
raise ValueError('%s does not support modifying dimensions'
% type(self).__name__)
self.ds.createDimension(name, length)
def _validate_attr_key(self, key):
if not is_valid_nc3_name(key):
raise ValueError("Not a valid attribute name")
def _cast_attr_value(self, value):
if isinstance(value, basestring):
value = unicode(value)
else:
value = coerce_nc3_dtype(np.atleast_1d(value))
if value.ndim > 1:
raise ValueError("netCDF attributes must be 1-dimensional")
return value
def set_attribute(self, key, value):
self._validate_attr_key(key)
setattr(self.ds, key, self._cast_attr_value(value))
def set_variable(self, name, variable):
variable = encode_cf_variable(variable)
data = coerce_nc3_dtype(variable.values)
self.set_necessary_dimensions(variable)
self.ds.createVariable(name, data.dtype, variable.dimensions)
scipy_var = self.ds.variables[name]
if data.ndim == 0:
scipy_var.assignValue(data)
else:
scipy_var[:] = data[:]
for k, v in variable.attrs.iteritems():
self._validate_attr_key(k)
setattr(scipy_var, k, self._cast_attr_value(v))
def del_attribute(self, key):
delattr(self.ds, key)
def sync(self):
self.ds.flush()
def close(self):
self.ds.close()
def __exit__(self, type, value, tb):
self.close()
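# Illustrative sketch (not in the original module): writing a small NetCDF3
# file entirely in memory; only methods defined above are used.
#
#   from cStringIO import StringIO
#   buf = StringIO()
#   store = ScipyDataStore(buf, mode='w')
#   store.set_dimension('x', 3)
#   store.set_attribute('title', 'in-memory example')
#   store.sync()
#   nc3_bytes = buf.getvalue()   # raw NetCDF3 bytes, starting with 'CDF'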
|
takluyver/xray
|
xray/backends/scipy_.py
|
Python
|
apache-2.0
| 3,452
|
[
"NetCDF"
] |
94dcbf62b6e12eab24e2af0d19ab47368a73d23486b8927531a1a0ab6afe308b
|
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2012 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Module with the always_seq decorator. """
import sys
import inspect
from types import FunctionType
import ast
from myhdl import AlwaysError, intbv
from myhdl._util import _isGenFunc, _dedent
from myhdl._cell_deref import _cell_deref
from myhdl._delay import delay
from myhdl._Signal import _Signal, _WaiterList,_isListOfSigs
from myhdl._Waiter import _Waiter, _EdgeWaiter, _EdgeTupleWaiter
from myhdl._instance import _Instantiator
# evacuate this later
AlwaysSeqError = AlwaysError
class _error:
pass
_error.EdgeType = "first argument should be an edge"
_error.ResetType = "reset argument should be a ResetSignal"
_error.ArgType = "decorated object should be a classic (non-generator) function"
_error.NrOfArgs = "decorated function should not have arguments"
_error.SigAugAssign = "signal assignment does not support augmented assignment: %s"  # message takes the offending name via %
_error.EmbeddedFunction = "embedded functions in always_seq function not supported"
class ResetSignal(_Signal):
def __init__(self, val, active, async):
""" Construct a ResetSignal.
This is to be used in conjunction with the always_seq decorator,
as the reset argument.
"""
_Signal.__init__(self, bool(val))
self.active = bool(active)
self.async = async
def always_seq(edge, reset):
if not isinstance(edge, _WaiterList):
raise AlwaysSeqError(_error.EdgeType)
edge.sig._read = True
edge.sig._used = True
if reset is not None:
if not isinstance(reset, ResetSignal):
raise AlwaysSeqError(_error.ResetType)
reset._read = True
reset._used = True
def _always_seq_decorator(func):
if not isinstance(func, FunctionType):
raise AlwaysSeqError(_error.ArgType)
if _isGenFunc(func):
raise AlwaysSeqError(_error.ArgType)
if func.func_code.co_argcount > 0:
raise AlwaysSeqError(_error.NrOfArgs)
return _AlwaysSeq(func, edge, reset)
return _always_seq_decorator
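# Illustrative sketch (not part of the original module): typical use of the
# decorator above for a clocked register with an asynchronous, active-high
# reset (Signal and intbv come from myhdl; ResetSignal is defined above).
#
#   clk = Signal(bool(0))
#   rst = ResetSignal(0, active=1, async=True)
#   d = Signal(intbv(0)[8:])
#   q = Signal(intbv(0)[8:])
#
#   @always_seq(clk.posedge, reset=rst)
#   def reg_logic():
#       q.next = d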
class _AlwaysSeq(_Instantiator):
def __init__(self, func, edge, reset):
self.func = func
self.senslist = senslist = [edge]
self.reset = reset
if reset is not None:
active = self.reset.active
async = self.reset.async
if async:
if active:
senslist.append(reset.posedge)
else:
senslist.append(reset.negedge)
self.gen = self.genfunc()
else:
self.gen = self.genfunc_no_reset()
if len(self.senslist) == 1:
W = _EdgeWaiter
else:
W = _EdgeTupleWaiter
self.waiter = W(self.gen)
# find symdict
# similar to always_comb, but in class constructor
varnames = func.func_code.co_varnames
symdict = {}
for n, v in func.func_globals.items():
if n not in varnames:
symdict[n] = v
# handle free variables
if func.func_code.co_freevars:
for n, c in zip(func.func_code.co_freevars, func.func_closure):
try:
obj = _cell_deref(c)
symdict[n] = obj
except NameError:
raise NameError(n)
self.symdict = symdict
# now infer outputs to be reset
s = inspect.getsource(func)
s = _dedent(s)
tree = ast.parse(s)
# print ast.dump(tree)
v = _SigNameVisitor(symdict)
v.visit(tree)
sigregs = self.sigregs = []
varregs = self.varregs = []
for n in v.outputs:
reg = self.symdict[n]
if isinstance(reg, _Signal):
sigregs.append(reg)
elif isinstance(reg, intbv):
varregs.append((n, reg, int(reg)))
else:
assert _isListOfSigs(reg)
for e in reg:
sigregs.append(e)
def reset_sigs(self):
for s in self.sigregs:
s.next = s._init
def reset_vars(self):
for v in self.varregs:
# only intbv's for now
n, reg, init = v
reg._val = init
def genfunc(self):
senslist = self.senslist
if len(senslist) == 1:
senslist = senslist[0]
reset_sigs = self.reset_sigs
reset_vars = self.reset_vars
func = self.func
while 1:
yield senslist
if self.reset == self.reset.active:
reset_sigs()
reset_vars()
else:
func()
def genfunc_no_reset(self):
senslist = self.senslist
assert len(senslist) == 1
senslist = senslist[0]
func = self.func
while 1:
yield senslist
func()
# similar to always_comb, calls for refactoring
# note: make a difference between augmented assign and inout signals
INPUT, OUTPUT, INOUT = range(3)
class _SigNameVisitor(ast.NodeVisitor):
def __init__(self, symdict):
self.inputs = set()
self.outputs = set()
self.toplevel = 1
self.symdict = symdict
self.context = INPUT
def visit_Module(self, node):
for n in node.body:
self.visit(n)
def visit_FunctionDef(self, node):
if self.toplevel:
self.toplevel = 0 # skip embedded functions
for n in node.body:
self.visit(n)
else:
raise AlwaysSeqError(_error.EmbeddedFunction)
def visit_If(self, node):
if not node.orelse:
if isinstance(node.test, ast.Name) and \
node.test.id == '__debug__':
return # skip
self.generic_visit(node)
def visit_Name(self, node):
id = node.id
if id not in self.symdict:
return
s = self.symdict[id]
if isinstance(s, (_Signal, intbv)) or _isListOfSigs(s):
if self.context == INPUT:
self.inputs.add(id)
elif self.context == OUTPUT:
self.outputs.add(id)
elif self.context == INOUT:
raise AlwaysSeqError(_error.SigAugAssign % id)
else:
raise AssertionError("bug in always_seq")
def visit_Assign(self, node):
self.context = OUTPUT
for n in node.targets:
self.visit(n)
self.context = INPUT
self.visit(node.value)
def visit_Attribute(self, node):
self.visit(node.value)
def visit_Subscript(self, node, access=INPUT):
self.visit(node.value)
self.context = INPUT
self.visit(node.slice)
def visit_AugAssign(self, node, access=INPUT):
self.context = INOUT
self.visit(node.target)
self.context = INPUT
self.visit(node.value)
def visit_ClassDef(self, node):
pass # skip
def visit_Exec(self, node):
pass # skip
def visit_Print(self, node):
pass # skip
|
cordoval/myhdl-python
|
myhdl/_always_seq.py
|
Python
|
lgpl-2.1
| 7,997
|
[
"VisIt"
] |
4302c36bd1fab7ba451184cf167d5fc32f3362ae24e0910ccc7073d97ae99a63
|
from django.shortcuts import render
from django.conf import settings
from django.core.files import File
from protein.models import ProteinFamily, ProteinAlias, ProteinSet, Protein, ProteinSegment
from common.views import AbsTargetSelection
from common.views import AbsSegmentSelection
from common.views import AbsMiscSelection
from common.selection import SelectionItem
from mutation.models import *
import math
import os, shutil, subprocess, signal
import uuid
from phylogenetic_trees.PrepareTree import *
from collections import OrderedDict
Alignment = getattr(__import__('common.alignment_' + settings.SITE_NAME, fromlist=['Alignment']), 'Alignment')
def kill_phylo(): #FIXME, needs better way of handling this!
p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'protdist' in str(line):
pid = int(line.split(None, 1)[0])
os.kill(pid, signal.SIGKILL)
class TargetSelection(AbsTargetSelection):
step = 1
number_of_steps = 3
docs = 'sequences.html#phylogeneric-trees'
selection_boxes = OrderedDict([
('targets', True),
('segments', False),
])
buttons = {
'continue': {
'label': 'Continue to next step',
'url': '/phylogenetic_trees/segmentselection',
'color': 'success',
},
}
class SegmentSelection(AbsSegmentSelection):
step = 2
number_of_steps = 3
docs = 'sequences.html#phylogeneric-trees'
selection_boxes = OrderedDict([
('targets', True),
('segments', True),
])
buttons = {
'continue': {
'label': 'Continue to next step',
'url': '/phylogenetic_trees/treesettings',
'color': 'success',
},
}
class TreeSettings(AbsMiscSelection):
step = 3
number_of_steps = 3
docs = 'sequences.html#phylogeneric-trees'
title = 'SELECT TREE OPTIONS'
description = 'Select options for tree generation in the middle column.\nOnce you have selected your' \
+ ' settings, click the green button.'
docs = '/documentation/similarities'
selection_boxes = OrderedDict([
('targets', True),
('segments', True),
])
buttons = OrderedDict({
'continue_new': {
'label': 'Draw tree using new code',
'url': '/phylogenetic_trees/render_new',
'color': 'success',
},
'continue': {
'label': 'Draw tree using previous code',
'url': '/phylogenetic_trees/render',
'color': 'success',
}
})
tree_settings = True
class Treeclass:
family = {}
def __init__(self):
self.Additional_info={"crystal": {"include":"False", "order":6, "colours":{"crystal_true":"#6dcde1","crystal_false":"#EEE"}, "color_type":"single", "proteins":[], "parent":None, "child": None, "name":"Crystals"},
"class": {"include":"True", "order":0, "colours":{}, "proteins":[], "color_type":"grayscale", "parent":[], "child": ["family,ligand"], "name":"Class"},
"family": {"include":"True", "order":1, "colours":{}, "proteins":[], "color_type":"spectrum", "parent":[], "child": ["ligand"], "name":"Ligand type"},
"ligand": {"include":"True", "order":2, "colours":{}, "proteins":[], "color_type":"spectrum", "parent":["family","class"], "child": [], "name":"Receptor type"},
"mutant": {"include":"False", "order":3, "colours":{"mutant_true":"#6dcde1","mutant_false":"#EEE"}, "color_type":"single", "proteins":[], "parent":[], "child": ["mutant_plus","mutant_minus"], "name":"Mutated proteins"},
"mutant_plus": {"include":"False", "order":4, "colours":{"mutant_plus_true":"#6dcde1","mutant_plus_false":"#EEE"}, "color_type":"single", "proteins":[], "parent":"mutant", "child": [], "name":"Positive affinity mutants"},
"mutant_minus": {"include":"False", "order":5, "colours":{"mutant_minus_true":"#6dcde1","mutant_minus_false":"#EEE"}, "color_type":"single", "proteins":[], "parent":"mutant", "child": [], "name":"Negative affinity mutants"}
}
self.buttons = [(x[1]['order'],x[1]['name']) for x in sorted(self.Additional_info.items(), key= lambda x: x[1]['order']) if x[1]['include']=='True']
self.family = {}
self.phylip = None
self.outtree = None
self.dir = ''
def Prepare_file(self, request,build=False):
self.Tree = PrepareTree(build)
a=Alignment()
sets = ProteinSet.objects.all()
#### Get additional data ####
crysts=[]
for n in sets:
if n.id==1:
for prot in n.proteins.all():
crysts.append(prot.entry_name)
#############################
# get the user selection from session
if build != False:
################################## FOR BUILDING STATISTICS ONLY##########################
build_proteins=[]
if build == '001':
cons_prots = []
for prot in Protein.objects.filter(sequence_type__slug='consensus', species_id=1):
if prot.family.slug.startswith('001') and len(prot.family.slug.split('_'))==3:
build_proteins.append(prot)
for set in sets:
if set.id==1:
for prot in set.proteins.all():
if prot.family.slug.startswith('001_') and prot.species.latin_name=='Homo sapiens':
build_proteins.append(prot)
else:
for prot in Protein.objects.filter(sequence_type__slug='wt', species_id=1):
if prot.family.slug.startswith(build):
build_proteins.append(prot)
a.load_proteins(build_proteins)
segments = ProteinSegment.objects.all()
a.load_segments(segments)
self.bootstrap,self.UPGMA,self.branches,self.ttype=[0,1,1,0]
##################################################################
else:
simple_selection=request.session.get('selection', False)
a.load_proteins_from_selection(simple_selection)
a.load_segments_from_selection(simple_selection)
self.bootstrap,self.UPGMA,self.branches,self.ttype = map(int,simple_selection.tree_settings)
if self.bootstrap!=0:
self.bootstrap=pow(10,self.bootstrap)
#### Create an alignment object
a.build_alignment()
a.calculate_statistics()
a.calculate_similarity()
self.total = len(a.proteins)
total_length = 0
for chain in a.proteins[0].alignment:
total_length += len(a.proteins[0].alignment[chain])
families = ProteinFamily.objects.all()
self.famdict = {}
for n in families:
self.famdict[self.Tree.trans_0_2_A(n.slug)]=n.name
dirname = unique_filename = uuid.uuid4()
os.mkdir('/tmp/%s' %dirname)
infile = open('/tmp/%s/infile' %dirname,'w')
infile.write(' '+str(self.total)+' '+str(total_length)+'\n')
if len(a.proteins) < 3:
return 'More_prots',None, None, None, None,None,None,None,None
####Get additional protein information
accesions = {}
for n in a.proteins:
fam = self.Tree.trans_0_2_A(n.protein.family.slug)
if n.protein.sequence_type.slug == 'consensus':
fam+='_CON'
entry_name = n.protein.entry_name
name = n.protein.name.replace('<sub>','').replace('</sub>','').replace('<i>','').replace('</i>','')
if '&' in name and ';' in name:
name = name.replace('&','').replace(';',' ')
acc = n.protein.accession
if acc:
acc = acc.replace('-','_')
else:
acc = entry_name.replace('-','_')[:6]  # fall back to the entry name; 'link' was undefined here
spec = str(n.protein.species)
fam += '_'+n.protein.species.common_name.replace(' ','_').upper()
desc = name
if entry_name in crysts:
if not fam in self.Additional_info['crystal']['proteins']:
self.Additional_info['crystal']['proteins'].append(fam)
if len(name)>25:
name=name[:25]+'...'
self.family[entry_name] = {'name':name,'family':fam,'description':desc,'species':spec,'class':'','accession':acc,'ligand':'','type':'','link': entry_name}
accesions[acc]=entry_name
####Write PHYLIP input
sequence = ''
for chain in n.alignment:
for residue in n.alignment[chain]:
sequence += residue[2].replace('_','-')
infile.write(acc+' '*9+sequence+'\n')
infile.close()
####Run bootstrap
if self.bootstrap:
### Write phylip input options
inp = open('/tmp/%s/temp' %dirname,'w')
inp.write('\n'.join(['r',str(self.bootstrap),'y','77','y'])+'\n')
inp.close()
###
try:
subprocess.check_output(['phylip seqboot<temp'], shell=True, cwd = '/tmp/%s' %dirname, timeout=60)
os.rename('/tmp/%s/outfile' %dirname, '/tmp/%s/infile' %dirname)
except:
kill_phylo() #FIXME, needs better way of handling this!
return "too big","too big","too big","too big","too big","too big","too big","too big","too big"
### Write phylip input options
inp = open('/tmp/%s/temp' %dirname,'w')
if self.bootstrap:
inp.write('\n'.join(['m','d',str(self.bootstrap),'y'])+'\n')
else:
inp.write('y\n')
inp.close()
###
try:
subprocess.check_output(['phylip protdist<temp>>log'], shell=True, cwd = '/tmp/%s' %dirname, timeout=60)
except:
kill_phylo() #FIXME, needs better way of handling this!
return "too big","too big","too big","too big","too big","too big","too big","too big","too big"
os.rename('/tmp/%s/infile' %dirname, '/tmp/%s/dupa' %dirname)
os.rename('/tmp/%s/outfile' %dirname, '/tmp/%s/infile' %dirname)
inp = open('/tmp/%s/temp' %dirname,'w')
if self.bootstrap:
### Write phylip input options
if self.UPGMA:
inp.write('\n'.join(['N','m',str(self.bootstrap),'111','y'])+'\n')
else:
inp.write('\n'.join(['m',str(self.bootstrap),'111','y'])+'\n')
else:
if self.UPGMA:
inp.write('N\ny\n')
else:
inp.write('y\n')
inp.close()
###
try:
subprocess.check_output(['phylip neighbor<temp'], shell=True, cwd = '/tmp/%s' %dirname, timeout=60)
except:
kill_phylo() #FIXME, needs better way of handling this!
return "too big","too big","too big","too big","too big","too big","too big","too big"
if self.bootstrap:
os.rename('/tmp/%s/outfile' %dirname, '/tmp/%s/infile' %dirname)
os.rename('/tmp/%s/outtree' %dirname, '/tmp/%s/intree' %dirname)
### Write phylip input options
inp = open('/tmp/%s/temp' %dirname,'w')
inp.write('y\n')
inp.close()
###
try:
subprocess.check_output(['phylip consense<temp'], shell=True, cwd = '/tmp/%s' %dirname, timeout=60)
except:
kill_phylo() #FIXME, needs better way of handling this!
return "too big","too big","too big","too big","too big","too big","too big","too big"
self.phylip = open('/tmp/%s/outtree' %dirname).read()
for acc in accesions.keys():
self.phylip=self.phylip.replace(acc,accesions[acc])
# self.phylogeny_output = self.phylip
self.outtree = open('/tmp/%s/outfile' %dirname).read().lstrip()
phylogeny_input = self.get_phylogeny('/tmp/%s/' %dirname)
shutil.rmtree('/tmp/%s' %dirname)
if build != False:
open('static/home/images/'+build+'_legend.svg','w').write(str(self.Tree.legend))
open('static/home/images/'+build+'_tree.xml','w').write(phylogeny_input)
else:
return phylogeny_input, self.branches, self.ttype, self.total, str(self.Tree.legend), self.Tree.box, self.Additional_info, self.buttons, a.proteins
def get_phylogeny(self, dirname):
self.Tree.treeDo(dirname, self.phylip,self.branches,self.family,self.Additional_info, self.famdict)
phylogeny_input = open('%s/out.xml' %dirname,'r').read().replace('\n','')
return phylogeny_input
def get_data(self):
return self.branches, self.ttype, self.total, str(self.Tree.legend), self.Tree.box, self.Additional_info, self.buttons
def get_buttons(request):
Tree_class=request.session['Tree']
buttons = [(x[1]['order'],x[1]['name']) for x in sorted(Tree_class.Additional_info.items(), key= lambda x: x[1]['order']) if x[1]['include']=='True']
return render(request, 'phylogenetic_trees/ring_buttons.html', {'but':buttons })
def modify_tree(request):
try:
shutil.rmtree('/tmp/modify')
except:
pass
arg = request.GET.getlist('arg[]')
value = request.GET.getlist('value[]')
Tree_class=request.session['Tree']
for n in range(len(arg)):
Tree_class.Additional_info[arg[n].replace('_btn','')]['include']=value[n]
request.session['Tree']=Tree_class
os.mkdir('/tmp/modify')
phylogeny_input = Tree_class.get_phylogeny('/tmp/modify')
branches, ttype, total, legend, box, Additional_info, buttons=Tree_class.get_data()
shutil.rmtree('/tmp/modify')
if ttype == '1':
count = float(total)/4*100
else:
count = 1900 - 1400/math.sqrt(float(total))
print(count)
return render(request, 'phylogenetic_trees/main.html', {'phylo': phylogeny_input, 'branch':branches, 'ttype': ttype, 'count':count, 'leg':legend, 'b':box, 'add':Additional_info, 'but':buttons, 'phylip':Tree_class.phylip, 'outtree':Tree_class.outtree})
def render_tree(request):
Tree_class=Treeclass()
phylogeny_input, branches, ttype, total, legend, box, Additional_info, buttons, proteins=Tree_class.Prepare_file(request)
if phylogeny_input == 'too big':
return render(request, 'phylogenetic_trees/too_big.html')
if phylogeny_input == 'More_prots':
return render(request, 'phylogenetic_trees/warning.html')
if ttype == '1':
count = float(total)/4*100
else:
count = 1900 - 1400/math.sqrt(float(total))
request.session['Tree']=Tree_class
return render(request, 'phylogenetic_trees/alignment.html', {'phylo': phylogeny_input, 'branch':branches, 'ttype': ttype, 'count':count, 'leg':legend, 'b':box, 'add':Additional_info, 'but':buttons, 'phylip':Tree_class.phylip, 'outtree':Tree_class.outtree })
def render_tree_new(request):
Tree_class=Treeclass()
phylogeny_input, branches, ttype, total, legend, box, Additional_info, buttons, proteins=Tree_class.Prepare_file(request)
if phylogeny_input == 'too big':
return render(request, 'phylogenetic_trees/too_big.html')
if phylogeny_input == 'More_prots':
return render(request, 'phylogenetic_trees/warning.html')
if ttype == '1':
count = float(total)/4*100
else:
count = 1900 - 1400/math.sqrt(float(total))
protein_data = []
#FIXME remove
import random
for pc in proteins:
v = {}
p = pc.protein
v['name'] = p.entry_name
v['GPCR_class'] = p.family.parent.parent.parent.name
v['selectivity'] = ["Gq/G11 family"]
v['ligand_type'] = p.family.parent.parent.name
v['coverage'] = random.uniform(0, 1)
v['receptor_page'] = ''
print(v)
protein_data.append(v)
request.session['Tree']=Tree_class
context = {}
context['phylip'] = Tree_class.phylip.replace('\n', '')
context['protein_data'] = protein_data
return render(request, 'phylogenetic_trees/display.html', context)
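# Illustrative sketch (assumed URL configuration, not part of this file): the
# buttons above point at /phylogenetic_trees/render and
# /phylogenetic_trees/render_new, which would map onto the views defined here
# roughly as:
#
#   urlpatterns = [
#       url(r'^render$', views.render_tree),
#       url(r'^render_new$', views.render_tree_new),
#       url(r'^modify$', views.modify_tree),
#       url(r'^buttons$', views.get_buttons),
#   ]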
|
cmunk/protwis
|
phylogenetic_trees/views.py
|
Python
|
apache-2.0
| 16,637
|
[
"CRYSTAL"
] |
adb497aad15f847ba9f39a800c7272475e7a7ad845d1b98ae0ac7efbffd629a4
|
from pyatompaw import AtompawMaster
# Atom definition
atom = AtompawMaster('26-Fe')
atom.launcher.set_executable(executable)
atom.Atom_name = 'Fe'
atom.Z = 26
rpaw = 2.12 # a.u.
rshape = 1.81
rs = 2.01
rp = 1.81
rd = 2.01
rmax = 10.0 # a.u.
rmatch = rpaw
# Keywords
atom.XC_functional = 'GGA-PBE'
atom.rel_keyword = 'scalarrelativistic'
atom.grid_keyword = 'loggrid', 2001, rmax, rmatch
atom.logderivrange = 'logderivrange', -10., 10., 2001
atom.projector_keyword = 'custom'
atom.ps_scheme = 'RRKJ'
#atom.ps_scheme = 'bloechlps'
atom.ortho_scheme = 'GramSchmidtOrtho'
# Local part of pseudopotential
atom.Vloc_scheme = 'ultrasoft'
#atom.Vloc_scheme = 'bessel'
#atom.Vloc_scheme = 'trouillermartins'
#atom.lloc = 2
#atom.Eloc =-0.5
atom.coreWF_keyword = 'prtcorewf'
atom.proj_optim_keyword = 'nooptim'
atom.comp_in_XC_keyword = 'usexcnhat'
#atom.output_format = 'abinit'
atom.output_format = 'xml'
# Atom configuration
atom.nmax = [4, 3, 3, 0, 0, 0] # Maximum occupied orbitals: 4s 3p 3d
#atom.occ = [(3,2,6)] # 3d has partial occ: 6
atom.occ = [ (3,2,7), # 3d has partial occ: 7
(4,0,1)] # 4s has partial occ: 1
atom.lmax = 2
atom.rpaw = rpaw
atom.rshape = rshape
atom.rvloc = rshape
atom.rcore = rshape
# Projectors for valence states
atom.add_valence(n=3, l=0, rc=rs)
atom.add_valence(n=4, l=0, rc=rs)
atom.add_valence(n=3, l=1, rc=rp)
atom.add_valence(n=3, l=2, rc=rd)
# Additional projectors
atom.add_proj(l=1, Eref=3.5, rc=rp)
atom.add_proj(l=2, Eref=2.0, rc=rd)
# Test configurations
atom.configurations = [ [ (3,0,2),
(3,1,6),
(3,2,6),
(4,0,2)],
[(3,0,2),
(3,1,6),
(3,2,8),
(4,0,0)]]
# Execution
atom.make() # Write the files
atom.run() # Run atompaw
# Plot partial waves and logarithmic derivatives.
atom.plot_wfn()
atom.show_wfn()
atom.plot_logderiv(show=True)
# Export the atomic dataset
atom.export('.')
|
GkAntonius/pyatompaw
|
examples/Fe.py
|
Python
|
gpl-3.0
| 2,166
|
[
"ABINIT"
] |
ce44fc33b9f1dee4a2f945dd0c2fcc61415d4e364085c35b875430ad804a8d54
|
#!/usr/bin/env python
""" routines describing the number density evolution of galaxy populations in Illustris. """
__author__ = "Paul Torrey with contributions from Ryan McKinnon"
__copyright__ = "Copyright 2015, The Authors"
__credits__ = ["Paul Torrey and Ryan McKinnon"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Paul Torrey"
__email__ = "ptorrey@mit.edu"
__status__ = "Public Release. v1.0."
import numpy as np
from scipy.optimize import newton
class number_density:
def __init__(self):
self._cmf_vars = np.array(
[ [-2.89381061e+00, 8.21989048e-02, -1.23157487e-01],
[-6.25597878e-01, 8.62157470e-02, -4.90327135e-02],
[-3.88949868e-02, 2.54185320e-02, -7.12953126e-03],
[ 1.15238521e+01, -1.87102380e-01, 2.10223127e-02] ] )
self._dm_mf_vars = np.array(
[ [-4.58622836, 0.32510541, -0.00729362],
[-0.85569723, -0.11641107, 0.00950263],
[ 0.00190046, -0.01466030, 0.00173920],
[14.14366307, -0.60796535, 0.02010243] ] )
self._dmf_vars = np.array(
[ [-2.62477122, 0.08536494, -0.11269835],
[-0.51284616, 0.09404466, -0.04949364],
[-0.03864963, 0.03270023, -0.00815488],
[11.54279153, -0.19945507, 0.01974203] ] )
self._cvdf_vars = np.array(
[ [ 7.39149763, 5.72940031, -1.12055245],
[-6.86339338, -5.27327109, 1.10411386],
[ 2.85208259, 1.25569600, -0.28663846],
[ 0.06703215, -0.04868317, 0.00764841] ] )
self._cdmf_vars = np.array(
[ [-5.08189685e+00, 1.55357858e-01, -2.18816909e-02],
[-1.05830010e+00, -1.66413452e-01, 3.95287210e-03],
[-2.11658380e-02, -2.13713414e-02, 6.82334998e-04],
[1.41924033e+01, -5.14655812e-01, 2.22309520e-02]])
self._mil_cdmf_vars = np.array(
[ [ -4.44138264, 0.35711012, -0.01111473],
[ -0.72959257, -0.09591461, 0.00593439],
[ 0.02103888, -0.00868751, 0.00081672],
[ 14.32909807, -0.61663087, 0.02347729] ] )
self._vars_nc = np.array(
[ [-2.64396065, -0.29957949, -0.03786140],
[-0.52684351, -0.13813571, -0.03224572],
[-0.02648208, -0.01600591, -0.00564523],
[11.33927770, 0.39102542, -0.01573175] ])
self._vars_nc_rev3 = np.array(
[ [-4.35301221, -0.43893418, 0.22963251],
[-0.79280392, -0.19135083, 0.06622329],
[-0.02908882, -0.02000640, 0.00739485],
[11.80107352, 0.19790054, -0.14974796] ])
self._vars_nc_rev2 = np.array(
[ [-3.99168548, 0.34870581, 0.05096602],
[-0.79382527, 0.01932935, 0.03692425],
[-0.03829736, -0.00023232, 0.00694828],
[11.82842896, -0.28090410, -0.03059818] ])
self._vars_nc_rev1 = np.array(
[ [-3.64009890, 1.03601036, -0.31161953],
[-0.79721518, 0.29299124, -0.07746618],
[-0.04838342, 0.03363121, -0.00486779],
[11.82759108, -0.66505107, 0.18483682] ])
self._single_nd_fit = np.array(
[ [-6.616925, 4.772104, -0.620776, ],
[-12.553859, 10.062380, -1.593516, ],
[-8.490627, 7.305744, -1.297247, ],
[-2.393108, 2.170882, -0.415974, ],
[-0.237254, 0.224544, -0.045412, ],
[20.215976, -19.237827, 3.786527, ],
[35.822739, -34.933411, 7.113855, ],
[22.317927, -22.302322, 4.677395, ],
[5.845230, -5.964089, 1.282028, ],
[0.546387, -0.566171, 0.124111, ],
[-12.516574, 11.750153, -2.437514, ],
[-21.967282, 20.806088, -4.367866, ],
[-13.612419, 13.014453, -2.763500, ],
[-3.562422, 3.431866, -0.735922, ],
[-0.334570, 0.323606, -0.069900, ], ]
)
self._sigma_forward_fit = np.array(
[ 0.30753811, -0.35706088, 0.07811929,
0.11658773, -0.36310229, 0.0878064,
0.05432019, -0.08567579, 0.01938756,
0.3410485 , -0.26566291, 0.05867066,
0.40404559, -0.27861255, 0.05734425,
0.06319481, -0.04605944, 0.01022102] )
self._surv_forward_fit = np.array(
[-0.57610316, 0.25395473, -0.06909108,
-0.32306881, 0.30592045, -0.0841637,
-0.0588044 , 0.07159256, -0.01935816,
0.34438256, -0.21727332, 0.04128673,
0.29859003, -0.21186113, 0.04228161,
0.03889436, -0.03177222, 0.00690157] )
self._nd_backward_fit = np.array(
[ 0.12576944, -0.0011525, 0.00337458, 0.04573972, 0.03545278, 0.00692101] )
self._sigma_backward_fit = np.array(
[ 0.11019958, 0.02569108, 0.02499621, -0.02997248, -0.02119875, -0.00762916] )
self._mil_single_nd_fit = np.array([
[ 1.13550747, -1.60321674, 0.14007560,],
[ 2.32643209, -3.10396702, 0.35853430,],
[ 1.45825077, -2.05562185, 0.28640381,],
[ 0.39731886, -0.59565224, 0.09607464,],
[ 0.04044023, -0.06307704, 0.01132825,],
[ -9.06663472, 10.20717290, -2.23962380,],
[ -15.35672950, 17.37750894, -3.82828958,],
[ -9.34183983, 10.63300290, -2.35495830,],
[ -2.44570257, 2.80484153, -0.62496982,],
[ -0.23348522, 0.26954533, -0.06040623,],
[ 6.46838958, -6.74495114, 1.50746143,],
[ 10.99464939, -11.45579413, 2.55812019,],
[ 6.72317076, -7.01007726, 1.56536705,],
[ 1.77252101, -1.84931108, 0.41298179,],
[ 0.17038025, -0.17775311, 0.03968929,], ])
self._mil_sigma_forward_fit = np.array(
[ -0.11752455, 0.58506396, -0.18510631,
-0.21970148, 0.51497538, -0.15945522,
-0.02558280, 0.09755363, -0.03092733,
-0.33390659, 0.09918064, 0.00485999,
-0.21655906, 0.04328793, 0.01154582,
-0.05937238, 0.01746433, 0.00092052, ])
self._mil_nd_backward_fit = np.array(
[ 0.6447802, 0.3364304, 0.05757577, 0.00449681, 0.00230737, 0.00132303] )
self._mil_sigma_backward_fit = np.array(
[ 0.20872779, 0.06660569, 0.01841613, -0.0879753, -0.05365703, -0.00989903] )
def single_nd_fit(self, z, z0, init_N_tilde, target=0, verbose=False, type='IllustrisCMF', **kwargs):
""" Evaluate the forward number density evolution tracks for log_mass, z, and z0 """
if (init_N_tilde>-1) or (init_N_tilde<-6.5):
if verbose: print "out of range"
return init_N_tilde
if type=='IllustrisCMF':
this_vars=self._single_nd_fit
elif type=='MillenniumCMF':
this_vars=self._mil_single_nd_fit
result=0
n_z0_exp=3; n_N0_exp = 5
A = np.zeros(n_N0_exp); B = np.zeros(n_N0_exp); C = np.zeros(n_N0_exp)
for i in range(n_N0_exp):
A[i] = np.sum( [this_vars[i+0*n_N0_exp][j] * z0**j for j in range(n_z0_exp) ] )
B[i] = np.sum( [this_vars[i+1*n_N0_exp][j] * z0**j for j in range(n_z0_exp) ] )
C[i] = np.sum( [this_vars[i+2*n_N0_exp][j] * z0**j for j in range(n_z0_exp) ] )
A = np.sum( [A[i] * init_N_tilde ** i for i in range(n_N0_exp) ] )
B = np.sum( [B[i] * init_N_tilde ** i for i in range(n_N0_exp) ] )
C = np.sum( [C[i] * init_N_tilde ** i for i in range(n_N0_exp) ] )
dz = (z0 - z)
return init_N_tilde + A * dz + B * dz ** 2 + C * dz ** 3 - target
def sigma_forward_fit(self, z, z0, init_N_tilde, sigma_0=0.00, verbose=False, type='IllustrisCMF'):
""" Evaluate scatter in forward ND evolution tracks """
if (init_N_tilde>-1) or (init_N_tilde<-5.5):
if verbose: print "out of range"
return sigma_0
if type=='IllustrisCMF':
this_vars = self._sigma_forward_fit
elif type=='MillenniumCMF':
this_vars = self._mil_sigma_forward_fit
else:
print "unrecognized fit type?"
A_exp = [ np.sum( [coeff * z0**iii for iii, coeff in enumerate(this_vars[ 0+3*jjj:0+3*(jjj+1) ]) ] ) for jjj in range(3) ]
B_exp = [ np.sum( [coeff * z0**iii for iii, coeff in enumerate(this_vars[ 9+3*jjj:9+3*(jjj+1) ]) ] ) for jjj in range(3) ]
A = np.sum( [A_exp[iii] * init_N_tilde**iii for iii in range(3) ] )
B = np.sum( [B_exp[iii] * init_N_tilde**iii for iii in range(3) ] )
dz = (z0 - z)
return sigma_0 + A*dz + B*dz**2
def surv_forward_fit(self, z, z0, init_N_tilde, sigma_0=0.00, verbose=False):
""" Evaluate survival fraction of galaxies as a function of elapsed time """
if (init_N_tilde>-1) or (init_N_tilde<-5.5):
if verbose: print "out of range"
return 0.0
this_vars = self._surv_forward_fit
A_exp = [ np.sum( [coeff * z0**iii for iii, coeff in enumerate(this_vars[ 0+3*jjj:0+3*(jjj+1) ]) ] ) for jjj in range(3) ]
B_exp = [ np.sum( [coeff * z0**iii for iii, coeff in enumerate(this_vars[ 9+3*jjj:9+3*(jjj+1) ]) ] ) for jjj in range(3) ]
A = np.sum( [A_exp[iii] * init_N_tilde**iii for iii in range(3) ] )
B = np.sum( [B_exp[iii] * init_N_tilde**iii for iii in range(3) ] )
dz = (z0 - z)
return 1.0 + A*dz + B*dz**2
def nd_backward_fit( self, z, init_N_tilde, type='IllustrisCMF' ):
if type=='IllustrisCMF':
this_vars = self._nd_backward_fit
elif type=='MillenniumCMF':
this_vars = self._mil_nd_backward_fit
A_exp = np.sum( [this_vars[iii+0] * init_N_tilde **iii for iii in range(3) ] )
B_exp = np.sum( [this_vars[iii+3] * init_N_tilde **iii for iii in range(3) ] )
return init_N_tilde + A_exp * z + B_exp * z**2
def sigma_backward_fit( self, z, init_N_tilde, sigma_0=0.0, type='IllustrisCMF' ):
if type=='IllustrisCMF':
this_vars = self._sigma_backward_fit
elif type=='MillenniumCMF':
this_vars = self._mil_sigma_backward_fit
A_exp = np.sum( [this_vars[iii+0] * init_N_tilde **iii for iii in range(3) ] )
B_exp = np.sum( [this_vars[iii+3] * init_N_tilde **iii for iii in range(3) ] )
return sigma_0 + A_exp * z + B_exp * z**2
def cmf_fit(self, log_mass, redshift, target=0, **kwargs):
""" Evaluate the CMF at a given list of masses at some redshift"""
if np.max(log_mass) > 20: warn_not_log_arg()
return self._cmf_fit_func(log_mass, self._cmf_vars, redshift, target=target)
def cvdf_fit(self, log_vd, redshift, target=0, **kwargs):
""" Evaluate the CVDF at a given list of vd's at some redshift"""
if np.max(log_vd) > 10: warn_not_log_arg()
return self._cmf_fit_func(log_vd, self._cvdf_vars, redshift, target=target)
def cdmf_fit(self, log_mass, redshift, target=0, **kwargs):
""" Evaluate the dm CMF at a given list of masses at some redshift"""
if np.max(log_mass) > 20: warn_not_log_arg()
return self._cmf_fit_func(log_mass, self._cdmf_vars, redshift, target=target)
def mil_cdmf_fit(self, log_mass, redshift, target=0, **kwargs):
""" Evaluate the dm CMF at a given list of masses at some redshift"""
if np.max(log_mass) > 20: warn_not_log_arg()
return self._cmf_fit_func(log_mass, self._mil_cdmf_vars, redshift, target=target)
def gsmf_fit(self, log_mass, redshift, target=0, **kwargs):
""" Evaluate the GSMF at a given list of masses at some redshift"""
if np.max(log_mass) > 20: warn_not_log_arg()
return self._cmf_fit_func(log_mass, self._dmf_vars, redshift, target=target)
def nc_cmf_fit(self, log_mass, redshift, target=0, z_init=0, **kwargs):
""" If you feed this a **z_init*** stellar mass (log_mass), it will return the
median number density of that galaxy population at some other redshift (redshift) """
if np.max(log_mass) > 20: warn_not_log_arg()
if z_init == 0: this_vars = self._vars_nc
if z_init == 1: this_vars = self._vars_nc_rev1
if z_init == 2: this_vars = self._vars_nc_rev2
if z_init == 3: this_vars = self._vars_nc_rev3
return self._cmf_fit_func(log_mass, this_vars, redshift, target=target)
def mass_from_density(self, cum_num_dens, redshift, type='IllustrisCMF'):
""" Calculate the stellar mass from a cum num dens by inverting the CMF """
args = (redshift, cum_num_dens)
if type=='IllustrisCMF':
mass = newton(self.cmf_fit, 10.0, args=args)
elif type=='MillenniumCDMF':
mass = newton(self.mil_cdmf_fit, 10.0, args=args)
return mass
def dm_mass_from_density(self, cum_num_dens, redshift):
""" Calculate the dm mass from a cum num dens by inverting the dm CMF """
args = (redshift, cum_num_dens)
mass = newton(self.cdmf_fit, 10.0, args=args)
return mass
def vd_from_density(self, cum_num_dens, redshift):
""" Calculate the vel disp from a cum num dens by inverting the CVDF """
args = (redshift, cum_num_dens)
try: val = newton(self.cvdf_fit, 2.0, args=args)
except: val = -1
return val
def project_growth(self, arr, z1, z2, field='mass', nc=True):
""" Project a galaxy (population's) growth from init value arr at z1 to z2 """
arr = np.array([arr]).flatten() # input masses or vds at z1
res = np.zeros_like(arr) # output masses or vds at z2
if field == 'mass':
from_dens = self.mass_from_density
if nc: arr = arr
else: to_dens = self.cmf_fit
elif field == 'vd':
from_dens = self.vd_from_density
if nc: arr = [self.mass_from_density(self.cvdf_fit(x, z1), z1) for x in arr]
else: to_dens = self.cvdf_fit
elif field == 'dm':
from_dens = self.dm_mass_from_density
if nc: arr = [self.mass_from_density(self.cdmf_fit(x, z1), z1) for x in arr]
else: to_dens = self.cdmf_fit
if nc:
to_dens = self.nc_cmf_fit
for i, elem in enumerate(arr):
if nc: proj_dens = to_dens(elem, z2, z_init=z1)
else: proj_dens = to_dens(elem, z1)
if proj_dens < np.log10(1e-5): # you're below the fit limits
print " "
print " PROJECTING GROWTH BELOW FIT LIMITS! "
proj_dens = np.log10(1e-5)
res[i] = from_dens(proj_dens, z2)
if res.shape[0] == 1:
return res[0]
else:
return res
def _cmf_fit_func(self, this_val, this_vars, redshift, target=0):
""" Evaluate Equations 1 & 2-5 from Torrey+2015 """
coeffs = [this_vars[i][0] + this_vars[i][1] * redshift +
this_vars[i][2] * redshift**2 for i in range(4)]
mstar = this_val - coeffs[3]
return coeffs[0] + coeffs[1]*mstar + coeffs[2]*mstar**2 - np.exp(mstar) - target
def warn_not_log_arg():
print " "
print " WARNING: ARGUMENT DETECTED IN torrey_cmf.py THAT IS MUCH LARGER THAN EXPECTED"
print " WARNING: VERIFY THAT YOU ARE USING LOG SCALE (AS REQUIRED) "
print " "
|
ptorrey/torrey_cmf
|
torrey_cmf.py
|
Python
|
gpl-2.0
| 15,955
|
[
"Galaxy"
] |
a349d8ea39fe24599ae4ed2c5bd29cd797c8b9de8fd1242cbc5b202b6e296324
|
# -*- coding: utf-8 -*-
#
# test_clopath_synapse.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Test functionality of the Clopath stdp synapse
"""
import unittest
import nest
import numpy as np
HAVE_GSL = nest.ll_api.sli_func("statusdict/have_gsl ::")
@nest.ll_api.check_stack
@unittest.skipIf(not HAVE_GSL, 'GSL is not available')
class ClopathSynapseTestCase(unittest.TestCase):
"""Test Clopath synapse"""
def test_ConnectNeuronsWithClopathSynapse(self):
"""Ensures that the restriction to supported neuron models works."""
nest.set_verbosity('M_WARNING')
# Specify supported models
supported_models = [
'aeif_psc_delta_clopath',
'hh_psc_alpha_clopath',
]
# Connect supported models with Clopath synapse
for nm in supported_models:
nest.ResetKernel()
n = nest.Create(nm, 2)
nest.Connect(n, n, {"rule": "all_to_all"},
{"model": "clopath_synapse"})
# Compute not supported models
not_supported_models = [n for n in nest.Models(mtype='nodes')
if n not in supported_models]
# Ensure that connecting not supported models fails
for nm in not_supported_models:
nest.ResetKernel()
n = nest.Create(nm, 2)
# try to connect with clopath_rule
with self.assertRaises(nest.kernel.NESTError):
nest.Connect(n, n, {"rule": "all_to_all"},
{"model": "clopath_synapse"})
def test_SynapseDepressionFacilitation(self):
"""Ensure that depression and facilitation work correctly"""
nest.set_verbosity('M_WARNING')
# This is done using the spike pairing experiment of
# Clopath et al. 2010. First we specify the parameters
resolution = 0.1
init_w = 0.5
spike_times_pre = [
[29., 129., 229., 329., 429.],
[29., 62.3, 95.7, 129., 162.3],
[29., 49., 69., 89., 109.],
[129., 229., 329., 429., 529., 629.],
[62.3, 95.6, 129., 162.3, 195.6, 229.],
[49., 69., 89., 109., 129., 149.]]
spike_times_post = [
[19., 119., 219., 319., 419.],
[19., 52.3, 85.7, 119., 152.3],
[19., 39., 59., 79., 99.],
[139., 239., 339., 439., 539., 639.],
[72.3, 105.6, 139., 172.3, 205.6, 239.],
[59., 79., 99., 119., 139., 159.]]
tested_models = ["aeif_psc_delta_clopath", "hh_psc_alpha_clopath"]
# Loop over tested neuron models
for nrn_model in tested_models:
if(nrn_model == "aeif_psc_delta_clopath"):
nrn_params = {'V_m': -70.6,
'E_L': -70.6,
'V_peak': 33.0,
'C_m': 281.0,
'theta_minus': -70.6,
'theta_plus': -45.3,
'A_LTD': 14.0e-5,
'A_LTP': 8.0e-5,
'tau_minus': 10.0,
'tau_plus': 7.0,
'delay_u_bars': 4.0,
'a': 4.0,
'b': 0.0805,
'V_reset': -70.6 + 21.0,
'V_clamp': 33.0,
't_clamp': 2.0,
't_ref': 0.0, }
elif(nrn_model == "hh_psc_alpha_clopath"):
nrn_params = {'V_m': -64.9,
'C_m': 100.0,
'tau_syn_ex': 0.2,
'tau_syn_in': 2.0,
'theta_minus': -64.9,
'theta_plus': -35.0,
'A_LTD': 14.0e-5,
'A_LTP': 8.0e-5,
'tau_minus': 10.0,
'tau_plus': 114.0,
'delay_u_bars': 5.0,
}
syn_weights = []
# Loop over pairs of spike trains
for (s_t_pre, s_t_post) in zip(spike_times_pre, spike_times_post):
nest.ResetKernel()
nest.SetKernelStatus({"resolution": resolution})
# Create one neuron
nrn = nest.Create(nrn_model, 1, nrn_params)
prrt_nrn = nest.Create("parrot_neuron", 1)
# Create and connect spike generator
spike_gen_pre = nest.Create("spike_generator", 1, {
"spike_times": s_t_pre})
nest.Connect(spike_gen_pre, prrt_nrn,
syn_spec={"delay": resolution})
if(nrn_model == "aeif_psc_delta_clopath"):
conn_weight = 80.0
elif(nrn_model == "hh_psc_alpha_clopath"):
conn_weight = 2000.0
spike_gen_params_post = {"spike_times": s_t_post}
spike_gen_post = nest.Create("spike_generator", 1, {
"spike_times": s_t_post})
nest.Connect(spike_gen_post, nrn, syn_spec={
"delay": resolution, "weight": conn_weight})
# Create weight recorder
wr = nest.Create('weight_recorder', 1)
# Create Clopath synapse with weight recorder
nest.CopyModel("clopath_synapse", "clopath_synapse_rec",
{"weight_recorder": wr[0]})
syn_dict = {"model": "clopath_synapse_rec",
"weight": init_w, "delay": resolution}
nest.Connect(prrt_nrn, nrn, syn_spec=syn_dict)
# Simulation
simulation_time = (10.0 + max(s_t_pre[-1], s_t_post[-1]))
nest.Simulate(simulation_time)
# Evaluation
w_events = nest.GetStatus(wr)[0]["events"]
weights = w_events["weights"]
syn_weights.append(weights[-1])
# Compare to expected result
syn_weights = np.array(syn_weights)
syn_weights = 100.0*15.0*(syn_weights - init_w)/init_w + 100.0
if(nrn_model == "aeif_psc_delta_clopath"):
correct_weights = [57.82638722, 72.16730112, 149.43359357,
103.30408341, 124.03640668, 157.02882555]
elif(nrn_model == "hh_psc_alpha_clopath"):
correct_weights = [70.14343863, 99.49206222, 178.1028757,
119.63314118, 167.37750688, 178.83111685]
self.assertTrue(np.allclose(
syn_weights, correct_weights, rtol=1e-7))
def test_SynapseFunctionWithAeifModel(self):
"""Ensure that spikes are properly processed"""
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
# Create neurons and devices
nrns = nest.Create('aeif_psc_delta_clopath', 2, {'V_m': -70.6})
prrt_nrn = nest.Create('parrot_neuron', 1)
spike_times = [10.0]
sg = nest.Create('spike_generator', 1, {'spike_times': spike_times})
mm = nest.Create('multimeter', params={
'record_from': ['V_m'], 'interval': 1.0})
nest.Connect(sg, prrt_nrn)
nest.Connect(mm, nrns)
# Connect one neuron with static connection
conn_dict = {'rule': 'all_to_all'}
static_syn_dict = {'model': 'static_synapse',
'weight': 2.0, 'delay': 1.0}
nest.Connect(prrt_nrn, nrns[0:1], conn_dict, static_syn_dict)
# Connect one neuron with Clopath stdp connection
cl_stdp_syn_dict = {'model': 'clopath_synapse',
'weight': 2.0, 'delay': 1.0}
nest.Connect(prrt_nrn, nrns[1:2], conn_dict, cl_stdp_syn_dict)
# Simulation
nest.Simulate(20.)
# Evaluation
data = nest.GetStatus(mm)
senders = data[0]['events']['senders']
voltages = data[0]['events']['V_m']
vm1 = voltages[np.where(senders == 1)]
vm2 = voltages[np.where(senders == 2)]
# Compare results for static synapse and Clopath stdp synapse
self.assertTrue(np.allclose(vm1, vm2, rtol=1e-5))
# Check that a spike with weight 2.0 is processed properly
# in the aeif_psc_delta_clopath model
self.assertTrue(np.isclose(vm2[11]-vm2[10], 2.0, rtol=1e-5))
def suite():
suite = unittest.makeSuite(ClopathSynapseTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
|
hakonsbm/nest-simulator
|
pynest/nest/tests/test_clopath_synapse.py
|
Python
|
gpl-2.0
| 9,545
|
[
"NEURON"
] |
125c89b155bfe4118cb4ff8a6db83b9db4f0f3df86a947736f78cf9eb8be2c15
|
# nnmware(c)2012-2020
from __future__ import unicode_literals
import json
from django.contrib import messages
from django.utils.deprecation import MiddlewareMixin
from django.utils.timezone import now
from nnmware.core.utils import setting
from nnmware.core.http import get_session_from_request
from nnmware.core.models import VisitorHit
class AjaxMessagingMiddleware(MiddlewareMixin):  # MiddlewareMixin makes this usable with the new-style MIDDLEWARE setting
def process_response(self, request, response):
if request.is_ajax():
if response['Content-Type'] in ["application/javascript", "application/json"]:
try:
content = json.loads(response.content)
except ValueError:
return response
django_messages = []
for message in messages.get_messages(request):
django_messages.append(dict(level=message.level, message=message.message, extra_tags=message.tags))
content['core_messages'] = django_messages
response.content = json.dumps(content)
return response
UNTRACKED_USER_AGENT = [
"Teoma", "alexa", "froogle", "Gigabot", "inktomi", "looksmart", "URL_Spider_SQL", "Firefly",
"NationalDirectory", "Ask Jeeves", "TECNOSEEK", "InfoSeek", "WebFindBot", "girafabot", "crawler",
"www.galaxy.com", "Googlebot", "Googlebot/2.1", "Google", "Webmaster", "Scooter", "James Bond",
"Slurp", "msnbot", "appie", "FAST", "WebBug", "Spade", "ZyBorg", "rabaz", "Baiduspider",
"Feedfetcher-Google", "TechnoratiSnoop", "Rankivabot", "Mediapartners-Google", "Sogou web spider",
"WebAlta Crawler", "MJ12bot", "Yandex/", "YandexBot", "YaDirectBot", "StackRambler", "DotBot", "dotbot",
"AhrefsBot", "Mail.RU_Bot", "YandexDirect", "Twitterbot", "PaperLiBot", "bingbot", "Ezooms", 'SiteExplorer'
]
class VisitorHitMiddleware(MiddlewareMixin):
def process_request(self, request):
if request.is_ajax():
return
if request.path.startswith(setting('ADMIN_SYSTEM_PREFIX', '/admin/')):
return
# see if the user agent is not supposed to be tracked
user_agent = request.META.get('HTTP_USER_AGENT', '')[:255]
for ua in UNTRACKED_USER_AGENT:
# if the keyword is found in the user agent, stop tracking
if user_agent.find(ua) != -1:
return
v = VisitorHit()
if request.user.is_authenticated:
v.user = request.user
v.user_agent = user_agent
v.ip = request.META.get('REMOTE_ADDR', '')
v.session_key = get_session_from_request(request)
v.secure = request.is_secure()
v.referer = request.META.get('HTTP_REFERER', '')
v.hostname = request.META.get('REMOTE_HOST', '')[:100]
v.url = request.get_full_path()
v.date = now()
v.save()
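# Illustrative sketch (assumed project settings, not part of this file): both
# classes are meant to be listed in Django's MIDDLEWARE setting, e.g.
#
#   MIDDLEWARE = [
#       ...,
#       'nnmware.core.middleware.AjaxMessagingMiddleware',
#       'nnmware.core.middleware.VisitorHitMiddleware',
#   ]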
|
nnmware/nnmware
|
core/middleware.py
|
Python
|
gpl-3.0
| 2,844
|
[
"Firefly",
"Galaxy"
] |
9d938a2bd01da6ddcde85b837c69293506970adbd2b20112c914e62182e4b630
|
"""
Acceptance tests for grade settings in Studio.
"""
from __future__ import absolute_import
from bok_choy.promise import EmptyPromise
from six.moves import range
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.studio.settings_graders import GradingPage
from common.test.acceptance.tests.studio.base_studio_test import StudioCourseTest
class GradingPageTest(StudioCourseTest):
"""
Bok Choy tests to add/edit grade settings in Studio.
"""
shard = 13
url = None
GRACE_FIELD_CSS = "#course-grading-graceperiod"
def setUp(self): # pylint: disable=arguments-differ
super(GradingPageTest, self).setUp()
self.grading_page = GradingPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.grading_page.visit()
self.ensure_input_fields_are_loaded()
def ensure_input_fields_are_loaded(self):
"""
Ensures values in input fields are loaded.
"""
EmptyPromise(
lambda: self.grading_page.q(css=self.GRACE_FIELD_CSS).attrs('value')[0],
"Waiting for input fields to be loaded"
).fulfill()
def populate_course_fixture(self, course_fixture):
"""
Return a test course fixture.
"""
course_fixture.add_children(
XBlockFixtureDesc("chapter", "Test Section").add_children(
XBlockFixtureDesc("sequential", "Test Subsection").add_children(
)
)
)
def test_add_grade_range(self):
"""
Scenario: Users can add grading ranges
Given I have opened a new course in Studio
And I am viewing the grading settings
When I add "1" new grade
Then I see I now have "3"
"""
length = self.grading_page.total_number_of_grades
self.grading_page.click_add_grade()
self.assertTrue(self.grading_page.is_grade_added(length))
self.grading_page.save()
self.grading_page.refresh_and_wait_for_load()
total_number_of_grades = self.grading_page.total_number_of_grades
self.assertEqual(total_number_of_grades, 3)
def test_staff_can_add_up_to_five_grades_only(self):
"""
Scenario: Users can only have up to 5 grading ranges
Given I have opened a new course in Studio
And I am viewing the grading settings
When I try to add more than 5 grades
Then I see I have only "5" grades
"""
for grade_ordinal in range(1, 5):
length = self.grading_page.total_number_of_grades
self.grading_page.click_add_grade()
# By default the page has 2 grades, so an ordinal greater than 3 means an attempt to add a 6th grade
if grade_ordinal > 3:
self.assertFalse(self.grading_page.is_grade_added(length))
else:
self.assertTrue(self.grading_page.is_grade_added(length))
self.grading_page.save()
self.grading_page.refresh_and_wait_for_load()
total_number_of_grades = self.grading_page.total_number_of_grades
self.assertEqual(total_number_of_grades, 5)
def test_grades_remain_consistent(self):
"""
Scenario: When user removes a grade the remaining grades should be consistent
Given I have opened a new course in Studio
And I am viewing the grading settings
When I add "2" new grade
Then Grade list has "A,B,C,F" grades
And I delete a grade
Then Grade list has "A,B,F" grades
"""
for _ in range(2):
length = self.grading_page.total_number_of_grades
self.grading_page.click_add_grade()
self.assertTrue(self.grading_page.is_grade_added(length))
self.grading_page.save()
grades_alphabets = self.grading_page.grade_letters
self.assertEqual(grades_alphabets, ['A', 'B', 'C', 'F'])
self.grading_page.remove_grades(1)
self.grading_page.save()
grades_alphabets = self.grading_page.grade_letters
self.assertEqual(grades_alphabets, ['A', 'B', 'F'])
def test_staff_can_delete_grade_range(self):
"""
Scenario: Users can delete grading ranges
Given I have opened a new course in Studio
And I am viewing the grading settings
When I add "1" new grade
And I delete a grade
Then I see I now have "2" grades
"""
length = self.grading_page.total_number_of_grades
self.grading_page.click_add_grade()
self.assertTrue(self.grading_page.is_grade_added(length))
self.grading_page.save()
total_number_of_grades = self.grading_page.total_number_of_grades
self.assertEqual(total_number_of_grades, 3)
self.grading_page.remove_grades(1)
total_number_of_grades = self.grading_page.total_number_of_grades
self.assertEqual(total_number_of_grades, 2)
def test_staff_can_move_grading_ranges(self):
"""
Scenario: Users can move grading ranges
Given I have opened a new course in Studio
And I am viewing the grading settings
When I move a grading section
Then I see that the grade range has changed
"""
grade_ranges = self.grading_page.grades_range
self.assertIn('0-50', grade_ranges)
self.grading_page.drag_and_drop_grade()
grade_ranges = self.grading_page.grades_range
self.assertIn(
'0-3',
grade_ranges,
u'expected range: 0-3, not found in grade ranges:{}'.format(grade_ranges)
)
def test_settings_are_persisted_on_save_only(self):
"""
Scenario: Settings are only persisted when saved
Given I have populated a new course in Studio
And I am viewing the grading settings
When I change assignment type "Homework" to "New Type"
Then I do not see the changes persisted on refresh
"""
self.grading_page.change_assignment_name('Homework', 'New Type')
self.grading_page.refresh_and_wait_for_load()
self.assertIn('Homework', self.grading_page.get_assignment_names)
def test_settings_are_reset_on_cancel(self):
"""
Scenario: Settings are reset on cancel
Given I have populated a new course in Studio
And I am viewing the grading settings
When I change assignment type "Homework" to "New Type"
And I press the "Cancel" notification button
Then I see the assignment type "Homework"
"""
self.grading_page.change_assignment_name('Homework', 'New Type')
self.grading_page.cancel()
assignment_names = self.grading_page.get_assignment_names
self.assertIn('Homework', assignment_names)
def test_confirmation_is_shown_on_save(self):
"""
Scenario: Confirmation is shown on save
Given I have populated a new course in Studio
And I am viewing the grading settings
When I change assignment type "Homework" to "New Type"
And I press the "Save" notification button
Then I see a confirmation that my changes have been saved
"""
self.grading_page.change_assignment_name('Homework', 'New Type')
self.grading_page.save()
confirmation_message = self.grading_page.confirmation_message
self.assertEqual(confirmation_message, 'Your changes have been saved.')
def test_staff_can_set_weight_to_assignment(self):
"""
Scenario: Users can set weight to Assignment types
Given I have opened a new course in Studio
And I am viewing the grading settings
When I add a new assignment type "New Type"
And I set the assignment weight to "7"
And I press the "Save" notification button
Then the assignment weight is displayed as "7"
And I reload the page
Then the assignment weight is displayed as "7"
"""
self.grading_page.add_new_assignment_type()
self.grading_page.change_assignment_name('', 'New Type')
self.grading_page.set_weight('New Type', '7')
self.grading_page.save()
assignment_weight = self.grading_page.get_assignment_weight('New Type')
self.assertEqual(assignment_weight, '7')
self.grading_page.refresh_and_wait_for_load()
assignment_weight = self.grading_page.get_assignment_weight('New Type')
self.assertEqual(assignment_weight, '7')
def test_staff_cannot_save_invalid_settings(self):
"""
Scenario: User cannot save invalid settings
Given I have populated a new course in Studio
And I am viewing the grading settings
When I change assignment type "Homework" to ""
Then the save notification button is disabled
"""
self.grading_page.change_assignment_name('Homework', '')
self.assertTrue(self.grading_page.is_notification_button_disbaled(), True)
def test_edit_highest_grade_name(self):
"""
Scenario: User can edit grading range names
Given I have populated a new course in Studio
And I am viewing the grading settings
When I change the highest grade range to "Good"
And I press the "Save" notification button
And I reload the page
Then I see the highest grade range is "Good"
"""
self.grading_page.edit_grade_name('Good')
self.grading_page.save()
self.grading_page.refresh_and_wait_for_load()
grade_name = self.grading_page.highest_grade_name
self.assertEqual(grade_name, 'Good')
def test_staff_cannot_edit_lowest_grade_name(self):
"""
Scenario: User cannot edit failing grade range name
Given I have populated a new course in Studio
And I am viewing the grading settings
Then I cannot edit the "Fail" grade range
"""
self.grading_page.try_edit_fail_grade('Failure')
self.assertNotEqual(self.grading_page.lowest_grade_name, 'Failure')
|
ESOedX/edx-platform
|
common/test/acceptance/tests/studio/test_studio_grading.py
|
Python
|
agpl-3.0
| 10,392
|
[
"VisIt"
] |
6a8472950dfa5f1fd6fe3a9120f0bc52608546db1230e70bc4316bb0696f2c40
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
"""
from PIL import ImageEnhance
from nupicvision.regions.ImageSensorFilters.BaseFilter import BaseFilter
class GaussianBlur(BaseFilter):
"""
Apply a Gaussian blur to the image.
"""
def __init__(self, level=1):
"""
@param level -- Number of times to blur.
"""
BaseFilter.__init__(self)
self.level = level
def process(self, image):
"""
@param image -- The image to process.
Returns a single image, or a list containing one or more images.
"""
BaseFilter.process(self, image)
mask = image.split()[1]
for i in xrange(self.level):
sharpness_enhancer = ImageEnhance.Sharpness(image.split()[0])
image = sharpness_enhancer.enhance(0.0)
image.putalpha(mask)
return image
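# Illustrative usage sketch (added for clarity; not part of the original module).
# The filter is normally built by the ImageSensor region from a filter specification;
# constructing it directly looks like this (the level value is an invented example).
_example_filter = GaussianBlur(level=2)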
|
neuroidss/nupic.vision
|
nupicvision/regions/ImageSensorFilters/GaussianBlur.py
|
Python
|
gpl-3.0
| 1,746
|
[
"Gaussian"
] |
fae41f80e0df21d4654f056617ac31a17ec2efaeace1a87f72517e3ee1cbe9b1
|
#!/usr/bin/env python
#
# GPytage config.py module
#
############################################################################
# Copyright (C) 2008-2010 by Kenneth Prugh, Brian Dolbec #
# ken69267@gmail.com #
# #
# This program is free software; you can redistribute it and#or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation under version 2 of the license. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
############################################################################
from sys import exit, stderr
config_files = ['bashrc', 'categories', 'color.map', 'mirrors', 'modules', \
'package.keywords', 'package.license', 'package.mask',\
'package.properties', 'package.unmask', 'package.use', 'repos.conf',\
'profile', 'sets']
test_path = '/etc/testportage/'
try: # >=portage 2.1 modules
import portage
import portage.const
except ImportError, e:
print >>stderr, "Portage Import Error: ", e
exit('Could not find portage module.\n'
'Are you sure this is a Gentoo system?')
print >>stderr, ("Config: portage version = " + portage.VERSION)
config_path = "/" + portage.const.USER_CONFIG_PATH + "/"
PORTDIR=portage.config(clone=portage.settings).environ()['PORTDIR']
# housekeeping: drop the portage import now that it is no longer needed
del portage
def set_test_path():
global config_path, test_path
config_path = test_path
print "CONFIG: new config_path = " + config_path
def get_config_path():
return config_path
|
Ken69267/gpytage
|
gpytage/config.py
|
Python
|
gpl-2.0
| 2,419
|
[
"Brian"
] |
aab682be0dc448f9e6644212a996233b510959d6948a35749f8b7a3e630942a0
|
import numpy as np
import ephem
from scipy.stats import norm
def gaussian(x, a, b, c, d):
val = d + (a * np.exp(-(x - b)**2 / c**2))
return val
def fehprior(feh):
_fehprior = (0.8/0.15*np.exp(-0.5*(feh-0.016)**2./0.15**2.)
+0.2/0.22*np.exp(-0.5*(feh+0.15)**2./0.22**2.))
return _fehprior
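# Illustrative usage sketch (added for clarity; not part of the original module).
# The numbers below are invented examples, not values used elsewhere in isoclassify.
def _example_priors():
    x = np.array([0.0, 0.15])
    bump = gaussian(x, 1.0, 0.0, 0.15, 0.0)  # un-normalised bump of height 1 centred at x=0
    prior = fehprior(x)                      # two-component Gaussian mixture over [Fe/H]
    return bump, prior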
def avprior(av_in,data,i,dust,dist):
# conversion from E(B-V) to Av from Schlafly & Finkbeiner
fac = 2.742
ebv_in = av_in/fac
dm = 5.*np.log10(dist)-5.
ra = data['ra'][i]*np.pi/180.
dec = data['dec'][i]*np.pi/180.
equ = ephem.Equatorial(ra, dec, epoch=ephem.J2000)
gal = ephem.Galactic(equ)
dsquared = ((gal.lon*180./np.pi - dust['lon'])**2
+ (gal.lat*180./np.pi - dust['lat'])**2 )
pos = np.argmin(dsquared)
ebv = dust['vals'][pos,:,:]
dms =np.arange(4.0,19.5,0.5)
ebvs = np.zeros(20)
for j in range(0,len(ebvs)):
ebvs[j] = np.interp(dm,dms,ebv[j,:])
med = np.median(ebvs)
std = np.std(ebvs)
#print gal.lon*180./np.pi,gal.lat*180./np.pi,dm,med
#pdb.set_trace()
return fac * gaussian(ebv_in,1.,np.median(ebvs),np.std(ebvs),0.)
def getav(data,i,dust,dist):
# conversion from E(B-V) to Av from Schlafly & Finkbeiner
fac = 2.742
dm = 5.*np.log10(dist)-5.
ra = data['ra'][i]*np.pi/180.
dec = data['dec'][i]*np.pi/180.
equ = ephem.Equatorial(ra, dec, epoch=ephem.J2000)
gal = ephem.Galactic(equ)
dsquared = ((gal.lon*180./np.pi - dust['lon'])**2
+ (gal.lat*180./np.pi - dust['lat'])**2 )
pos = np.argmin(dsquared)
ebv = dust['vals'][pos,:,:]
dms = np.arange(4.0,19.5,0.5)
ebvs = np.zeros(20)
for j in range(0,len(ebvs)):
ebvs[j] = np.interp(dm,dms,ebv[j,:])
med = np.median(ebvs)
std = np.std(ebvs)
#print gal.lon*180./np.pi,gal.lat*180./np.pi,dm,med
#pdb.set_trace()
#return fac*gaussian(ebv_in,1.,np.median(ebvs),np.std(ebvs),0.)
return med,std
|
danxhuber/isoclassify
|
isoclassify/grid/priors.py
|
Python
|
mit
| 1,993
|
[
"Gaussian"
] |
441bf32fac3692aba3519bb806395c495e8696b1b71cb500d50821412a49cf79
|
#!/usr/bin/env python
import numpy
import netCDF4
import argparse
import os
def parseCommandLine():
"""
Parse the command line and invoke operations.
"""
parser = argparse.ArgumentParser(description=
'''
Reads WOA05 ascii files and writes as netcdf.
''',
epilog='Written by A.Adcroft, 2014. No support offered.')
parser.add_argument('inFile', type=str,
metavar='ASCII_FILE',
help='''Ascii file obtained from ftp://ftp.nodc.noaa.gov/pub/WOA05/DATA/ .''')
parser.add_argument('outFile', type=str,
metavar='NETCDF_FILE',
help='''netCDF file to construct.''')
cla = parser.parse_args()
data = readAscii(cla.inFile)
writeNetcdf(cla.outFile, data, os.path.split(cla.inFile)[1][0])
def readAscii(inFile):
"""
Reads the WOA05 fixed-width ascii format and returns the data as a numpy array.
"""
N = 180*360*33 # Largest size of dataset in WOA05
data = numpy.zeros(N)
n = 0
for line in open(inFile, 'r'):
s = 0
while s<80:
data[n] = float(line[s:s+8])
n += 1
s += 8
print '\rRead %i/%i (%.2f%%)'%(n,N,100.*n/N),'from',inFile,
nk = n/(360*180)
print ' ...',nk,'levels read'
data = data.reshape((33,180,360))
return data[:nk] # Removes unfilled levels
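# Illustrative sketch (added for clarity; not part of the original script): each WOA05
# record is an 80-character line made of ten 8-character fixed-width fields, which is
# what the slicing loop in readAscii() steps through. The sample record is invented.
def exampleFixedWidthRecord():
    line = ''.join(['%8.3f' % v for v in range(10)])  # fake 80-character record
    return [float(line[s:s+8]) for s in range(0, 80, 8)]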
def writeNetcdf(outFile, data, varId):
"""
Writes data to a 1x1x33 grid netcdf file, outFile.
varId is a single character indicating the variable (t,s,o,n,p or i).
"""
nk = data.shape[0]
# Axis data
dl = 360./data.shape[-1]
lat = numpy.arange(-90.+0.5*dl,90.,dl)
lon = numpy.arange(0.5*dl,360.,dl)
depth = numpy.array([0,10,20,30,50,75,
100,125,150,200,250,300,400,500,600,700,800,900,
1000,1100,1200,1300,1400,1500,1750,2000,2500,3000,3500,
4000,4500,5000,5500,6000,6500,7000,7500,8000,8500,9000])
depth = depth[:nk]
rg = netCDF4.Dataset(outFile, 'w', format='NETCDF3_CLASSIC')
# netCDF dimensions
dLon = rg.createDimension('lon', lon.shape[0])
dLat = rg.createDimension('lat', lat.shape[0])
dDepth = rg.createDimension('depth', depth.shape[0])
# netCDF variables
vLon = rg.createVariable('lon','f4',('lon'))
vLat = rg.createVariable('lat','f4',('lat'))
vDepth = rg.createVariable('depth','f4',('depth'))
vData = rg.createVariable({'t':'temperature',
's':'salinity'
}[varId], 'f4',('depth','lat','lon'))
# netCDF attributes
rg.source = 'http://www.nodc.noaa.gov/OC5/WOA05/pr_woa05.html'
rg.Conventions = 'CF-1.4'
rg.title = 'World Ocean Atlas 2005'
rg.reference = 'ftp://ftp.nodc.noaa.gov/pub/WOA05/DOC/woa05documentation.pdf'
vLon.long_name = 'Longitude'
vLon.standard_name = 'longitude'
vLon.units = 'degrees east'
vLon.axis = 'X'
vLon.description = 'Longitude Range: 0 to 360 degrees with 0 at Greenwich Meridian and increasing towards the east.'
vLat.long_name = 'Latitude'
vLat.standard_name = 'latitude'
vLat.units = 'degrees north'
vLat.axis = 'Y'
vLat.description = 'Latitude Range: -90 to 90 degrees with 0 at the Equator and increasing towards the north.'
vDepth.long_name = 'Depth'
vDepth.standard_name = 'depth'
vDepth.units = 'm'
vDepth.axis = 'Z'
vDepth.positive = 'down'
vDepth.description = 'Standard depth levels'
vData.long_name = {'t':'Objectively Analyzed Climatology of In-situ temperature',
's':'Objectively Analyzed Climatology of Practical Salinity',
'o':'Objectively Analyzed Climatology of Dissolved Oxygen'
}[varId]
vData.standard_name = {'t':'sea_water_temperature',
's':'sea_water_salinity',
'o':'sea_water_dissolved_oxygen'
}[varId]
vData.definition = {'t':'In-situ temperature',
's':'Practical salinity',
'o':'Dissolved oxygen'
}[varId]
vData.units = {'t':'degrees Celsius',
's':'psu',
'o':'ml/l'
}[varId]
vData.missing_value = -99.9999
vData.references = {'t':'Locarnini, R. A., A. V. Mishonov, J. I. Antonov, T. P. Boyer, and H. E. Garcia, 2006. World Ocean Atlas 2005, Volume 1: Temperature. S. Levitus, Ed. NOAA Atlas NESDIS 61, U.S. Government Printing Office, Washington, D.C., 182 pp.',
's':'Antonov, J. I., R. A. Locarnini, T. P. Boyer, A. V. Mishonov, and H. E. Garcia, 2006. World Ocean Atlas 2005, Volume 2: Salinity. S. Levitus, Ed. NOAA Atlas NESDIS 62, U.S. Government Printing Office, Washington, D.C., 182 pp.',
'o':'Garcia, H. E., R. A. Locarnini, T. P. Boyer, and J. I. Antonov, 2006. World Ocean Atlas 2005, Volume 3: Dissolved Oxygen, Apparent Oxygen Utilization, and Oxygen Saturation. S. Levitus, Ed. NOAA Atlas NESDIS 63, U.S. Government Printing Office, Washington, D.C., 342 pp.'
}[varId]
vData.documentation = {'t':'ftp://ftp.nodc.noaa.gov/pub/WOA05/DOC/woa05_temperature_final.pdf',
's':'ftp://ftp.nodc.noaa.gov/pub/WOA05/DOC/woa05_salinity_final.pdf',
'o':'ftp://ftp.nodc.noaa.gov/pub/WOA05/DOC/woa05_oxygen_final.pdf'
}[varId]
vData.notes = {'t':'This variable is assumed to be in-situ temperature and not potential temperature because the documentation does not make any explicit statements to the contrary.',
's':'Units are not documented but assumed to be psu.',
'o':''
}[varId]
# Write the data
vLon[:] = lon
vLat[:] = lat
vDepth[:] = depth
vData[:] = data
rg.close()
# Invoke parseCommandLine(), the top-level procedure
if __name__ == '__main__': parseCommandLine()
|
adcroft/convert_WOA05
|
python/WOA05_to_netcdf.py
|
Python
|
mit
| 5,973
|
[
"NetCDF"
] |
0dd176714cfdb515f04c0585d15542823715708e2094e30e2b8c7ab649b193d3
|
#!/usr/bin/env python
'''
TrainNetwork2.py
Written by Jeff Berry Jul 1 2010
Revised by Gus Hahn-Powell March 8 2014
purpose:
Train a translational Deep Belief Network for tracing. The training
data should be arranged in a folder called Subject<N>, where N is any
positive integer, such as Subject1. Images are located in Subject1/IMAGES/
and should be .jpg or .png files. The traces are in Subject1/TongueContours.csv,
which can be created using AutoTrace.py or configdir.py. Parameters
defining the region of interest should be listed in Subject1/ROI_config.txt,
otherwise a default ROI will be used. The resulting tDBN will be located
in savefiles/network<time>.mat.
usage:
python TrainNetwork2.py
---------------------------------------------
Modified by Jeff Berry Feb 19 2011
reason:
Updated to make use of ROI_config.txt, which should be in the same
folder as JPG/ and TongueContours.csv
'''
import sys, os
import subprocess as sp
import re
import smtplib
try:
import pygtk
pygtk.require("2.0")
except:
pass
try:
import gtk
import gtk.glade
except:
sys.exit(1)
subject_number_pattern = re.compile("Subject([0-9]+)",re.IGNORECASE)
class TrainNetwork:
"""This is the class for the main window of trainnetwork.py"""
def __init__(self):
#Set the Glade file
self.gladefile = "TrainNetwork2.glade"
self.wTree = gtk.glade.XML(self.gladefile, "mainWindow")
#Create dictionary of callbacks and connect
dic = { "on_mainWindow_destroy" : gtk.main_quit,
"on_open_activate" : self.open_settings,
"on_save_activate" : self.on_save,
"on_save_as_activate" : self.save_settings,
"on_quit_activate" : gtk.main_quit,
"on_add_data_activate" : self.add_data,
"on_crossvalbutton_toggled" : self.set_crossval,
"on_maximgbutton_toggled" : self.practice_run,
"on_config_layers" : self.config_layers,
"on_reset_default_activate" : self.reset_network,
"on_crossvalbutton_toggled" : self.set_crossval,
"on_train_clicked" : self.train,
"on_cancel_clicked" : self.cancel_training,
"on_notify_toggled" : self.set_notify }
self.wTree.signal_autoconnect(dic)
#Set default values
self.set_defaults()
#set status for grayed out entry boxes
self.maximgtextentry = self.wTree.get_widget("maximgtextentry")
self.crossvaltextentry = self.wTree.get_widget("crossvaltextentry")
self.maximgtextentry.set_text(str(self.MAX_NUM_IMAGES))
self.maximgtextentry.set_sensitive(False)
self.crossvaltextentry.set_text(str(self.NFOLDS))
self.crossvaltextentry.set_sensitive(False)
#Get widgets for settings functions
self.maximgbutton = self.wTree.get_widget("maximgbutton")
self.crossvalbutton = self.wTree.get_widget("crossvalbutton")
#Callback definitions
def set_defaults(self):
self.MAX_NUM_IMAGES = 100
self.NFOLDS = 5
self.PRACTICE_RUN = False
self.USE_CROSSVAL = False
self.USE_DEFAULT_NET = True
self.TRAIN_ULTRASOUND = True
self.TRAIN_CONTOURS = True
self.TRAIN_AUDIO = False
self.TRAIN_VELUM = False
self.TRAIN_GLOTTIS = False
self.TRAIN_LIPS = False
self.TRAIN_TRANSCRIPTION = False
self.TEST_ULTRASOUND = True
self.TEST_CONTOURS = False
self.TEST_AUDIO = False
self.TEST_VELUM = False
self.TEST_GLOTTIS = False
self.TEST_LIPS = False
self.TEST_TRANSCRIPTION = False
self.LAYER_SIZES = "[size(trainX,2), size(XC,2), size(XC,2), size(XC,2)*5]"
self.LAYER_TYPES = "{'gaussian', 'sigmoid', 'sigmoid', 'sigmoid'}"
self.SAVEFILE = ""
self.DATA_DIR = ""
self.SUBJECT_NUMS = ""
self.NOTIFY = False
def open_settings(self, widget):
"""A file chooser called when user clicks Open"""
fc = gtk.FileChooserDialog(title='Open Settings File...', parent=None,
action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
g_directory = fc.get_current_folder() if fc.get_current_folder() else os.path.expanduser("~")
fc.set_current_folder(g_directory)
fc.set_default_response(gtk.RESPONSE_OK)
ffilter = gtk.FileFilter()
ffilter.set_name('Settings Files')
ffilter.add_pattern('*.txt')
fc.add_filter(ffilter)
response = fc.run()
if response == gtk.RESPONSE_OK:
settings = fc.get_filename()
g_directory = fc.get_current_folder()
self.load_settings(settings)
fc.destroy()
def load_settings(self, settings):
f = open(settings, 'r').readlines()
dic = {}
for i in range(len(f)):
line2list = f[i][:-1].split('=')
if len(line2list) == 2:
dic[line2list[0]] = line2list[1]
if dic.has_key("MAX_NUM_IMAGES"):
self.MAX_NUM_IMAGES = int(dic["MAX_NUM_IMAGES"])
self.maximgtextentry.set_text(str(self.MAX_NUM_IMAGES))
if dic.has_key("NFOLDS"):
self.NFOLDS = int(dic["NFOLDS"])
self.crossvaltextentry.set_text(str(self.NFOLDS))
if dic.has_key("PRACTICE_RUN"):
self.maximgbutton.set_active(self.parseBool(dic["PRACTICE_RUN"]))
if dic.has_key("USE_CROSSVAL"):
self.crossvalbutton.set_active(self.parseBool(dic["USE_CROSSVAL"]))
if dic.has_key("USE_DEFAULT_NET"):
self.USE_DEFAULT_NET = self.parseBool(dic["USE_DEFAULT_NET"])
if dic.has_key("LAYER_SIZES"):
self.LAYER_SIZES = dic["LAYER_SIZES"]
if dic.has_key("LAYER_TYPES"):
self.LAYER_TYPES = dic["LAYER_TYPES"]
if dic.has_key("DATA_DIR"):
self.DATA_DIR = dic["DATA_DIR"]
if dic.has_key("SUBJECT_NUMS"):
self.SUBJECT_NUMS = dic["SUBJECT_NUMS"]
def parseBool(self, inputstr):
return inputstr[0].upper() == 'T'
def save_settings(self, widget):
fc = gtk.FileChooserDialog(title='Save Settings File...', parent=None,
action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
g_directory = fc.get_current_folder() if fc.get_current_folder() else os.path.expanduser("~")
fc.set_current_folder(g_directory)
fc.set_default_response(gtk.RESPONSE_OK)
fc.set_do_overwrite_confirmation(True)
ffilter = gtk.FileFilter()
ffilter.set_name('Settings Files')
ffilter.add_pattern('*.txt')
fc.add_filter(ffilter)
response = fc.run()
if response == gtk.RESPONSE_OK:
self.SAVEFILE = fc.get_filename()
g_directory = fc.get_current_folder()
self.save(self.SAVEFILE)
fc.destroy()
def on_save(self, widget):
if self.SAVEFILE != "":
self.save(self.SAVEFILE)
else:
self.save_settings(widget)
def save(self, filename):
f = open(filename, 'w')
f.write("MAX_NUM_IMAGES=%d\n" %self.MAX_NUM_IMAGES)
f.write("NFOLDS=%d\n" %self.NFOLDS)
f.write("PRACTICE_RUN=%s\n" %str(self.PRACTICE_RUN))
f.write("USE_CROSSVAL=%s\n" %str(self.USE_CROSSVAL))
f.write("USE_DEFAULT_NET=%s\n" %str(self.USE_DEFAULT_NET))
f.write("TRAIN_ULTRASOUND=%s\n" %str(self.TRAIN_ULTRASOUND))
f.write("TRAIN_CONTOURS=%s\n" %str(self.TRAIN_CONTOURS))
f.write("TRAIN_AUDIO=%s\n" %str(self.TRAIN_AUDIO))
f.write("TRAIN_VELUM=%s\n" %str(self.TRAIN_VELUM))
f.write("TRAIN_GLOTTIS=%s\n" %str(self.TRAIN_GLOTTIS))
f.write("TRAIN_LIPS=%s\n" %str(self.TRAIN_LIPS))
f.write("TRAIN_TRANSCRIPTION=%s\n" %str(self.TRAIN_TRANSCRIPTION))
f.write("TEST_ULTRASOUND=%s\n" %str(self.TEST_ULTRASOUND))
f.write("TEST_CONTOURS=%s\n" %str(self.TEST_CONTOURS))
f.write("TEST_AUDIO=%s\n" %str(self.TEST_AUDIO))
f.write("TEST_VELUM=%s\n" %str(self.TEST_VELUM))
f.write("TEST_GLOTTIS=%s\n" %str(self.TEST_GLOTTIS))
f.write("TEST_LIPS=%s\n" %str(self.TEST_LIPS))
f.write("TEST_TRANSCRIPTION=%s\n" %str(self.TEST_TRANSCRIPTION))
f.write("LAYER_SIZES=%s\n" %self.LAYER_SIZES)
f.write("LAYER_TYPES=%s\n" %self.LAYER_TYPES)
f.write("DATA_DIR=%s\n" %self.DATA_DIR)
f.write("SUBJECT_NUMS=%s\n" %self.SUBJECT_NUMS)
f.close()
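    # Illustrative sketch (added for clarity; not part of the original GUI code): the
    # settings files written by save() above and parsed by load_settings() are plain
    # KEY=value lines, e.g. (abridged; the DATA_DIR and SUBJECT_NUMS values are invented):
    #
    #   MAX_NUM_IMAGES=100
    #   NFOLDS=5
    #   PRACTICE_RUN=False
    #   USE_CROSSVAL=False
    #   LAYER_SIZES=[size(trainX,2), size(XC,2), size(XC,2), size(XC,2)*5]
    #   DATA_DIR=/home/user/ultrasound_data
    #   SUBJECT_NUMS=[1, 2]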
def get_max_images(self):
if self.PRACTICE_RUN == True:
self.MAX_NUM_IMAGES = int(self.maximgtextentry.get_text())
return self.MAX_NUM_IMAGES
def get_nFolds(self):
if self.USE_CROSSVAL == True:
self.NFOLDS = int(self.crossvaltextentry.get_text())
return self.NFOLDS
def set_crossval(self, widget):
"""Called when user clicks Cross validation in Configure menu"""
if self.USE_CROSSVAL == False:
self.USE_CROSSVAL = True
self.crossvaltextentry.set_sensitive(True)
else:
self.USE_CROSSVAL = False
self.crossvaltextentry.set_sensitive(False)
def practice_run(self, widget):
"""Called when user clicks Practice run in Configure menu
Changes PRACTICE_RUN value and prompts user to set max images
"""
if self.PRACTICE_RUN == False:
self.PRACTICE_RUN = True
self.maximgtextentry.set_sensitive(True)
else:
self.PRACTICE_RUN = False
self.maximgtextentry.set_sensitive(False)
def reset_network(self, widget):
self.USE_DEFAULT_NET = True
self.LAYER_SIZES = "[size(trainX,2), size(XC,2), size(XC,2), size(XC,2)*5]"
self.LAYER_TYPES = "{'gaussian', 'sigmoid', 'sigmoid', 'sigmoid'}"
def config_layers(self, widget):
configDlg = ConfigLayersDialog()
result, layers = configDlg.run()
if result == gtk.RESPONSE_OK:
self.USER_NETWORK = layers
self.USE_DEFAULT_NET = False
def get_network_config(self):
if self.USE_DEFAULT_NET == False:
lsizes = []
ltypes = []
for i in range(len(self.USER_NETWORK)):
lsizes.append(self.USER_NETWORK[i][0])
ltypes.append(self.USER_NETWORK[i][1])
sizestr = ', '.join(lsizes)
typestr = "', '".join(ltypes)
self.LAYER_SIZES = "[" + sizestr + "]"
self.LAYER_TYPES = "{'" + typestr + "'}"
def set_notify(self, widget):
if self.NOTIFY == False:
self.NOTIFY = True
else:
self.NOTIFY = False
def train(self, widget):
"""This calls the matlab scripts and passes the parameters to them"""
args = self.parse_args()
#argstr = "cd ~/autotracer/trunk/TrainNetwork/; TrainNetwork" + args + "; quit()"
argstr = "TrainNetwork" + args + "; quit()"
print argstr
cmd = ['matlab', '-nodesktop', '-nosplash', '-r', argstr]
self.proc = sp.Popen(cmd)
self.proc.wait()
if self.NOTIFY:
self.send_notification(argstr)
def send_notification(self, argstr):
print "done"
senderentry = self.wTree.get_widget("senderentry")
passwordentry = self.wTree.get_widget("passwordentry")
recipiententry = self.wTree.get_widget("recipiententry")
FROMADDR = senderentry.get_text()
LOGIN = FROMADDR
PASSWORD = passwordentry.get_text()
TOADDRS = [recipiententry.get_text()]
SUBJECT = "Network finished training"
msg = ("From: %s\r\nTo: %s\r\nSubject: %s\r\n\r\n"
% (FROMADDR, ", ".join(TOADDRS), SUBJECT) )
msg += "done\r\n" + argstr
server = smtplib.SMTP('smtp.gmail.com', 587)
server.set_debuglevel(1)
server.ehlo()
server.starttls()
server.login(LOGIN, PASSWORD)
server.sendmail(FROMADDR, TOADDRS, msg)
server.quit()
def cancel_training(self, widget):
"""Stops the matlab training scripts"""
self.proc.send_signal(15)
def parse_args(self):
"""This function is run when the user presses 'Go', and figures out all the settings from the buttons.
For now we'll ignore all the training and test params, except ultrasound and contours.
The function will need the following input args:
(bool train_ultrasound, bool train_contours, bool test_ultrasound, bool test_contours,
bool practice_run, int max_images, bool use_crossval, int nfolds,
str network_sizes, str network_types, str data_dir, str subject_nums)"""
args = []
args.append(str(self.TRAIN_ULTRASOUND).lower())
args.append(str(self.TRAIN_CONTOURS).lower())
args.append(str(self.TEST_ULTRASOUND).lower())
args.append(str(self.TEST_CONTOURS).lower())
args.append(str(self.PRACTICE_RUN).lower())
if self.PRACTICE_RUN == True:
args.append(self.get_max_images())
else:
args.append('Inf')
args.append(str(self.USE_CROSSVAL).lower())
args.append(self.get_nFolds())
self.get_network_config()
args.append("'" + self.LAYER_SIZES + "'")
args.append(self.LAYER_TYPES)
args.append("'" + self.DATA_DIR + "'")
args.append(self.SUBJECT_NUMS)
self.pathtofiles = self.DATA_DIR + "/Subject" + self.SUBJECT_NUMS[1] + '/'
self.config = self.pathtofiles + 'ROI_config.txt'
print self.config
if (os.path.isfile(self.config)):
print "Found ROI_config.txt"
c = open(self.config, 'r').readlines()
top = int(c[1][:-1].split('\t')[1])
bottom = int(c[2][:-1].split('\t')[1])
left = int(c[3][:-1].split('\t')[1])
right = int(c[4][:-1].split('\t')[1])
print "using ROI: [%d:%d, %d:%d]" % (top, bottom, left, right)
else:
print "ROI_config.txt not found"
top = 140 #default settings for the Sonosite Titan
bottom = 320
left = 250
right = 580
print "using ROI: [%d:%d, %d:%d]" % (top, bottom, left, right)
args.append([top, bottom, left, right])
argstrlist = []
for i in range(len(args)):
print args[i]
argstrlist.append(str(args[i]))
argstr = "(" + ", ".join(argstrlist) + ")"
return argstr
def add_data(self, widget):
fc = gtk.FileChooserDialog(title='Select Subject<N> folders to use...', parent=None,
action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
g_directory = fc.get_current_folder() if fc.get_current_folder() else os.path.expanduser("~")
fc.set_current_folder(g_directory)
fc.set_default_response(gtk.RESPONSE_OK)
fc.set_select_multiple(True)
response = fc.run()
if response == gtk.RESPONSE_OK:
dirs = fc.get_filenames()
g_directory = fc.get_current_folder()
self.getSubjectNums(dirs)
fc.destroy()
def getSubjectNums(self, dirlist):
subjectNums = [re.search(subject_number_pattern, subject_folder).group(1) for subject_folder in dirlist if re.search(subject_number_pattern, subject_folder)]
self.SUBJECT_NUMS = "[" + ", ".join(subjectNums) + "]"
self.DATA_DIR = os.path.dirname(dirlist[0])
class ConfigLayersDialog:
"""This class is used to get sizes and types of layers of the network"""
def __init__(self):
self.gladefile = "TrainNetwork.glade"
def run(self):
self.wTree = gtk.glade.XML(self.gladefile, "ConfigureNetwork")
dic = {"on_tbAddLayer_clicked" : self.onAddLayer }
self.wTree.signal_autoconnect(dic)
self.dlg = self.wTree.get_widget("ConfigureNetwork")
self.tView = self.wTree.get_widget("treeview1")
self.cSize = 0
self.cType = 1
self.sSize = "Size"
self.sType = "Type"
self.AddColumn(self.sSize, self.cSize)
self.AddColumn(self.sType, self.cType)
self.LayerList = gtk.ListStore(str, str)
self.tView.set_model(self.LayerList)
#This will be the returned data
self.layers = []
self.result = self.dlg.run()
self.dlg.destroy()
return self.result, self.layers
def AddColumn(self, title, columnId):
column = gtk.TreeViewColumn(title, gtk.CellRendererText(), text=columnId)
column.set_resizable(True)
column.set_sort_column_id(columnId)
self.tView.append_column(column)
def onAddLayer(self, widget):
layerDlg = layerDialog()
result, newLayer = layerDlg.run()
if (result == gtk.RESPONSE_OK):
self.LayerList.append(newLayer.getList())
self.layers.append(newLayer.getList())
class layerDialog:
"""This class shows the dialog for adding layers to the network"""
def __init__(self, size="", ltype=""):
self.gladefile = "TrainNetwork.glade"
self.layer = Layer(size, ltype)
def run(self):
self.wTree = gtk.glade.XML(self.gladefile, "AddLayer")
self.dlg = self.wTree.get_widget("AddLayer")
self.enSize = self.wTree.get_widget("enSize")
self.enSize.set_text(self.layer.size)
self.enTypeCB = self.wTree.get_widget("enTypeCB")
self.result = self.dlg.run()
self.layer.size = self.enSize.get_text()
self.layer.ltype = self.enTypeCB.get_active_text()
self.dlg.destroy()
return self.result, self.layer
class Layer:
"""This class holds the layer information"""
def __init__(self, size="", ltype=""):
self.size = size
self.ltype = ltype
def getList(self):
return [self.size, self.ltype]
if __name__ == "__main__":
TrainNetwork()
gtk.main()
|
arizona-phonological-imaging-lab/Autotrace
|
matlab-version/TrainNetwork2.py
|
Python
|
mit
| 16,128
|
[
"Gaussian"
] |
ee0e1b9539ec85e8430bf4714f8abc3ed8aefe5a8e208e100c01e69cc0af166b
|
import sys
sys.path.insert(1, "../../../")
import h2o, tests
import random
def cv_carsGBM():
# read in the dataset and construct training set (and validation set)
cars = h2o.import_file(path=h2o.locate("smalldata/junit/cars_20mpg.csv"))
# choose the type of model-building exercise (regression, binomial, or multinomial classification). 0:regression, 1:binomial,
# 2:multinomial
problem = random.sample(range(3),1)[0]
# pick the predictors and response column, along with the correct distribution
predictors = ["displacement","power","weight","acceleration","year"]
if problem == 1 :
response_col = "economy_20mpg"
distribution = "bernoulli"
cars[response_col] = cars[response_col].asfactor()
elif problem == 2 :
response_col = "cylinders"
distribution = "multinomial"
cars[response_col] = cars[response_col].asfactor()
else :
response_col = "economy"
distribution = "gaussian"
print "Distribution: {0}".format(distribution)
print "Response column: {0}".format(response_col)
## cross-validation
# 1. check that cv metrics are the same over repeated "Modulo" runs
nfolds = random.randint(3,10)
gbm1 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution, ntrees=5,
fold_assignment="Modulo")
gbm2 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution, ntrees=5,
fold_assignment="Modulo")
tests.check_models(gbm1, gbm2, True)
# 2. check that cv metrics are different over repeated "Random" runs
nfolds = random.randint(3,10)
gbm1 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution, ntrees=5,
fold_assignment="Random")
gbm2 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution, ntrees=5,
fold_assignment="Random")
try:
tests.check_models(gbm1, gbm2, True)
assert False, "Expected models to be different over repeated Random runs"
except AssertionError:
assert True
# 3. folds_column
num_folds = random.randint(2,5)
fold_assignments = h2o.H2OFrame(python_obj=[[random.randint(0,num_folds-1)] for f in range(cars.nrow)])
fold_assignments.setNames(["fold_assignments"])
cars = cars.cbind(fold_assignments)
gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], training_frame=cars, distribution=distribution, ntrees=5,
fold_column="fold_assignments", keep_cross_validation_predictions=True)
num_cv_models = len(gbm._model_json['output']['cross_validation_models'])
assert num_cv_models==num_folds, "Expected {0} cross-validation models, but got " \
"{1}".format(num_folds, num_cv_models)
cv_model1 = h2o.get_model(gbm._model_json['output']['cross_validation_models'][0]['name'])
cv_model2 = h2o.get_model(gbm._model_json['output']['cross_validation_models'][1]['name'])
assert isinstance(cv_model1, type(gbm)), "Expected cross-validation model to be the same model type as the " \
"constructed model, but got {0} and {1}".format(type(cv_model1),type(gbm))
assert isinstance(cv_model2, type(gbm)), "Expected cross-validation model to be the same model type as the " \
"constructed model, but got {0} and {1}".format(type(cv_model2),type(gbm))
# 4. keep_cross_validation_predictions
cv_predictions = gbm1._model_json['output']['cross_validation_predictions']
assert cv_predictions is None, "Expected cross-validation predictions to be None, but got {0}".format(cv_predictions)
cv_predictions = gbm._model_json['output']['cross_validation_predictions']
assert len(cv_predictions)==num_folds, "Expected the same number of cross-validation predictions " \
"as folds, but got {0}".format(len(cv_predictions))
# # 5. manually construct models
# fold1 = cars[cars["fold_assignments"]==0]
# fold2 = cars[cars["fold_assignments"]==1]
# manual_model1 = h2o.gbm(y=fold2[response_col],
# x=fold2[predictors],
# validation_y=fold1[response_col],
# validation_x=fold1[predictors], ntrees=5,
# distribution=distribution)
# manual_model2 = h2o.gbm(y=fold1[response_col],
# x=fold1[predictors],
# validation_y=fold2[response_col],
# validation_x=fold2[predictors], ntrees=5,
# distribution=distribution)
## boundary cases
# 1. nfolds = number of observations (leave-one-out cross-validation)
gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow, distribution=distribution, ntrees=5,
fold_assignment="Modulo")
# 2. nfolds = 0
gbm1 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=0, distribution=distribution, ntrees=5)
# check that this is equivalent to no nfolds
gbm2 = h2o.gbm(y=cars[response_col], x=cars[predictors], distribution=distribution, ntrees=5)
tests.check_models(gbm1, gbm2)
# 3. cross-validation and regular validation attempted
gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=random.randint(3,10), validation_y=cars[response_col], ntrees=5,
validation_x=cars[predictors], distribution=distribution)
## error cases
# 1. nfolds == 1 or < 0
try:
gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=random.sample([-1,1], 1)[0], ntrees=5,
distribution=distribution)
assert False, "Expected model-build to fail when nfolds is 1 or < 0"
except EnvironmentError:
assert True
# 2. more folds than observations
try:
gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow+1, distribution=distribution, ntrees=5,
fold_assignment="Modulo")
assert False, "Expected model-build to fail when nfolds > nobs"
except EnvironmentError:
assert True
# 3. fold_column and nfolds both specified
try:
gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=3, fold_column="fold_assignments", ntrees=5,
distribution=distribution, training_frame=cars)
assert False, "Expected model-build to fail when fold_column and nfolds both specified"
except EnvironmentError:
assert True
# 4. fold_column and fold_assignment both specified
try:
gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], fold_assignment="Random", fold_column="fold_assignments", ntrees=5,
distribution=distribution, training_frame=cars)
assert False, "Expected model-build to fail when fold_column and fold_assignment both specified"
except EnvironmentError:
assert True
if __name__ == "__main__":
tests.run_test(sys.argv, cv_carsGBM)
|
tarasane/h2o-3
|
h2o-py/tests/testdir_algos/gbm/pyunit_cv_carsGBM.py
|
Python
|
apache-2.0
| 7,238
|
[
"Gaussian"
] |
858720fd2c688f47838189f88a6a399f483d6fa8fadd529056c8e5f556da550c
|
# Copyright (C) 2015 Hydriz Scholz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA, or visit
# <http://www.gnu.org/copyleft/gpl.html>
import DBRCore
class DBRnspagecount:
def __init__( self, db='' ):
self.dbquery = DBRCore.DBQuery( db )
self.Wiki = DBRCore.Wiki( db )
# For forward compatibility, in case certain extensions are enabled on one but not the other
if ( db == "simplewiki" or db == "simplewiktionary" ):
self.namespaces = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 828, 829 ]
def execute( self ):
title = "Page count by namespace"
template = '''The number of pages in each [[{{subst:SITENAME}}:Namespace|namespace]]; data as of <onlyinclude>%s</onlyinclude>.
{| class="wikitable sortable plainlinks" style="width:100%%; margin:auto;"
|- style="white-space:nowrap;"
! [[{{subst:SITENAME}}:Namespace|ID]]
! Name
! Non-redirects
! Redirects
! Total
|-
%s
|- class="sortbottom"
! colspan="2" | Totals
! style="text-align:left;" | %s
! style="text-align:left;" | %s
! style="text-align:left;" | %s
|}
[[Category:{{subst:SITENAME}} database reports|{{SUBPAGENAME}}]]
'''
output = []
for ns in self.namespaces:
noredirectsquery = "SELECT COUNT(*) FROM page WHERE page_namespace='%d' AND page_is_redirect='0';" % ( ns )
redirectsquery = "SELECT COUNT(*) FROM page WHERE page_namespace='%d' AND page_is_redirect='1';" % ( ns )
totalquery = "SELECT COUNT(*) FROM page WHERE page_namespace='%d';" % ( ns )
noredirects = self.dbquery.execute( noredirectsquery )
for noredirect in noredirects:
nordr = noredirect[0]
redirects = self.dbquery.execute( redirectsquery )
for redirect in redirects:
rdr = redirect[0]
total = self.dbquery.execute( totalquery )
for summ in total:
tot = summ[0]
tablerow = "| %d\n| {{subst:ns:%d}}\n| %s\n| %s\n| %s\n|-" % ( ns, ns, nordr, rdr, tot )
output.append( tablerow )
totalnoredirectsquery = "SELECT COUNT(*) FROM page WHERE page_is_redirect='0';"
totalredirectsquery = "SELECT COUNT(*) FROM page WHERE page_is_redirect='1';"
grandtotalquery = "SELECT COUNT(*) FROM page;"
totalnoredirects = self.dbquery.execute( totalnoredirectsquery )
for totalnoredirect in totalnoredirects:
totnordr = totalnoredirect[0]
totalredirects = self.dbquery.execute( totalredirectsquery )
for totalredirect in totalredirects:
totrdr = totalredirect[0]
grandtotal = self.dbquery.execute( grandtotalquery )
for grand in grandtotal:
grant = grand[0]
contents = template % ( self.Wiki.getDataAsOf(), '\n'.join( output ), totnordr, totrdr, grant )
self.Wiki.outputToWiki( title, contents )
if __name__ == "__main__":
print "This module should not be called directly! Please use dbr.py to run the database reports."
|
Hydriz/DBReports
|
reports/nspagecount.py
|
Python
|
gpl-3.0
| 3,398
|
[
"VisIt"
] |
1a8be4a973af30129c4428df441d8d948e2e001059b7b45c4a1e2366a7d35801
|
from __future__ import division, print_function, absolute_import
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d,
sqrt, ceil, floor, array, compress,
pi, exp, ravel, count_nonzero, sin, cos, arctan2, hypot)
from scipy._lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats, _contains_nan
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'circmean', 'circvar', 'circstd', 'anderson_ksamp',
'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax',
'yeojohnson_normplot'
]
Mean = namedtuple('Mean', ('statistic', 'minmax'))
Variance = namedtuple('Variance', ('statistic', 'minmax'))
Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
def bayes_mvs(data, alpha=0.90):
r"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability ``alpha``.
See Also
--------
mvsdist
Notes
-----
Each tuple of mean, variance, and standard deviation estimates represent
the (center, (lower, upper)) with center the mean of the conditional pdf
of the value given the data and (lower, upper) is a confidence interval
centered on the median, containing the estimate to a probability
``alpha``.
Converts data to 1-D and assumes all data has the same mean and variance.
Uses Jeffrey's prior for variance and std.
Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))``
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
First a basic example to demonstrate the outputs:
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.bayes_mvs(data)
>>> mean
Mean(statistic=9.0, minmax=(7.103650222612533, 10.896349777387467))
>>> var
Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...))
>>> std
Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.945614605014631))
Now we generate some normally distributed random data, and get estimates of
mean and standard deviation with 95% confidence intervals for those
estimates:
>>> n_samples = 100000
>>> data = stats.norm.rvs(size=n_samples)
>>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.hist(data, bins=100, density=True, label='Histogram of data')
>>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean')
>>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r',
... alpha=0.2, label=r'Estimated mean (95% limits)')
>>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale')
>>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2,
... label=r'Estimated scale (95% limits)')
>>> ax.legend(fontsize=10)
>>> ax.set_xlim([-4, 4])
>>> ax.set_ylim([0, 0.5])
>>> plt.show()
"""
m, v, s = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
% alpha)
m_res = Mean(m.mean(), m.interval(alpha))
v_res = Variance(v.mean(), v.interval(alpha))
s_res = Std_dev(s.mean(), s.interval(alpha))
return m_res, v_res, s_res
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data
vdist : "frozen" distribution object
Distribution object representing the variance of the data
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data
See Also
--------
bayes_mvs
Notes
-----
The return values from ``bayes_mvs(data)`` is equivalent to
``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if n < 2:
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
if n > 1000: # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)
else:
nm1 = n - 1
fac = n * C / 2.
val = nm1 / 2.
mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1))
sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))
vdist = distributions.invgamma(val, scale=fac)
return mdist, vdist, sdist
def kstat(data, n=2):
r"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic k_n is the unique symmetric unbiased estimator of the
nth cumulant kappa_n.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
moment: Returns the n-th central moment about the mean for a sample.
Notes
-----
For a sample size n, the first few k-statistics are given by:
.. math::
k_{1} = \mu
k_{2} = \frac{n}{n-1} m_{2}
k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3}
k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)}
where :math:`\mu` is the sample mean, :math:`m_2` is the sample
variance, and :math:`m_i` is the i-th sample central moment.
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
Examples
--------
>>> from scipy import stats
>>> rndm = np.random.RandomState(1234)
As sample size increases, n-th moment and n-th k-statistic converge to the
same number (although they aren't identical). In the case of the normal
distribution, they converge to zero.
>>> for n in [2, 3, 4, 5, 6, 7]:
... x = rndm.normal(size=10**n)
... m, k = stats.moment(x, 3), stats.kstat(x, 3)
... print("%.3g %.3g %.3g" % (m, k, m-k))
-0.631 -0.651 0.0194
0.0282 0.0283 -8.49e-05
-0.0454 -0.0454 1.36e-05
7.53e-05 7.53e-05 -2.26e-09
0.00166 0.00166 -4.99e-09
-2.88e-06 -2.88e-06 8.63e-13
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = np.zeros(n + 1, np.float64)
data = ravel(data)
N = data.size
# raise ValueError on empty input
if N == 0:
raise ValueError("Data input must not be empty")
# on nan input, return nan without warning
if np.isnan(np.sum(data)):
return np.nan
for k in range(1, n + 1):
S[k] = np.sum(data**k, axis=0)
if n == 1:
return S[1] * 1.0/N
elif n == 2:
return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
elif n == 4:
return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
(N*(N-1.0)*(N-2.0)*(N-3.0)))
else:
raise ValueError("Should not be here.")
def kstatvar(data, n=2):
r"""
Returns an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat: Returns the n-th k-statistic.
moment: Returns the n-th central moment about the mean for a sample.
Notes
-----
The variances of the first few k-statistics are given by:
.. math::
var(k_{1}) = \frac{\kappa^2}{n}
var(k_{2}) = \frac{\kappa^4}{n} + \frac{2\kappa^2_{2}}{n - 1}
var(k_{3}) = \frac{\kappa^6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} +
\frac{9 \kappa^2_{3}}{n - 1} +
\frac{6 n \kappa^3_{2}}{(n-1) (n-2)}
var(k_{4}) = \frac{\kappa^8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} +
\frac{48 \kappa_{3} \kappa_5}{n - 1} +
\frac{34 \kappa^2_{4}}{n-1} + \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} +
\frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} +
\frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)}
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data, n=2) * 1.0/N
elif n == 2:
k2 = kstat(data, n=2)
k4 = kstat(data, n=4)
return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(n):
"""
Approximations of uniform order statistic medians.
Parameters
----------
n : int
Sample size.
Returns
-------
v : 1d float array
Approximations of the order statistic medians.
References
----------
.. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
Order statistics of the uniform distribution on the unit interval
are marginally distributed according to beta distributions.
The expectations of these order statistic are evenly spaced across
the interval, but the distributions are skewed in a way that
pushes the medians slightly towards the endpoints of the unit interval:
>>> n = 4
>>> k = np.arange(1, n+1)
>>> from scipy.stats import beta
>>> a = k
>>> b = n-k+1
>>> beta.mean(a, b)
array([ 0.2, 0.4, 0.6, 0.8])
>>> beta.median(a, b)
array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642])
The Filliben approximation uses the exact medians of the smallest
and greatest order statistics, and the remaining medians are approximated
by points spread evenly across a sub-interval of the unit interval:
>>> from scipy.stats.morestats import _calc_uniform_order_statistic_medians
>>> _calc_uniform_order_statistic_medians(n)
array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642])
This plot shows the skewed distributions of the order statistics
of a sample of size four from a uniform distribution on the unit interval:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0.0, 1.0, num=50, endpoint=True)
>>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)]
>>> plt.figure()
>>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3])
"""
v = np.zeros(n, dtype=np.float64)
v[-1] = 0.5**(1.0 / n)
v[0] = 1 - v[-1]
i = np.arange(2, n)
v[1:-1] = (i - 0.3175) / (n + 0.365)
return v
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, string_types):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def _add_axis_labels_title(plot, xlabel, ylabel, title):
"""Helper function to add axes labels and a title to stats plots"""
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title(title)
plot.set_xlabel(xlabel)
plot.set_ylabel(ylabel)
else:
# matplotlib.pyplot module
plot.title(title)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
except Exception:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
    quantiles = dist.ppf(val), for
            0.5**(1/n),                  for i = n
      val = (i - 0.3175) / (n + 0.365),  for i = 2, ..., n-1
            1 - 0.5**(1/n),              for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
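As a small illustrative aside (not part of the public API), the interior
``val`` entries are evenly spaced; for a sample of size five they can be
computed directly:
>>> n = 5
>>> i = np.arange(2, n)
>>> vals = (i - 0.3175) / (n + 0.365)  # approximately [0.314, 0.5, 0.686]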
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
... size=(nsample//2,2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
_perform_fit = fit or (plot is not None)
if x.size == 0:
if _perform_fit:
return (x, x), (np.nan, np.nan, 0.0)
else:
return x, x
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if _perform_fit:
# perform a linear least squares fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
_add_axis_labels_title(plot, xlabel='Theoretical quantiles',
ylabel='Ordered Values',
title='Probability Plot')
# Add R^2 value to the plot as text
if rvalue:
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
"""
Calculate the shape parameter that maximizes the PPCC
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. ppcc_max returns the shape parameter that would maximize the
probability plot correlation coefficient for the given data to a
one-parameter family of distributions.
Parameters
----------
x : array_like
Input array.
brack : tuple, optional
Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c)
then they are assumed to be a starting interval for a downhill bracket
search (see `scipy.optimize.brent`).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
Returns
-------
shape_value : float
The shape parameter at which the probability plot correlation
coefficient reaches its max value.
See also
--------
ppcc_plot, probplot, boxcox
Notes
-----
The brack keyword serves as a starting point which is useful in corner
cases. One can use a plot to obtain a rough visual estimate of the location
for the maximum to start the search near it.
References
----------
.. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
.. [2] https://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
... random_state=1234567) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(8, 6))
>>> ax = fig.add_subplot(111)
>>> res = stats.ppcc_plot(x, -5, 5, plot=ax)
We calculate the value where the shape should reach its maximum and a red
line is drawn there. The line should coincide with the highest point in the
ppcc_plot.
>>> shape_max = stats.ppcc_max(x)
>>> ax.vlines(shape_max, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
"""
Calculate and optionally plot probability plot correlation coefficient.
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. It cannot be used for distributions without shape parameters
(like the normal distribution) or with multiple shape parameters.
By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A
Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed
distributions via an approximately normal one, and is therefore particularly
useful in practice.
Parameters
----------
x : array_like
Input array.
a, b: scalar
Lower and upper bounds of the shape parameter to use.
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
plot : object, optional
If given, plots PPCC against the shape parameter.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`a` to `b`).
Returns
-------
svals : ndarray
The shape values for which `ppcc` was calculated.
ppcc : ndarray
The calculated probability plot correlation coefficient values.
See also
--------
ppcc_max, probplot, boxcox_normplot, tukeylambda
References
----------
J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234567)
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> fig = plt.figure(figsize=(12, 4))
>>> ax1 = fig.add_subplot(131)
>>> ax2 = fig.add_subplot(132)
>>> ax3 = fig.add_subplot(133)
>>> res = stats.probplot(x, plot=ax1)
>>> res = stats.boxcox_normplot(x, -5, 5, plot=ax2)
>>> res = stats.ppcc_plot(x, -5, 5, plot=ax3)
>>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
if b <= a:
raise ValueError("`b` has to be larger than `a`.")
svals = np.linspace(a, b, num=N)
ppcc = np.empty_like(svals)
for k, sval in enumerate(svals):
_, r2 = probplot(x, sval, dist=dist, fit=True)
ppcc[k] = r2[-1]
if plot is not None:
plot.plot(svals, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='Shape Values',
ylabel='Prob Plot Corr. Coef.',
title='(%s) PPCC Plot' % dist)
return svals, ppcc
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
llf = (\lambda - 1) \sum_i(\log(x_i)) -
N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title(r'$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
logdata = np.log(data)
# Compute the variance of the transformed data.
if lmb == 0:
variance = np.var(logdata, axis=0)
else:
# Transform without the constant offset 1/lmb. The offset does
# not affect the variance, and the subtraction of the offset can
# lead to loss of precision.
variance = np.var(data**lmb / lmb, axis=0)
return (lmb - 1) * np.sum(logdata, axis=0) - N/2 * np.log(variance)
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
r"""
Return a positive dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : {None, scalar}, optional
If `lmbda` is not None, do the transformation for that value.
If `lmbda` is None, find the lambda that maximizes the log-likelihood
function and return it as the second output argument.
alpha : {None, float}, optional
If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence
interval for `lmbda` as the third output argument.
Must be between 0.0 and 1.0.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
If `lmbda` parameter is None and ``alpha`` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given ``alpha``.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
The Box-Cox transform is given by::
y = (x**lmbda - 1) / lmbda, for lmbda != 0
log(x), for lmbda = 0
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when ``alpha`` is provided give the interval
where:
.. math::
llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
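As a quick illustrative check (a sketch, not an exhaustive test), the
``lmbda = 0`` branch of the transform reduces to the natural logarithm:
>>> from scipy import stats
>>> data = np.array([1.0, 2.0, 3.0])
>>> np.allclose(stats.boxcox(data, lmbda=0), np.log(data))
True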
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if any(x <= 0):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda`` in various ways:
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
7.177...
>>> lmax_pearsonr
7.916...
>>> stats.boxcox_normmax(x, method='all')
array([ 7.91667384, 7.17718692])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
"""
def _pearsonr(x, brack):
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
def _mle(x, brack):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return optimize.brent(_eval_mle, brack=brack, args=(x,))
def _all(x, brack):
maxlog = np.zeros(2, dtype=float)
maxlog[0] = _pearsonr(x, brack)
maxlog[1] = _mle(x, brack)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
return optimfunc(x, brack)
def _normplot(method, x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox or Yeo-Johnson normality plot,
optionally show it. See `boxcox_normplot` or `yeojohnson_normplot` for
details."""
if method == 'boxcox':
title = 'Box-Cox Normality Plot'
transform_func = boxcox
else:
title = 'Yeo-Johnson Normality Plot'
transform_func = yeojohnson
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the square root of correlation coefficient
# of transformed x
z = transform_func(x, lmbda=val)
_, (_, _, r) = probplot(z, dist='norm', fit=True)
ppcc[i] = r
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='$\\lambda$',
ylabel='Prob Plot Corr. Coef.',
title=title)
return lmbdas, ppcc
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
Probability Plot Correlation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
should be used after calling `boxcox_normplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
return _normplot('boxcox', x, la, lb, plot, N)
def yeojohnson(x, lmbda=None):
r"""
Return a dataset transformed by a Yeo-Johnson power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : float, optional
If ``lmbda`` is ``None``, find the lambda that maximizes the
log-likelihood function and return it as the second output argument.
Otherwise the transformation is done for the given value.
Returns
-------
yeojohnson: ndarray
Yeo-Johnson power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
See Also
--------
probplot, yeojohnson_normplot, yeojohnson_normmax, yeojohnson_llf, boxcox
Notes
-----
The Yeo-Johnson transform is given by::
y = ((x + 1)**lmbda - 1) / lmbda, for x >= 0, lmbda != 0
log(x + 1), for x >= 0, lmbda = 0
-((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda), for x < 0, lmbda != 2
-log(-x + 1), for x < 0, lmbda = 2
Unlike `boxcox`, `yeojohnson` does not require the input data to be
positive.
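As a quick illustrative check (a sketch, not an exhaustive test),
``lmbda = 1`` leaves the data unchanged, negative values included:
>>> from scipy import stats
>>> x = np.array([-1.5, 0.0, 2.0])
>>> np.allclose(stats.yeojohnson(x, lmbda=1), x)
True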
.. versionadded:: 1.2.0
References
----------
I. Yeo and R.A. Johnson, "A New Family of Power Transformations to
Improve Normality or Symmetry", Biometrika 87.4 (2000): 954-959.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `yeojohnson` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, lmbda = stats.yeojohnson(x)
>>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Yeo-Johnson transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if lmbda is not None:
return _yeojohnson_transform(x, lmbda)
# if lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = yeojohnson_normmax(x)
y = _yeojohnson_transform(x, lmax)
return y, lmax
def _yeojohnson_transform(x, lmbda):
"""Return x transformed by the Yeo-Johnson power transform with given
parameter lmbda."""
out = np.zeros_like(x)
pos = x >= 0 # binary mask
# when x >= 0
if abs(lmbda) < np.spacing(1.):
out[pos] = np.log1p(x[pos])
else: # lmbda != 0
out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda
# when x < 0
if abs(lmbda - 2) > np.spacing(1.):
out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda)
else: # lmbda == 2
out[~pos] = -np.log1p(-x[~pos])
return out
def yeojohnson_llf(lmb, data):
r"""The yeojohnson log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Yeo-Johnson transformation. See `yeojohnson` for
details.
data : array_like
Data to calculate Yeo-Johnson log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float
Yeo-Johnson log-likelihood of `data` given `lmb`.
See Also
--------
yeojohnson, probplot, yeojohnson_normplot, yeojohnson_normmax
Notes
-----
The Yeo-Johnson log-likelihood function is defined here as
.. math::
llf = -N/2 \log(\hat{\sigma}^2) + (\lambda - 1)
      \sum_i \text{ sign }(x_i)\log(|x_i| + 1)
where :math:`\hat{\sigma}^2` is the estimated variance of the Yeo-Johnson
transformed input data ``x``.
.. versionadded:: 1.2.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Yeo-Johnson log-likelihood
values for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.yeojohnson_llf(lmbda, x)
Also find the optimal lmbda value with `yeojohnson`:
>>> x_most_normal, lmbda_optimal = stats.yeojohnson(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.yeojohnson_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Yeo-Johnson log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `yeojohnson` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.yeojohnson(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title(r'$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
n_samples = data.shape[0]
if n_samples == 0:
return np.nan
trans = _yeojohnson_transform(data, lmb)
loglike = -n_samples / 2 * np.log(trans.var(axis=0))
loglike += (lmb - 1) * (np.sign(data) * np.log(np.abs(data) + 1)).sum(axis=0)
return loglike
def yeojohnson_normmax(x, brack=(-2, 2)):
"""Compute optimal Yeo-Johnson transform parameter for input data, using
maximum likelihood estimation.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
Returns
-------
maxlog : float
The optimal transform parameter found.
Notes
-----
.. versionadded:: 1.2.0
See Also
--------
yeojohnson, yeojohnson_llf, yeojohnson_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda``
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> lmax = stats.yeojohnson_normmax(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.yeojohnson_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax, color='r')
>>> plt.show()
"""
def _neg_llf(lmbda, data):
return -yeojohnson_llf(lmbda, data)
return optimize.brent(_neg_llf, brack=brack, args=(x,))
def yeojohnson_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Yeo-Johnson normality plot, optionally show it.
A Yeo-Johnson normality plot shows graphically what the best
transformation parameter is to use in `yeojohnson` to obtain a
distribution that is close to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to
`yeojohnson` for Yeo-Johnson transformations. These are also the
limits of the horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Yeo-Johnson transform was done.
ppcc : ndarray
Probability Plot Correlation Coefficient, as obtained from `probplot`
when fitting the Yeo-Johnson transformed input `x` against a normal
distribution.
See Also
--------
probplot, yeojohnson, yeojohnson_normmax, yeojohnson_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`yeojohnson_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
should be used after calling `yeojohnson_normplot`.
.. versionadded:: 1.2.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Yeo-Johnson plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.yeojohnson_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.yeojohnson(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
return _normplot('yeojohnson', x, la, lb, plot, N)
def shapiro(x):
"""
Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
See Also
--------
anderson : The Anderson-Darling test for normality
kstest : The Kolmogorov-Smirnov test for goodness of fit.
Notes
-----
The algorithm used is described in [4]_ but censoring parameters as
described are not implemented. For N > 5000 the W test statistic is accurate
but the p-value may not be.
The chance of rejecting the null hypothesis when it is true is close to 5%
regardless of sample size.
References
----------
.. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for
normality (complete samples), Biometrika, Vol. 52, pp. 591-611.
.. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,
Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of
Statistical Modeling and Analytics, Vol. 2, pp. 21-33.
.. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = stats.norm.rvs(loc=5, scale=3, size=100)
>>> stats.shapiro(x)
(0.9772805571556091, 0.08144091814756393)
"""
x = np.ravel(x)
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
a = zeros(N, 'f')
init = 0
y = sort(x)
a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
if ifault not in [0, 2]:
warnings.warn("Input data for shapiro has range zero. The results "
"may not be accurate.")
if N > 5000:
warnings.warn("p-value may not be accurate for N > 5000.")
return w, pw
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of he American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
AndersonResult = namedtuple('AndersonResult', ('statistic',
'critical_values',
'significance_level'))
def anderson(x, dist='norm'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling tests the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
array of sample data
dist : {'norm', 'expon', 'logistic', 'gumbel', 'gumbel_l', 'gumbel_r',
'extreme1'}, optional
The type of distribution to test against. The default is 'norm'.
Note that 'extreme1', 'gumbel_l' and 'gumbel' are synonyms.
Returns
-------
statistic : float
The Anderson-Darling test statistic
critical_values : list
The critical values for this distribution
significance_level : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
See Also
--------
kstest : The Kolmogorov-Smirnov test for goodness-of-fit.
Notes
-----
Critical values provided are for the following significance levels:
normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
Gumbel
25%, 10%, 5%, 2.5%, 1%
If the returned statistic is larger than these critical values then
for the corresponding significance level, the null hypothesis that
the data come from the chosen distribution can be rejected.
The returned statistic is referred to as 'A2' in the references.
References
----------
.. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
to Tests for Exponentiality , Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
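Examples
--------
A minimal usage sketch (illustrative only; no exact numbers are shown
because they depend on the random sample):
>>> from scipy import stats
>>> np.random.seed(1234)
>>> x = stats.norm.rvs(size=100)
>>> statistic, critical_values, significance_levels = stats.anderson(x, dist='norm')
The null hypothesis of normality is rejected at a given significance level
when ``statistic`` exceeds the corresponding entry of ``critical_values``.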
"""
if dist not in ['norm', 'expon', 'gumbel', 'gumbel_l',
'gumbel_r', 'extreme1', 'logistic']:
raise ValueError("Invalid distribution; dist must be 'norm', "
"'expon', 'gumbel', 'extreme1' or 'logistic'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y - xbar) / s
logcdf = distributions.norm.logcdf(w)
logsf = distributions.norm.logsf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
elif dist == 'expon':
w = y / xbar
logcdf = distributions.expon.logcdf(w)
logsf = distributions.expon.logsf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
elif dist == 'logistic':
def rootfunc(ab, xj, N):
a, b = ab
tmp = (xj - a) / b
tmp2 = exp(tmp)
val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
return array(val)
sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
w = (y - sol[0]) / sol[1]
logcdf = distributions.logistic.logcdf(w)
logsf = distributions.logistic.logsf(w)
sig = array([25, 10, 5, 2.5, 1, 0.5])
critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
elif dist == 'gumbel_r':
xbar, s = distributions.gumbel_r.fit(x)
w = (y - xbar) / s
logcdf = distributions.gumbel_r.logcdf(w)
logsf = distributions.gumbel_r.logsf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
else: # (dist == 'gumbel') or (dist == 'gumbel_l') or (dist == 'extreme1')
xbar, s = distributions.gumbel_l.fit(x)
w = (y - xbar) / s
logcdf = distributions.gumbel_l.logcdf(w)
logsf = distributions.gumbel_l.logsf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
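# Compute the Anderson-Darling statistic
#     A^2 = -N - sum_{i=1..N} (2i - 1)/N * [log F(Y_i) + log(1 - F(Y_{N+1-i}))]
# on the sorted, standardized data; logsf[::-1] supplies the
# log(1 - F(Y_{N+1-i})) terms.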
i = arange(1, N + 1)
A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0)
return AndersonResult(A2, critical, sig)
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
Anderson_ksampResult = namedtuple('Anderson_ksampResult',
('statistic', 'critical_values',
'significance_level'))
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
statistic : float
Normalized k-sample Anderson-Darling test statistic.
critical_values : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%,
0.5%, 0.1%.
significance_level : float
An approximate significance level at which the null hypothesis for the
provided samples can be rejected. The value is floored / capped at
0.1% / 25%.
Raises
------
ValueError
If less than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
[1]_ defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
The critical values corresponding to the significance levels from 0.01
to 0.25 are taken from [1]_. p-values are floored / capped
at 0.1% / 25%. Since the range of critical values might be extended in
future releases, it is recommended not to test ``p == 0.25``, but rather
``p >= 0.25`` (analogously for the lower bound).
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(314159)
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
significance level of 3.2%:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(loc=0.5, size=30)])
(2.4615796189876105,
array([ 0.325, 1.226, 1.961, 2.718, 3.752, 4.592, 6.546]),
0.03176687568842282)
The null hypothesis cannot be rejected for three samples from an
identical distribution. The reported p-value (25%) has been capped and
may not be very accurate (since it corresponds to the value 0.449
whereas the statistic is -0.731):
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(size=30), np.random.normal(size=20)])
(-0.73091722665244196,
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856,
4.07210043, 5.56419101]),
0.25)
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
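# Estimate the variance of A2kN under the null hypothesis from the
# combinatorial quantities H, h and g of Scholz and Stephens (1987); the
# statistic is then centered at k - 1 and standardized below.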
H = (1. / n).sum()
hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
h = hs_cs[-1] + 1
g = (hs_cs / arange(2, N)).sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326, 2.573, 3.085])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822, 2.364, 3.615])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396, -0.345, -0.154])
critical = b0 + b1 / math.sqrt(m) + b2 / m
sig = np.array([0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.001])
if A2 < critical.min():
p = sig.max()
warnings.warn("p-value capped: true value larger than {}".format(p),
stacklevel=2)
elif A2 > critical.max():
p = sig.min()
warnings.warn("p-value floored: true value smaller than {}".format(p),
stacklevel=2)
else:
# interpolation of probit of significance level
pf = np.polyfit(critical, log(sig), 2)
p = math.exp(np.polyval(pf, A2))
return Anderson_ksampResult(A2, critical, p)
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
def ansari(x, y):
"""
Perform the Ansari-Bradley test for equal scale parameters
The Ansari-Bradley test is a non-parametric test for the equality
of the scale parameter of the distributions from which two
samples were drawn.
Parameters
----------
x, y : array_like
arrays of sample data
Returns
-------
statistic : float
The Ansari-Bradley test statistic
pvalue : float
The p-value of the hypothesis test
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
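Examples
--------
A minimal usage sketch (illustrative only; the exact statistic and p-value
depend on the random samples):
>>> from scipy import stats
>>> np.random.seed(1234)
>>> x1 = stats.norm.rvs(scale=1.0, size=40)
>>> x2 = stats.norm.rvs(scale=2.0, size=40)
>>> statistic, pvalue = stats.ansari(x1, x2)
A small ``pvalue`` suggests that the two samples come from distributions
with different scale parameters.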
"""
x, y = asarray(x), asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m + n
xy = r_[x, y] # combine
rank = stats.rankdata(xy)
symrank = amin(array((rank, N - rank + 1)), 0)
AB = np.sum(symrank[:n], axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and (m < 55 or n < 55):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n, m)
ind = AB - astart
total = np.sum(a1, axis=0)
if ind < len(a1)/2.0:
cind = int(ceil(ind))
if ind == cind:
pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total
else:
pval = 2.0 * np.sum(a1[:cind], axis=0) / total
else:
find = int(floor(ind))
if ind == floor(ind):
pval = 2.0 * np.sum(a1[find:], axis=0) / total
else:
pval = 2.0 * np.sum(a1[find+1:], axis=0) / total
return AnsariResult(AB, min(1.0, pval))
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n * (N+1.0)**2 / 4.0 / N
varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
else:
mnAB = n * (N+2.0) / 4.0
varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
if repeats: # adjust variance estimates
# compute np.sum(tj * rj**2,axis=0)
fac = np.sum(symrank**2, axis=0)
if N % 2: # N odd
varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
else: # N even
varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
z = (AB - mnAB) / sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AnsariResult(AB, pval)
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
def bartlett(*args):
"""
Perform Bartlett's test for equal variances
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
`levene` is more robust.
Parameters
----------
sample1, sample2,... : array_like
arrays of sample data. Only 1d arrays are accepted, they may have
different lengths.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value of the test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
levene : A robust parametric test for equality of k variances
Notes
-----
Conover et al. (1981) examine many of the existing parametric and
nonparametric tests by extensive simulations and they conclude that the
tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
superior in terms of robustness to departures from normality and power
([3]_).
References
----------
.. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
Tests. Proceedings of the Royal Society of London. Series A,
Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.
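Examples
--------
A minimal usage sketch (illustrative only; exact values depend on the
random samples):
>>> from scipy import stats
>>> np.random.seed(1234)
>>> a = stats.norm.rvs(scale=1.0, size=50)
>>> b = stats.norm.rvs(scale=1.5, size=50)
>>> c = stats.norm.rvs(scale=3.0, size=50)
>>> statistic, pvalue = stats.bartlett(a, b, c)
A small ``pvalue`` indicates that at least one sample comes from a
population with a different variance.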
"""
# Handle empty input and input that is not 1d
for a in args:
if np.asanyarray(a).size == 0:
return BartlettResult(np.nan, np.nan)
if np.asanyarray(a).ndim > 1:
raise ValueError('Samples must be one-dimensional.')
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
ssq = zeros(k, 'd')
for j in range(k):
Ni[j] = len(args[j])
ssq[j] = np.var(args[j], ddof=1)
Ntot = np.sum(Ni, axis=0)
spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k))
numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0)
denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) -
1.0/(Ntot - k))
T = numer / denom
pval = distributions.chi2.sf(T, k - 1) # 1 - cdf
return BartlettResult(T, pval)
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
def levene(*args, **kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths. Only one-dimensional
samples are accepted.
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
* 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
The test version using the mean was proposed in the original article
of Levene ([2]_) while the median and trimmed mean have been studied by
Brown and Forsythe ([3]_), sometimes also referred to as Brown-Forsythe
test.
References
----------
.. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
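Examples
--------
A minimal usage sketch (illustrative only; exact values depend on the
random samples):
>>> from scipy import stats
>>> np.random.seed(1234)
>>> a = stats.norm.rvs(scale=1.0, size=50)
>>> b = stats.norm.rvs(scale=2.0, size=50)
>>> statistic, pvalue = stats.levene(a, b, center='median')
A small ``pvalue`` suggests that the population variances are not equal.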
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
# check for 1d input
for j in range(k):
if np.asanyarray(args[j]).ndim > 1:
raise ValueError('Samples must be one-dimensional.')
Ni = zeros(k)
Yci = zeros(k, 'd')
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
" or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [None] * k
for i in range(k):
Zij[i] = abs(asarray(args[i]) - Yci[i])
# compute Zbari
Zbari = zeros(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i] * Ni[i]
Zbar /= Ntot
numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0)
denom = (k - 1.0) * dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return LeveneResult(W, pval)
def binom_test(x, n=None, p=0.5, alternative='two-sided'):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
x : integer or array_like
the number of successes, or if x has length 2, it is the
number of successes and the number of failures.
n : integer
the number of trials. This is ignored if x gives both the
number of successes and failures
p : float, optional
The hypothesized probability of success. 0 <= p <= 1. The
default value is p = 0.5
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is
'two-sided'.
Returns
-------
p-value : float
The p-value of the hypothesis test
References
----------
.. [1] https://en.wikipedia.org/wiki/Binomial_test
Examples
--------
>>> from scipy import stats
A car manufacturer claims that no more than 10% of their cars are unsafe.
15 cars are inspected for safety, 3 were found to be unsafe. Test the
manufacturer's claim:
>>> stats.binom_test(3, n=15, p=0.1, alternative='greater')
0.18406106910639114
The null hypothesis cannot be rejected at the 5% level of significance
because the returned p-value is greater than the critical value of 5%.
"""
x = atleast_1d(x).astype(np.integer)
if len(x) == 2:
n = x[1] + x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
if (p > 1.0) or (p < 0.0):
raise ValueError("p must be in range [0,1]")
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized\n"
"should be 'two-sided', 'less' or 'greater'")
if alternative == 'less':
pval = distributions.binom.cdf(x, n, p)
return pval
if alternative == 'greater':
pval = distributions.binom.sf(x-1, n, p)
return pval
# if alternative was neither 'less' nor 'greater', then it's 'two-sided'
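# The two-sided p-value adds, to the probability of the observed tail, the
# total probability of all outcomes in the opposite tail whose point
# probability does not exceed that of the observed count x (a small
# relative tolerance rerr guards against floating-point ties).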
d = distributions.binom.pmf(x, n, p)
rerr = 1 + 1e-7
if x == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif x < p * n:
i = np.arange(np.ceil(p * n), n+1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(x, n, p) +
distributions.binom.sf(n - y, n, p))
else:
i = np.arange(np.floor(p*n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(y-1, n, p) +
distributions.binom.sf(x-1, n, p))
return min(1.0, pval)
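# Illustrative sketch (not part of the original SciPy source; it assumes a
# SciPy version that still exposes stats.binom_test): for the docstring example
# (x=3, n=15, p=0.1) the two-sided p-value agrees with a direct sum of the
# probabilities of all outcomes that are no more likely than the observed one.
import numpy as np
from scipy import stats
x, n, p = 3, 15, 0.1
d = stats.binom.pmf(x, n, p)
pmfs = stats.binom.pmf(np.arange(n + 1), n, p)
direct = pmfs[pmfs <= d * (1 + 1e-7)].sum()
print(np.isclose(direct, stats.binom_test(x, n=n, p=p, alternative='two-sided')))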
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = [func(x[g[k]:g[k+1]]) for k in range(len(g) - 1)]
return asarray(output)
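# Illustrative note (not part of the original SciPy source): _apply_func splits
# x at the indices in g and applies func to each chunk, e.g.
#   _apply_func(np.arange(6), [2, 5], np.sum)  ->  array([1, 9, 5])
# because the chunks are x[0:2], x[2:5] and x[5:6].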
FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))
def fligner(*args, **kwds):
"""
Perform Fligner-Killeen test for equality of variance.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner-Killeen's test is
distribution free when populations are identical [2]_.
Parameters
----------
sample1, sample2, ... : array_like
Arrays of sample data. Need not be the same length.
center : {'mean', 'median', 'trimmed'}, optional
Keyword argument controlling which function of the data is used in
computing the test statistic. The default is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the hypothesis test.
See Also
--------
bartlett : A parametric test for equality of k variances in normal samples
levene : A robust parametric test for equality of k variances
Notes
-----
As with Levene's test there are three variants of Fligner's test that
differ by the measure of central tendency used in the test. See `levene`
for more information.
    Conover et al. (1981) examine many of the existing parametric and
    nonparametric tests by extensive simulations and they conclude that the
    tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
    superior in terms of robustness against departures from normality and
    power [3]_.
References
----------
.. [1] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
https://cecas.clemson.edu/~cspark/cv/paper/qif/draftqif2.pdf
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
    .. [3] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
       comparative study of tests for homogeneity of variances, with
       applications to the outer continental shelf bidding data.
       Technometrics, 23(4), 351-361.
"""
# Handle empty input
for a in args:
if np.asanyarray(a).size == 0:
return FlignerResult(np.nan, np.nan)
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
" or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
Ni = asarray([len(args[j]) for j in range(k)])
Yci = asarray([func(args[j]) for j in range(k)])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = stats.rankdata(allZij)
a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(a, g, np.sum) / Ni
anbar = np.mean(a, axis=0)
varsq = np.var(a, axis=0, ddof=1)
Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf
return FlignerResult(Xsq, pval)
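# Illustrative usage sketch (not part of the original SciPy source): the
# Fligner-Killeen test via the public API; sample names, sizes and the seed
# are made up. A shift in location (sample b) should not trigger the test,
# while an inflated scale (sample c) should.
import numpy as np
from scipy import stats
rng = np.random.RandomState(12345)
a = rng.normal(loc=0.0, scale=1.0, size=50)
b = rng.normal(loc=5.0, scale=1.0, size=60)
c = rng.normal(loc=0.0, scale=3.0, size=40)
stat, p = stats.fligner(a, b, c)
print(stat, p)  # a small p-value indicates unequal variances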
def mood(x, y, axis=0):
"""
Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
axis : int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
Returns
-------
z : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
returned.
    p-value : scalar or ndarray
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
    levene : A robust parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(1234)
>>> x2 = np.random.randn(2, 45, 6, 7)
>>> x1 = np.random.randn(2, 30, 6, 7)
>>> z, p = stats.mood(x1, x2, axis=1)
>>> p.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (p > 0.1).sum()
74
Perform the test with different scales:
>>> x1 = np.random.randn(2, 30)
>>> x2 = np.random.randn(2, 35) * 10.0
>>> stats.mood(x1, x2, axis=1)
(array([-5.7178125 , -5.25342163]), array([ 1.07904114e-08, 1.49299218e-07]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
if axis != 0:
xy = np.rollaxis(xy, axis)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument, and
# using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.zeros_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = stats.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
# sf for right tail, cdf for left tail. Factor 2 for two-sidedness
z_pos = z > 0
pval = np.zeros_like(z)
pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return z, pval
WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))
def wilcoxon(x, y=None, zero_method="wilcox", correction=False,
alternative="two-sided"):
"""
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
Either the first set of measurements (in which case `y` is the second
set of measurements), or the differences between two sets of
measurements (in which case `y` is not to be specified.) Must be
one-dimensional.
y : array_like, optional
Either the second set of measurements (if `x` is the first set of
measurements), or not specified (if `x` is the differences between
two sets of measurements.) Must be one-dimensional.
zero_method : {"pratt", "wilcox", "zsplit"}, optional. Default is "wilcox".
"pratt":
includes zero-differences in the ranking process,
but drops the ranks of the zeros, see [4]_, (more conservative)
"wilcox":
discards all zero-differences, the default
"zsplit":
includes zero-differences in the ranking process and split the
zero rank between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
alternative : {"two-sided", "greater", "less"}, optional
The alternative hypothesis to be tested, see Notes. Default is
"two-sided".
Returns
-------
statistic : float
If `alternative` is "two-sided", the sum of the ranks of the
differences above or below zero, whichever is smaller.
Otherwise the sum of the ranks of the differences above zero.
pvalue : float
The p-value for the test depending on `alternative`.
See Also
--------
kruskal, mannwhitneyu
Notes
-----
The test has been introduced in [4]_. Given n independent samples
(xi, yi) from a bivariate distribution (i.e. paired samples),
it computes the differences di = xi - yi. One assumption of the test
is that the differences are symmetric, see [2]_.
The two-sided test has the null hypothesis that the median of the
differences is zero against the alternative that it is different from
zero. The one-sided test has the null that the median is positive against
    the alternative that it is negative (``alternative == 'less'``),
    or vice versa (``alternative == 'greater'``).
The test uses a normal approximation to derive the p-value (if
``zero_method == 'pratt'``, the approximation is adjusted as in [5]_).
A typical rule is to require that n > 20 ([2]_, p. 383). For smaller n,
exact tables can be used to find critical values.
References
----------
.. [1] https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
.. [2] Conover, W.J., Practical Nonparametric Statistics, 1971.
.. [3] Pratt, J.W., Remarks on Zeros and Ties in the Wilcoxon Signed
Rank Procedures, Journal of the American Statistical Association,
Vol. 54, 1959, pp. 655-667. :doi:`10.1080/01621459.1959.10501526`
.. [4] Wilcoxon, F., Individual Comparisons by Ranking Methods,
Biometrics Bulletin, Vol. 1, 1945, pp. 80-83. :doi:`10.2307/3001968`
.. [5] Cureton, E.E., The Normal Approximation to the Signed-Rank
Sampling Distribution When Zero Differences are Present,
Journal of the American Statistical Association, Vol. 62, 1967,
pp. 1068-1069. :doi:`10.1080/01621459.1967.10500917`
Examples
--------
In [4]_, the differences in height between cross- and self-fertilized
corn plants is given as follows:
>>> d = [6, 8, 14, 16, 23, 24, 28, 29, 41, -48, 49, 56, 60, -67, 75]
    Cross-fertilized plants appear to be higher. To test the null
hypothesis that there is no height difference, we can apply the
two-sided test:
>>> from scipy.stats import wilcoxon
>>> w, p = wilcoxon(d)
>>> w, p
(24.0, 0.04088813291185591)
Hence, we would reject the null hypothesis at a confidence level of 5%,
concluding that there is a difference in height between the groups.
To confirm that the median of the differences can be assumed to be
positive, we use:
>>> w, p = wilcoxon(d, alternative='greater')
>>> w, p
(96.0, 0.020444066455927955)
This shows that the null hypothesis that the median is negative can be
rejected at a confidence level of 5% in favor of the alternative that
the median is greater than zero. The p-value based on the approximation
is within the range of 0.019 and 0.054 given in [2]_.
Note that the statistic changed to 96 in the one-sided case (the sum
of ranks of positive differences) whereas it is 24 in the two-sided
case (the minimum of sum of ranks above and below zero).
"""
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method should be either 'wilcox' "
"or 'pratt' or 'zsplit'")
if alternative not in ["two-sided", "less", "greater"]:
raise ValueError("Alternative must be either 'two-sided', "
"'greater' or 'less'")
if y is None:
d = asarray(x)
if d.ndim > 1:
raise ValueError('Sample x must be one-dimensional.')
else:
x, y = map(asarray, (x, y))
if x.ndim > 1 or y.ndim > 1:
raise ValueError('Samples x and y must be one-dimensional.')
if len(x) != len(y):
raise ValueError('The samples x and y must have the same length.')
d = x - y
if zero_method in ["wilcox", "pratt"]:
n_zero = np.sum(d == 0, axis=0)
if n_zero == len(d):
raise ValueError("zero_method 'wilcox' and 'pratt' do not work if "
"the x - y is zero for all elements.")
if zero_method == "wilcox":
# Keep all non-zero differences
d = compress(np.not_equal(d, 0), d, axis=-1)
count = len(d)
if count < 10:
warnings.warn("Sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = np.sum((d > 0) * r, axis=0)
r_minus = np.sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = np.sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
# return min for two-sided test, but r_plus for one-sided test
# the literature is not consistent here
# r_plus is more informative since r_plus + r_minus = count*(count+1)/2,
# i.e. the sum of the ranks, so r_minus and the min can be inferred
    # (If zero_method='pratt', r_plus + r_minus = count*(count+1)/2 - r_zero.)
# [3] uses the r_plus for the one-sided test, keep min for two-sided test
# to keep backwards compatibility
if alternative == "two-sided":
T = min(r_plus, r_minus)
else:
T = r_plus
mn = count * (count + 1.) * 0.25
se = count * (count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
# normal approximation needs to be adjusted, see Cureton (1967)
mn -= n_zero * (n_zero + 1.) * 0.25
se -= n_zero * (n_zero + 1.) * (2. * n_zero + 1.)
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
# apply continuity correction if applicable
d = 0
if correction:
if alternative == "two-sided":
d = 0.5 * np.sign(T - mn)
elif alternative == "less":
d = -0.5
else:
d = 0.5
# compute statistic and p-value using normal approximation
z = (T - mn - d) / se
if alternative == "two-sided":
prob = 2. * distributions.norm.sf(abs(z))
elif alternative == "greater":
# large T = r_plus indicates x is greater than y; i.e.
# accept alternative in that case and return small p-value (sf)
prob = distributions.norm.sf(z)
else:
prob = distributions.norm.cdf(z)
return WilcoxonResult(T, prob)
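# Illustrative sketch (not part of the original SciPy source): how the three
# zero_method options treat exact zero differences; the data below are made up.
from scipy import stats
diffs = [0, 0, 1, 2, -1, 3, 4, -2, 5, 6, 7, -3]
for method in ("wilcox", "pratt", "zsplit"):
    res = stats.wilcoxon(diffs, zero_method=method)
    print(method, res.statistic, res.pvalue)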
def median_test(*args, **kwds):
"""
Mood's median test.
Test that two or more samples come from populations with the same median.
Let ``n = len(args)`` be the number of samples. The "grand median" of
all the data is computed, and a contingency table is formed by
classifying the values in each sample as being above or below the grand
median. The contingency table, along with `correction` and `lambda_`,
are passed to `scipy.stats.chi2_contingency` to compute the test statistic
and p-value.
Parameters
----------
sample1, sample2, ... : array_like
The set of samples. There must be at least two samples.
Each sample must be a one-dimensional sequence containing at least
one value. The samples are not required to have the same length.
ties : str, optional
Determines how values equal to the grand median are classified in
the contingency table. The string must be one of::
"below":
Values equal to the grand median are counted as "below".
"above":
Values equal to the grand median are counted as "above".
"ignore":
Values equal to the grand median are not counted.
The default is "below".
correction : bool, optional
If True, *and* there are just two samples, apply Yates' correction
for continuity when computing the test statistic associated with
the contingency table. Default is True.
lambda_ : float or str, optional.
By default, the statistic computed in this test is Pearson's
chi-squared statistic. `lambda_` allows a statistic from the
Cressie-Read power divergence family to be used instead. See
`power_divergence` for details.
Default is 1 (Pearson's chi-squared statistic).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
stat : float
The test statistic. The statistic that is returned is determined by
`lambda_`. The default is Pearson's chi-squared statistic.
p : float
The p-value of the test.
m : float
The grand median.
table : ndarray
The contingency table. The shape of the table is (2, n), where
n is the number of samples. The first row holds the counts of the
values above the grand median, and the second row holds the counts
of the values below the grand median. The table allows further
analysis with, for example, `scipy.stats.chi2_contingency`, or with
`scipy.stats.fisher_exact` if there are two samples, without having
to recompute the table. If ``nan_policy`` is "propagate" and there
are nans in the input, the return value for ``table`` is ``None``.
See Also
--------
kruskal : Compute the Kruskal-Wallis H-test for independent samples.
mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
Notes
-----
.. versionadded:: 0.15.0
References
----------
.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
(1950), pp. 394-399.
.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
See Sections 8.12 and 10.15.
Examples
--------
A biologist runs an experiment in which there are three groups of plants.
Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
Each plant produces a number of seeds. The seed counts for each group
are::
Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
The following code applies Mood's median test to these samples.
>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
>>> from scipy.stats import median_test
>>> stat, p, med, tbl = median_test(g1, g2, g3)
The median is
>>> med
34.0
and the contingency table is
>>> tbl
array([[ 5, 10, 7],
[11, 5, 10]])
`p` is too large to conclude that the medians are not the same:
>>> p
0.12609082774093244
The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
`median_test`.
>>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
>>> p
0.12224779737117837
The median occurs several times in the data, so we'll get a different
result if, for example, ``ties="above"`` is used:
>>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
>>> p
0.063873276069553273
>>> tbl
array([[ 5, 11, 9],
[11, 4, 8]])
This example demonstrates that if the data set is not large and there
are values equal to the median, the p-value can be sensitive to the
choice of `ties`.
"""
ties = kwds.pop('ties', 'below')
correction = kwds.pop('correction', True)
lambda_ = kwds.pop('lambda_', None)
nan_policy = kwds.pop('nan_policy', 'propagate')
if len(kwds) > 0:
        bad_kwd = list(kwds.keys())[0]
raise TypeError("median_test() got an unexpected keyword "
"argument %r" % bad_kwd)
if len(args) < 2:
raise ValueError('median_test requires two or more samples.')
ties_options = ['below', 'above', 'ignore']
if ties not in ties_options:
raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
"of: %s" % (ties, str(ties_options)[1:-1]))
data = [np.asarray(arg) for arg in args]
# Validate the sizes and shapes of the arguments.
for k, d in enumerate(data):
if d.size == 0:
raise ValueError("Sample %d is empty. All samples must "
"contain at least one value." % (k + 1))
if d.ndim != 1:
raise ValueError("Sample %d has %d dimensions. All "
"samples must be one-dimensional sequences." %
(k + 1, d.ndim))
cdata = np.concatenate(data)
contains_nan, nan_policy = _contains_nan(cdata, nan_policy)
if contains_nan and nan_policy == 'propagate':
return np.nan, np.nan, np.nan, None
if contains_nan:
grand_median = np.median(cdata[~np.isnan(cdata)])
else:
grand_median = np.median(cdata)
# When the minimum version of numpy supported by scipy is 1.9.0,
# the above if/else statement can be replaced by the single line:
# grand_median = np.nanmedian(cdata)
# Create the contingency table.
table = np.zeros((2, len(data)), dtype=np.int64)
for k, sample in enumerate(data):
sample = sample[~np.isnan(sample)]
nabove = count_nonzero(sample > grand_median)
nbelow = count_nonzero(sample < grand_median)
nequal = sample.size - (nabove + nbelow)
table[0, k] += nabove
table[1, k] += nbelow
if ties == "below":
table[1, k] += nequal
elif ties == "above":
table[0, k] += nequal
# Check that no row or column of the table is all zero.
# Such a table can not be given to chi2_contingency, because it would have
# a zero in the table of expected frequencies.
rowsums = table.sum(axis=1)
if rowsums[0] == 0:
raise ValueError("All values are below the grand median (%r)." %
grand_median)
if rowsums[1] == 0:
raise ValueError("All values are above the grand median (%r)." %
grand_median)
if ties == "ignore":
# We already checked that each sample has at least one value, but it
# is possible that all those values equal the grand median. If `ties`
# is "ignore", that would result in a column of zeros in `table`. We
# check for that case here.
zero_cols = np.nonzero((table == 0).all(axis=0))[0]
if len(zero_cols) > 0:
msg = ("All values in sample %d are equal to the grand "
"median (%r), so they are ignored, resulting in an "
"empty sample." % (zero_cols[0] + 1, grand_median))
raise ValueError(msg)
stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
correction=correction)
return stat, p, grand_median, table
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2.*pi / (high - low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
Returns
-------
circmean : float
Circular mean.
Examples
--------
>>> from scipy.stats import circmean
>>> circmean([0.1, 2*np.pi+0.2, 6*np.pi+0.3])
0.2
>>> from scipy.stats import circmean
>>> circmean([0.2, 1.4, 2.6], high = 1, low = 0)
0.4
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).sum(axis=axis)
C = cos(ang).sum(axis=axis)
res = arctan2(S, C)
mask = res < 0
if mask.ndim > 0:
res[mask] += 2*pi
elif mask:
res += 2*pi
return res*(high - low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular variance for samples assumed to be in a range
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
Returns
-------
circvar : float
Circular variance.
Notes
-----
This uses a definition of circular variance that in the limit of small
angles returns a number close to the 'linear' variance.
Examples
--------
>>> from scipy.stats import circvar
>>> circvar([0, 2*np.pi/3, 5*np.pi/3])
2.19722457734
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).mean(axis=axis)
C = cos(ang).mean(axis=axis)
R = hypot(S, C)
return ((high - low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular standard deviation for samples assumed to be in the
range [low to high].
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
High boundary for circular standard deviation range.
Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
Returns
-------
circstd : float
Circular standard deviation.
Notes
-----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
Examples
--------
>>> from scipy.stats import circstd
>>> circstd([0, 0.1*np.pi/2, 0.001*np.pi, 0.03*np.pi/2])
0.063564063306
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).mean(axis=axis)
C = cos(ang).mean(axis=axis)
R = hypot(S, C)
return ((high - low)/2.0/pi) * sqrt(-2*log(R))
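# Illustrative sketch (not part of the original SciPy source): near the
# wrap-around point the circular statistics differ sharply from their linear
# counterparts; the two angles below are both close to 0 modulo 2*pi.
import numpy as np
from scipy import stats
angles = np.array([0.1, 2 * np.pi - 0.1])
print(np.mean(angles), np.std(angles))                # ~pi and ~3.04, misleading
print(stats.circmean(angles), stats.circstd(angles))  # ~0 and ~0.1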
|
lhilt/scipy
|
scipy/stats/morestats.py
|
Python
|
bsd-3-clause
| 111,757
|
[
"Gaussian"
] |
f670b57b1050ccbc3783ca52f848626bc5a84927df3e813b2be82d8193ef94c3
|
"""
Service class implements the server side part of the DISET protocol
There are 2 main parts in this class:
- All useful functions for initialization
- All useful functions to handle the requests
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: skip-file
# __searchInitFunctions gives RuntimeError: maximum recursion depth exceeded
import os
import time
import threading
# TODO: Remove ThreadPool later
useThreadPoolExecutor = False
if os.getenv('DIRAC_USE_NEWTHREADPOOL', 'YES').lower() in ('yes', 'true'):
from concurrent.futures import ThreadPoolExecutor
useThreadPoolExecutor = True
else:
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
import DIRAC
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.DErrno import ENOAUTH
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.Core.Utilities import Time, MemStat, Network
from DIRAC.Core.DISET.private.LockManager import LockManager
from DIRAC.FrameworkSystem.Client.MonitoringClient import MonitoringClient
from DIRAC.Core.DISET.private.ServiceConfiguration import ServiceConfiguration
from DIRAC.Core.DISET.private.TransportPool import getGlobalTransportPool
from DIRAC.Core.DISET.private.MessageBroker import MessageBroker, MessageSender
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Core.Utilities.ReturnValues import isReturnStructure
from DIRAC.Core.DISET.AuthManager import AuthManager
from DIRAC.FrameworkSystem.Client.SecurityLogClient import SecurityLogClient
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.Core.DISET.RequestHandler import getServiceOption
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
__RCSID__ = "$Id$"
class Service(object):
SVC_VALID_ACTIONS = {'RPC': 'export',
'FileTransfer': 'transfer',
'Message': 'msg',
'Connection': 'Message'}
SVC_SECLOG_CLIENT = SecurityLogClient()
def __init__(self, serviceData):
"""
Init the variables for the service
:param serviceData: dict with modName, standalone, loadName, moduleObj, classObj. e.g.:
{'modName': 'Framework/serviceName',
'standalone': True,
'loadName': 'Framework/serviceName',
'moduleObj': <module 'serviceNameHandler' from '/home/DIRAC/FrameworkSystem/Service/serviceNameHandler.pyo'>,
'classObj': <class 'serviceNameHandler.serviceHandler'>}
Standalone is true if there is only one service started
If it's false, every service is linked to a different MonitoringClient
"""
self._svcData = serviceData
self._name = serviceData['modName']
self._startTime = Time.dateTime()
self._validNames = [serviceData['modName']]
if serviceData['loadName'] not in self._validNames:
self._validNames.append(serviceData['loadName'])
self._cfg = ServiceConfiguration(list(self._validNames))
self._standalone = serviceData['standalone']
self.__monitorLastStatsUpdate = time.time()
self._stats = {'queries': 0, 'connections': 0}
self._authMgr = AuthManager("%s/Authorization" % PathFinder.getServiceSection(serviceData['loadName']))
self._transportPool = getGlobalTransportPool()
self.__cloneId = 0
self.__maxFD = 0
def setCloneProcessId(self, cloneId):
self.__cloneId = cloneId
if not self.activityMonitoring:
self._monitor.setComponentName("%s-Clone:%s" % (self._name, cloneId))
def _isMetaAction(self, action):
referedAction = Service.SVC_VALID_ACTIONS[action]
if referedAction in Service.SVC_VALID_ACTIONS:
return referedAction
return False
def initialize(self):
# Build the URLs
self._url = self._cfg.getURL()
if not self._url:
return S_ERROR("Could not build service URL for %s" % self._name)
gLogger.verbose("Service URL is %s" % self._url)
# Load handler
result = self._loadHandlerInit()
if not result['OK']:
return result
self._handler = result['Value']
# Initialize lock manager
self._lockManager = LockManager(self._cfg.getMaxWaitingPetitions())
# TODO: remove ThreadPool
if useThreadPoolExecutor:
self._threadPool = ThreadPoolExecutor(max(0, self._cfg.getMaxThreads()))
else:
self._threadPool = ThreadPool(max(1, self._cfg.getMinThreads()),
max(0, self._cfg.getMaxThreads()),
self._cfg.getMaxWaitingPetitions())
self._threadPool.daemonize()
self._msgBroker = MessageBroker("%sMSB" % self._name, threadPool=self._threadPool)
# Create static dict
self._serviceInfoDict = {'serviceName': self._name,
'serviceSectionPath': PathFinder.getServiceSection(self._name),
'URL': self._cfg.getURL(),
'messageSender': MessageSender(self._name, self._msgBroker),
'validNames': self._validNames,
'csPaths': [PathFinder.getServiceSection(svcName) for svcName in self._validNames]
}
# Initialize Monitoring
# This is a flag used to check whether "EnableActivityMonitoring" is enabled or not from the config file.
self.activityMonitoring = (
Operations().getValue("EnableActivityMonitoring", False) or
getServiceOption(self._serviceInfoDict, "EnableActivityMonitoring", False)
)
if self.activityMonitoring:
# The import needs to be here because of the CS must be initialized before importing
# this class (see https://github.com/DIRACGrid/DIRAC/issues/4793)
from DIRAC.MonitoringSystem.Client.MonitoringReporter import MonitoringReporter
self.activityMonitoringReporter = MonitoringReporter(monitoringType="ComponentMonitoring")
gThreadScheduler.addPeriodicTask(100, self.__activityMonitoringReporting)
elif self._standalone:
self._monitor = gMonitor
else:
self._monitor = MonitoringClient()
self._initMonitoring()
# Call static initialization function
try:
if self.activityMonitoring:
self._handler['class']._rh__initializeClass(dict(self._serviceInfoDict),
self._lockManager,
self._msgBroker,
self.activityMonitoringReporter)
else:
self._handler['class']._rh__initializeClass(dict(self._serviceInfoDict),
self._lockManager,
self._msgBroker,
self._monitor)
if self._handler['init']:
for initFunc in self._handler['init']:
gLogger.verbose("Executing initialization function")
try:
result = initFunc(dict(self._serviceInfoDict))
except Exception as excp:
gLogger.exception("Exception while calling initialization function", lException=excp)
return S_ERROR("Exception while calling initialization function: %s" % str(excp))
if not isReturnStructure(result):
return S_ERROR("Service initialization function %s must return S_OK/S_ERROR" % initFunc)
if not result['OK']:
return S_ERROR("Error while initializing %s: %s" % (self._name, result['Message']))
except Exception as e:
errMsg = "Exception while initializing %s" % self._name
gLogger.exception(e)
gLogger.exception(errMsg)
return S_ERROR(errMsg)
# Load actions after the handler has initialized itself
result = self._loadActions()
if not result['OK']:
return result
self._actions = result['Value']
if not self.activityMonitoring:
gThreadScheduler.addPeriodicTask(30, self.__reportThreadPoolContents)
return S_OK()
def __searchInitFunctions(self, handlerClass, currentClass=None):
if not currentClass:
currentClass = handlerClass
initFuncs = []
ancestorHasInit = False
for ancestor in currentClass.__bases__:
initFuncs += self.__searchInitFunctions(handlerClass, ancestor)
if 'initializeHandler' in dir(ancestor):
ancestorHasInit = True
if ancestorHasInit:
initFuncs.append(super(currentClass, handlerClass).initializeHandler)
if currentClass == handlerClass and 'initializeHandler' in dir(handlerClass):
initFuncs.append(handlerClass.initializeHandler)
return initFuncs
def _loadHandlerInit(self):
handlerClass = self._svcData['classObj']
handlerName = handlerClass.__name__
handlerInitMethods = self.__searchInitFunctions(handlerClass)
try:
handlerInitMethods.append(getattr(self._svcData['moduleObj'], "initialize%s" % handlerName))
except AttributeError:
gLogger.verbose("Not found global initialization function for service")
if handlerInitMethods:
gLogger.info("Found %s initialization methods" % len(handlerInitMethods))
handlerInfo = {}
handlerInfo["name"] = handlerName
handlerInfo["module"] = self._svcData['moduleObj']
handlerInfo["class"] = handlerClass
handlerInfo["init"] = handlerInitMethods
return S_OK(handlerInfo)
def _loadActions(self):
handlerClass = self._handler['class']
authRules = {}
typeCheck = {}
methodsList = {}
for actionType in Service.SVC_VALID_ACTIONS:
if self._isMetaAction(actionType):
continue
authRules[actionType] = {}
typeCheck[actionType] = {}
methodsList[actionType] = []
handlerAttributeList = dir(handlerClass)
for actionType in Service.SVC_VALID_ACTIONS:
if self._isMetaAction(actionType):
continue
methodPrefix = '%s_' % Service.SVC_VALID_ACTIONS[actionType]
for attribute in handlerAttributeList:
if attribute.find(methodPrefix) != 0:
continue
exportedName = attribute[len(methodPrefix):]
methodsList[actionType].append(exportedName)
gLogger.verbose("+ Found %s method %s" % (actionType, exportedName))
# Create lock for method
self._lockManager.createLock("%s/%s" % (actionType, exportedName),
self._cfg.getMaxThreadsForMethod(actionType, exportedName))
# Look for type and auth rules
if actionType == 'RPC':
typeAttr = "types_%s" % exportedName
authAttr = "auth_%s" % exportedName
else:
typeAttr = "types_%s_%s" % (Service.SVC_VALID_ACTIONS[actionType], exportedName)
authAttr = "auth_%s_%s" % (Service.SVC_VALID_ACTIONS[actionType], exportedName)
if typeAttr in handlerAttributeList:
obj = getattr(handlerClass, typeAttr)
gLogger.verbose("|- Found type definition %s: %s" % (typeAttr, str(obj)))
typeCheck[actionType][exportedName] = obj
if authAttr in handlerAttributeList:
obj = getattr(handlerClass, authAttr)
gLogger.verbose("|- Found auth rules %s: %s" % (authAttr, str(obj)))
authRules[actionType][exportedName] = obj
for actionType in Service.SVC_VALID_ACTIONS:
referedAction = self._isMetaAction(actionType)
if not referedAction:
continue
gLogger.verbose("Action %s is a meta action for %s" % (actionType, referedAction))
authRules[actionType] = []
for method in authRules[referedAction]:
for prop in authRules[referedAction][method]:
if prop not in authRules[actionType]:
authRules[actionType].append(prop)
gLogger.verbose("Meta action %s props are %s" % (actionType, authRules[actionType]))
return S_OK({'methods': methodsList, 'auth': authRules, 'types': typeCheck})
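  # Illustrative sketch (comment only, not part of the original DIRAC source):
  # _loadActions discovers actions purely by naming convention, so a handler
  # exposing an RPC call "ping" would typically look like the hypothetical
  # class below (all names are made up; the base class is assumed to be the
  # usual DIRAC.Core.DISET.RequestHandler.RequestHandler):
  #
  #   class PingHandler(RequestHandler):
  #     types_ping = []          # found via the "types_<name>" attribute
  #     auth_ping = ['all']      # found via the "auth_<name>" attribute
  #
  #     def export_ping(self):   # the "export_" prefix marks an RPC action
  #       return S_OK('pong')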
def _initMonitoring(self):
if not self.activityMonitoring:
# Init extra bits of monitoring
self._monitor.setComponentType(MonitoringClient.COMPONENT_SERVICE)
self._monitor.setComponentName(self._name)
self._monitor.setComponentLocation(self._cfg.getURL())
self._monitor.initialize()
self._monitor.registerActivity(
"Connections",
"Connections received",
"Framework",
"connections",
MonitoringClient.OP_RATE)
self._monitor.registerActivity("Queries", "Queries served", "Framework", "queries", MonitoringClient.OP_RATE)
self._monitor.registerActivity('CPU', "CPU Usage", 'Framework', "CPU,%", MonitoringClient.OP_MEAN, 600)
self._monitor.registerActivity('MEM', "Memory Usage", 'Framework', 'Memory,MB', MonitoringClient.OP_MEAN, 600)
self._monitor.registerActivity(
'PendingQueries',
"Pending queries",
'Framework',
'queries',
MonitoringClient.OP_MEAN)
self._monitor.registerActivity(
'ActiveQueries',
"Active queries",
'Framework',
'threads',
MonitoringClient.OP_MEAN)
self._monitor.registerActivity(
'RunningThreads',
"Running threads",
'Framework',
'threads',
MonitoringClient.OP_MEAN)
self._monitor.registerActivity('MaxFD', "Max File Descriptors", 'Framework', 'fd', MonitoringClient.OP_MEAN)
self._monitor.setComponentExtraParam('DIRACVersion', DIRAC.version)
self._monitor.setComponentExtraParam('platform', DIRAC.getPlatform())
self._monitor.setComponentExtraParam('startTime', Time.dateTime())
for prop in (("__RCSID__", "version"), ("__doc__", "description")):
try:
value = getattr(self._handler['module'], prop[0])
except Exception as e:
gLogger.exception(e)
gLogger.error("Missing property", prop[0])
value = 'unset'
self._monitor.setComponentExtraParam(prop[1], value)
for secondaryName in self._cfg.registerAlsoAs():
gLogger.info("Registering %s also as %s" % (self._name, secondaryName))
self._validNames.append(secondaryName)
return S_OK()
def __reportThreadPoolContents(self):
# TODO: remove later
    if useThreadPoolExecutor:
      pendingQueries = self._threadPool._work_queue.qsize()
      activeQueries = len(self._threadPool._threads)
    else:
      pendingQueries = self._threadPool.pendingJobs()
      activeQueries = self._threadPool.numWorkingThreads()
    self._monitor.addMark('PendingQueries', pendingQueries)
    self._monitor.addMark('ActiveQueries', activeQueries)
self._monitor.addMark('RunningThreads', threading.activeCount())
self._monitor.addMark('MaxFD', self.__maxFD)
self.__maxFD = 0
def getConfig(self):
return self._cfg
# End of initialization functions
def handleConnection(self, clientTransport):
"""
This method may be called by ServiceReactor.
    The method puts the opened connection in a queue; another thread
    reads this queue and handles the connection.
:param clientTransport: Object which describes opened connection (PlainTransport or SSLTransport)
"""
if not self.activityMonitoring:
self._stats['connections'] += 1
self._monitor.setComponentExtraParam('queries', self._stats['connections'])
# TODO: remove later
if useThreadPoolExecutor:
self._threadPool.submit(self._processInThread, clientTransport)
else:
self._threadPool.generateJobAndQueueIt(self._processInThread,
args=(clientTransport,))
# Threaded process function
def _processInThread(self, clientTransport):
"""
    This method handles an RPC, FileTransfer or Connection.
    Connection may be opened via ServiceReactor.__acceptIncomingConnection
      - Do the SSL/TLS handshake (if dips is used) and extract credentials
      - Get the action called by the client
      - Check if the client is authorized to perform the action
        - If not, the connection is closed
      - Instantiate the RequestHandler (the RequestHandler contains all callable methods)
      (The following is not directly in this method but it describes what happens at
      #Execute the action)
      - Notify the client we're ready to execute the action (via _processProposal)
        and call RequestHandler._rh_executeAction()
      - Receive arguments/file/something else (depending on the action) in the RequestHandler
      - Execute the action asked by the client
    :param clientTransport: Object which describes the opened connection (SSLTransport or PlainTransport)
    :return: S_OK with "closeTransport", a boolean indicating whether the connection has to be closed,
             e.g. after an RPC, closeTransport=True
"""
self.__maxFD = max(self.__maxFD, clientTransport.oSocket.fileno())
self._lockManager.lockGlobal()
try:
monReport = self.__startReportToMonitoring()
except Exception:
monReport = False
try:
# Handshake
try:
result = clientTransport.handshake()
if not result['OK']:
clientTransport.close()
return
except Exception:
return
# Add to the transport pool
trid = self._transportPool.add(clientTransport)
if not trid:
return
# Receive and check proposal
result = self._receiveAndCheckProposal(trid)
if not result['OK']:
self._transportPool.sendAndClose(trid, result)
return
proposalTuple = result['Value']
# Instantiate handler
result = self._instantiateHandler(trid, proposalTuple)
if not result['OK']:
self._transportPool.sendAndClose(trid, result)
return
handlerObj = result['Value']
# Execute the action
result = self._processProposal(trid, proposalTuple, handlerObj)
# Close the connection if required
if result['closeTransport'] or not result['OK']:
if not result['OK']:
gLogger.error("Error processing proposal", result['Message'])
self._transportPool.close(trid)
return result
finally:
self._lockManager.unlockGlobal()
if monReport:
self.__endReportToMonitoring(*monReport)
def _createIdentityString(self, credDict, clientTransport=None):
if 'username' in credDict:
if 'group' in credDict:
identity = "[%s:%s]" % (credDict['username'], credDict['group'])
else:
identity = "[%s:unknown]" % credDict['username']
else:
identity = 'unknown'
if clientTransport:
addr = clientTransport.getRemoteAddress()
if addr:
addr = "{%s:%s}" % (addr[0], addr[1])
if 'DN' in credDict:
identity += "(%s)" % credDict['DN']
return identity
@staticmethod
def _deserializeProposalTuple(serializedProposal):
""" We receive the proposalTuple as a list.
Turn it into a tuple again
"""
proposalTuple = tuple(tuple(x) if isinstance(x, list) else x for x in serializedProposal)
return proposalTuple
def _receiveAndCheckProposal(self, trid):
clientTransport = self._transportPool.get(trid)
# Get the peer credentials
credDict = clientTransport.getConnectingCredentials()
# Receive the action proposal
retVal = clientTransport.receiveData(1024)
if not retVal['OK']:
gLogger.error("Invalid action proposal", "%s %s" % (self._createIdentityString(credDict,
clientTransport),
retVal['Message']))
return S_ERROR("Invalid action proposal")
proposalTuple = Service._deserializeProposalTuple(retVal['Value'])
gLogger.debug("Received action from client", "/".join(list(proposalTuple[1])))
# Check if there are extra credentials
if proposalTuple[2]:
clientTransport.setExtraCredentials(proposalTuple[2])
# Check if this is the requested service
requestedService = proposalTuple[0][0]
if requestedService not in self._validNames:
return S_ERROR("%s is not up in this server" % requestedService)
# Check if the action is valid
requestedActionType = proposalTuple[1][0]
if requestedActionType not in Service.SVC_VALID_ACTIONS:
return S_ERROR("%s is not a known action type" % requestedActionType)
# Check if it's authorized
result = self._authorizeProposal(proposalTuple[1], trid, credDict)
if not result['OK']:
return result
# Proposal is OK
return S_OK(proposalTuple)
def _authorizeProposal(self, actionTuple, trid, credDict):
# Find CS path for the Auth rules
referedAction = self._isMetaAction(actionTuple[0])
if referedAction:
csAuthPath = "%s/Default" % actionTuple[0]
hardcodedMethodAuth = self._actions['auth'][actionTuple[0]]
else:
if actionTuple[0] == 'RPC':
csAuthPath = actionTuple[1]
else:
csAuthPath = "/".join(actionTuple)
# Find if there are hardcoded auth rules in the code
hardcodedMethodAuth = False
if actionTuple[0] in self._actions['auth']:
hardcodedRulesByType = self._actions['auth'][actionTuple[0]]
if actionTuple[0] == "FileTransfer":
methodName = actionTuple[1][0].lower() + actionTuple[1][1:]
else:
methodName = actionTuple[1]
if methodName in hardcodedRulesByType:
hardcodedMethodAuth = hardcodedRulesByType[methodName]
# Auth time!
if not self._authMgr.authQuery(csAuthPath, credDict, hardcodedMethodAuth):
# Get the identity string
identity = self._createIdentityString(credDict)
fromHost = "unknown host"
tr = self._transportPool.get(trid)
if tr:
fromHost = '/'.join([str(item) for item in tr.getRemoteAddress()])
gLogger.warn("Unauthorized query", "to %s:%s by %s from %s" % (self._name,
"/".join(actionTuple),
identity, fromHost))
result = S_ERROR(ENOAUTH, "Unauthorized query")
else:
result = S_OK()
# Security log
tr = self._transportPool.get(trid)
if not tr:
return S_ERROR("Client disconnected")
sourceAddress = tr.getRemoteAddress()
identity = self._createIdentityString(credDict)
Service.SVC_SECLOG_CLIENT.addMessage(result['OK'], sourceAddress[0], sourceAddress[1], identity,
self._cfg.getHostname(),
self._cfg.getPort(),
self._name, "/".join(actionTuple))
return result
def _instantiateHandler(self, trid, proposalTuple=None):
"""
Generate an instance of the handler for a given service
:param int trid: transport ID
:param tuple proposalTuple: tuple describing the proposed action
:return: S_OK/S_ERROR, Value is the handler object
"""
# Generate the client params
clientParams = {'serviceStartTime': self._startTime}
if proposalTuple:
# The 4th element is the client version
clientParams['clientVersion'] = proposalTuple[3] if len(proposalTuple) > 3 else None
clientParams['clientSetup'] = proposalTuple[0][1]
if len(proposalTuple[0]) < 3:
clientParams['clientVO'] = gConfig.getValue("/DIRAC/VirtualOrganization", "unknown")
else:
clientParams['clientVO'] = proposalTuple[0][2]
clientTransport = self._transportPool.get(trid)
if clientTransport:
clientParams['clientAddress'] = clientTransport.getRemoteAddress()
# Generate handler dict with per client info
handlerInitDict = dict(self._serviceInfoDict)
for key in clientParams:
handlerInitDict[key] = clientParams[key]
# Instantiate and initialize
try:
handlerInstance = self._handler['class'](handlerInitDict, trid)
handlerInstance.initialize()
except Exception as e:
gLogger.exception("Server error while loading handler: %s" % str(e))
return S_ERROR("Server error while loading handler")
return S_OK(handlerInstance)
def _processProposal(self, trid, proposalTuple, handlerObj):
# Notify the client we're ready to execute the action
retVal = self._transportPool.send(trid, S_OK())
if not retVal['OK']:
return retVal
messageConnection = False
if proposalTuple[1] == ('Connection', 'new'):
messageConnection = True
if messageConnection:
if self._msgBroker.getNumConnections() > self._cfg.getMaxMessagingConnections():
result = S_ERROR("Maximum number of connections reached. Try later")
result['closeTransport'] = True
return result
# This is a stable connection
self._msgBroker.addTransportId(trid, self._name,
receiveMessageCallback=self._mbReceivedMsg,
disconnectCallback=self._mbDisconnect,
listenToConnection=False)
result = self._executeAction(trid, proposalTuple, handlerObj)
if result['OK'] and messageConnection:
self._msgBroker.listenToTransport(trid)
result = self._mbConnect(trid, handlerObj)
if not result['OK']:
self._msgBroker.removeTransport(trid)
result['closeTransport'] = not messageConnection or not result['OK']
return result
def _mbConnect(self, trid, handlerObj=None):
if not handlerObj:
result = self._instantiateHandler(trid)
if not result['OK']:
return result
handlerObj = result['Value']
return handlerObj._rh_executeConnectionCallback('connected')
def _executeAction(self, trid, proposalTuple, handlerObj):
try:
response = handlerObj._rh_executeAction(proposalTuple)
if self.activityMonitoring and response["OK"]:
self.activityMonitoringReporter.addRecord({
'timestamp': int(Time.toEpoch()),
'host': Network.getFQDN(),
'componentType': 'service',
'component': "_".join(self._name.split("/")),
'componentLocation': self._cfg.getURL(),
'ServiceResponseTime': response["Value"][1]
})
return response["Value"][0]
except Exception as e:
gLogger.exception("Exception while executing handler action")
return S_ERROR("Server error while executing action: %s" % str(e))
def _mbReceivedMsg(self, trid, msgObj):
result = self._authorizeProposal(('Message', msgObj.getName()),
trid,
self._transportPool.get(trid).getConnectingCredentials())
if not result['OK']:
return result
result = self._instantiateHandler(trid)
if not result['OK']:
return result
handlerObj = result['Value']
response = handlerObj._rh_executeMessageCallback(msgObj)
if self.activityMonitoring and response["OK"]:
self.activityMonitoringReporter.addRecord({
'timestamp': int(Time.toEpoch()),
'host': Network.getFQDN(),
'componentType': 'service',
'component': "_".join(self._name.split("/")),
'componentLocation': self._cfg.getURL(),
'ServiceResponseTime': response["Value"][1]
})
if response["OK"]:
return response["Value"][0]
else:
return response
def _mbDisconnect(self, trid):
result = self._instantiateHandler(trid)
if not result['OK']:
return result
handlerObj = result['Value']
return handlerObj._rh_executeConnectionCallback('drop')
def __activityMonitoringReporting(self):
""" This method is called by the ThreadScheduler as a periodic task in order to commit the collected data which
is done by the MonitoringReporter and is send to the 'ComponentMonitoring' type.
:return: True / False
"""
result = self.activityMonitoringReporter.commit()
return result['OK']
def __startReportToMonitoring(self):
if not self.activityMonitoring:
self._monitor.addMark("Queries")
now = time.time()
stats = os.times()
cpuTime = stats[0] + stats[2]
if now - self.__monitorLastStatsUpdate < 0:
return (now, cpuTime)
# Send CPU consumption mark
wallClock = now - self.__monitorLastStatsUpdate
self.__monitorLastStatsUpdate = now
# Send Memory consumption mark
membytes = MemStat.VmB('VmRSS:')
if membytes:
mem = membytes / (1024. * 1024.)
if not self.activityMonitoring:
self._monitor.addMark('MEM', mem)
return (now, cpuTime)
def __endReportToMonitoring(self, initialWallTime, initialCPUTime):
wallTime = time.time() - initialWallTime
stats = os.times()
cpuTime = stats[0] + stats[2] - initialCPUTime
percentage = cpuTime / wallTime * 100.
if percentage > 0:
if not self.activityMonitoring:
self._monitor.addMark('CPU', percentage)
|
yujikato/DIRAC
|
src/DIRAC/Core/DISET/private/Service.py
|
Python
|
gpl-3.0
| 28,969
|
[
"DIRAC"
] |
99110fd1048ef399a3e0eaf2a03b4450bcd6583b487c33a0b84e63970c62ba69
|
from . import neuralnet
from . import neuron
from . import helpers
__all__ = ["neuralnet", "neuron", "helpers"]
|
jpypi/ffnn
|
__init__.py
|
Python
|
mit
| 113
|
[
"NEURON"
] |
0bd722962dd488d122425edd1be2ab0a1ba5e2ade6cc472dfe6a277ed1f793e8
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import AbinsModules
class SingleCrystalData(AbinsModules.GeneralData):
"""
Class for storing the data in case sample has a form of single crystal.
"""
def __init__(self):
super(SingleCrystalData, self).__init__()
self._dw_crystal_data = None
self._abins_data = None
def set(self, abins_data=None, dw_crystal_data=None):
"""
:param abins_data: object of type AbinsData with data from DFT phonon calculation.
:param dw_crystal_data: object of type DwCrystalData with DW for the case of crystal.
"""
if isinstance(abins_data, AbinsModules.AbinsData):
self._abins_data = abins_data
else:
raise ValueError("Object of type AbinsData was expected.")
if isinstance(dw_crystal_data, AbinsModules.DWSingleCrystalData):
self._dw_crystal_data = dw_crystal_data
else:
raise ValueError("Object of type DwCrystalData was expected.")
self._data = {"abins_data": self._abins_data.extract(), "dw_crystal_data": self._dw_crystal_data.extract()}
def extract(self):
gamma = AbinsModules.AbinsConstants.GAMMA_POINT
if (self._data["abins_data"]["k_points_data"]["atomic_displacements"][gamma].shape[0] ==
self._data["dw_crystal_data"].shape[0]):
return self._data
else:
raise ValueError("Object fo type CrystalData is inconsistent.")
def __str__(self):
return "Crystal data"
|
mganeva/mantid
|
scripts/AbinsModules/SingleCrystalData.py
|
Python
|
gpl-3.0
| 1,850
|
[
"CRYSTAL"
] |
f5ffe26bd6345e8a0cbc7fb76bc0c2c97adefeb8f4c2e973c64a8e39e618b037
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...fluid.initializer import Initializer
from ...fluid.data_feeder import check_variable_and_dtype
from ...fluid.core import VarDesc
from ...fluid import framework
from paddle import in_dynamic_mode
from paddle.utils import unique_name
__all__ = []
class Dirac(Initializer):
r"""Initialize the 3D/4D/5D Tensor with Dirac delta function.
It can reserve the feature of convolution layer input, which means that
as many channels are reserved as possible.
In this initialize method, elements in the middle of convolution kernels will
    be set to 1. The formula can be described as follows.
.. math::
X[d, d, shape[2]//2, shape[3]//2, ...]=1, \ d=0,1...N
where, ``N`` is the minimum value of ``in_channels`` and ``out_channels``
Args:
groups(int, optional): 0-dimension of the Tensor will be divided by groups,
each group has the same value. Default: 1.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Dirac initializer instance objects.
Examples:
.. code-block:: python
import paddle
#1. For kernel_size is uneven number:
attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Dirac())
conv = paddle.nn.Conv1D(3, 2, 3, weight_attr=attr)
conv.weight
# Tensor(shape=[2, 3, 3], dtype=float32, place=CPUPlace, stop_gradient=False,
# [[[0., 1., 0.],
# [0., 0., 0.],
# [0., 0., 0.]],
#
# [[0., 0., 0.],
# [0., 1., 0.],
# [0., 0., 0.]]])
input = paddle.rand([8, 3, 10])
output = conv(input)
output == input[:, 0:2, 1:9]
# output.shape is [8, 2, 8], It means output is almost the same with input, 2 channels are reserved
#2. For kernel_size is even number:
attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Dirac())
conv = paddle.nn.Conv1D(3, 2, 4, weight_attr=attr)
conv.weight
# Tensor(shape=[2, 3, 4], dtype=float32, place=CPUPlace, stop_gradient=False,
# [[[0., 0., 1., 0.],
# [0., 0., 0., 0.],
# [0., 0., 0., 0.]],
#
# [[0., 0., 0., 0.],
# [0., 0., 1., 0.],
# [0., 0., 0., 0.]]])
"""
def __init__(self, groups=1, name=None):
assert groups > 0 and isinstance(
groups, int), " 'groups' must be a positive integer. "
super(Dirac, self).__init__()
self._groups = groups
def __call__(self, var, block=None):
"""Initialize the input tensor with dirac initializer.
Args:
var(Tensor): Tensor that needs to be initialized.
block(Block, optional): The block in which initialization ops
should be added. Used in static graph only, default None.
Returns:
            The most critical OP (scatter) in this initializer; the whole initialization adds 7~8 OPs in total.
"""
block = self._check_block(block)
assert isinstance(var, framework.Parameter)
assert isinstance(block, framework.Block)
check_variable_and_dtype(
var, "Out", ['float16', 'bfloat16', 'float32', 'float64'], 'Dirac')
assert len(var.shape) in [
3, 4, 5
], "Only Tensor with 3/4/5 dimensions can be initialized by Dirac"
assert (var.shape[0] % self._groups
) == 0, "Tensor 0-dimension must be divisible by groups"
if var.dtype != VarDesc.VarType.FP32:
out_var = block.create_var(
name=unique_name.generate(".".join(['dirac', var.name, 'tmp'])),
shape=var.shape,
dtype=VarDesc.VarType.FP32,
type=VarDesc.VarType.LOD_TENSOR,
persistable=False)
else:
out_var = var
block.append_op(
type='fill_constant',
inputs={},
outputs={'Out': out_var},
attrs={
'value': float(0),
'dtype': out_var.dtype,
'shape': out_var.shape,
},
stop_gradient=True)
origin_shape = var.shape
num_per_group = origin_shape[0] // self._groups
min_shape = min(num_per_group, origin_shape[1])
idx_list = []
value_list = []
strides = []
prod = 1
for dim in reversed(origin_shape):
strides.insert(0, prod)
prod *= dim
for i in range(self._groups):
for j in range(min_shape):
value_list.append(1.0)
offset = 0
for (k, stride) in enumerate(strides):
if (k == 0):
offset += (j + i * num_per_group) * stride
elif (k == 1):
offset += j * stride
else:
offset += origin_shape[k] // 2 * stride
idx_list.append(offset)
block.append_op(
type="reshape",
inputs={"X": out_var},
attrs={'shape': [-1]},
outputs={"Out": out_var},
stop_gradient=True)
index_tensor = block.create_var(
name=unique_name.generate('scatter_index'),
persistable=False,
stop_gradient=True)
block.append_op(
type='assign_value',
outputs={'Out': index_tensor},
attrs={
'dtype': VarDesc.VarType.INT64,
'shape': [len(idx_list)],
'int64_values': idx_list
},
stop_gradient=True)
value_tensor = block.create_var(
name=unique_name.generate('scatter_value'),
persistable=False,
stop_gradient=True)
block.append_op(
type='assign_value',
outputs={'Out': value_tensor},
attrs={
'dtype': VarDesc.VarType.FP32,
'shape': [len(value_list)],
'fp32_values': value_list
},
stop_gradient=True)
op = block.append_op(
type="scatter",
inputs={
"X": out_var,
"Ids": index_tensor,
"Updates": value_tensor
},
attrs={'overwrite': True},
outputs={"Out": out_var},
stop_gradient=True)
block.append_op(
type="reshape",
inputs={"X": out_var},
attrs={'shape': origin_shape},
outputs={"Out": out_var},
stop_gradient=True)
if var.dtype != VarDesc.VarType.FP32:
block.append_op(
type="cast",
inputs={"X": out_var},
outputs={"Out": var},
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype},
stop_gradient=True)
if not in_dynamic_mode():
var.op = op
return op
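# --- Illustrative check (not part of the Paddle source above; example shapes only):
# the stride/offset arithmetic in __call__ addresses element
# [j + i * num_per_group, j, shape[2] // 2, ...] of the flattened kernel tensor.
# A minimal pure-Python sketch of that index computation follows.
def _dirac_flat_indices(shape, groups=1):
    """Return the flat indices that the Dirac initializer sets to 1."""
    strides, prod = [], 1
    for dim in reversed(shape):
        strides.insert(0, prod)
        prod *= dim
    num_per_group = shape[0] // groups
    min_shape = min(num_per_group, shape[1])
    idx = []
    for i in range(groups):
        for j in range(min_shape):
            offset = (j + i * num_per_group) * strides[0] + j * strides[1]
            offset += sum(shape[k] // 2 * strides[k] for k in range(2, len(shape)))
            idx.append(offset)
    return idx
# Example: for a Conv1D weight of shape [2, 3, 3] (out_channels, in_channels,
# kernel_size), _dirac_flat_indices([2, 3, 3]) returns [1, 13], i.e. positions
# [0, 0, 1] and [1, 1, 1], matching the commented Tensor in the docstring above.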
|
PaddlePaddle/Paddle
|
python/paddle/nn/initializer/dirac.py
|
Python
|
apache-2.0
| 7,958
|
[
"DIRAC"
] |
d20300442db00576613e7e6d3af4f385f67910926cefcc2291a544293265ec86
|
import msmexplorer as msme
from msmexplorer.utils import msme_colors
from matplotlib.ticker import FuncFormatter
from matplotlib import pyplot as pp
from traj_utils import split_trajs_by_type
import numpy as np
import pandas as pd
import seaborn as sns
from glob import glob
from natsort import natsorted, order_by_index, index_natsorted
import os
from contact_utils import renumber_mask
from traj_utils import get_source_sink
def figure_dims(width_pt, factor=0.45):
"""
I copied this from here:
https://www.archer.ac.uk/training/course-material/2014/07/SciPython_Cranfield/Slides/L04_matplotlib.pdf
"""
WIDTH = width_pt # Figure width in pt (usually from LaTeX)
FACTOR = factor # Fraction of the width you'd like the figure to occupy
widthpt = WIDTH * FACTOR
inperpt = 1.0 / 72.27
golden_ratio = (np.sqrt(5) - 1.0) / 2.0 # because it looks good
widthin = widthpt * inperpt
heightin = widthin * golden_ratio
figdims = [widthin, heightin] # Dimensions as list
return figdims
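# Worked example (illustrative values only): for a 1400 pt wide LaTeX page and
# the default factor of 0.45, the figure is 1400 * 0.45 / 72.27 ≈ 8.72 in wide,
# and 8.72 * 0.618 ≈ 5.39 in tall via the golden ratio.
# >>> figure_dims(1400)
# [8.717..., 5.387...]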
def cleanup_top_right_axes(ax):
"""
Removes the top and right axis lines of a matplotlib axis
"""
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
return ax
def split(a, n):
"""
Splits a list into n approximately equally sized chunks
"""
k, m = divmod(len(a), n)
return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))
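# Illustrative usage (example values only, not part of the original module):
# splitting 10 items into 4 chunks gives sizes 3, 3, 2, 2.
# >>> [list(c) for c in split(list(range(10)), 4)]
# [[0, 1, 2], [3, 4, 5], [6, 7], [8, 9]]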
def get_diff_pkls(glob_expression):
"""
Load several pkl files which have the MMGBSA information for a run.
:param glob_expression: str, A str representing a glob pattern for the pkl files to load
:return concat_split_melt_dfs: list, A list of 4 pd.DataFrames with MMGBSA data (one per subplot)
"""
pickle_list = natsorted(glob(glob_expression))
if len(pickle_list) == 0:
print('Found no pkl files!')
return None
# Load each pickle as a pd.DataFrame
df_list = [pd.read_pickle(pkl) for pkl in pickle_list]
# Add a column to the data frame that tells us which run this data belongs to
# This will be used later on for plotting
for df, dir_path in zip(df_list, pickle_list):
run_name = dir_path.split('/')[-3].split('_')[0]
df['run'] = run_name
# Recover the 'TOTAL' value, which we are interested in, and identify by the 'run' column
melt_dfs = [df.melt(id_vars='run', value_vars='TOTAL', value_name='MMGBSA (kcal/mol)')
for df in df_list]
# Build a list with the mean MMGBSA of each run
# Each element of this list is a tuple with the mean value and the name of the run
list_of_means = [(df.mean()[0], df['run'].unique()[0]) for df in melt_dfs]
# Now sort this list by mean value
sorted_means = sorted([(t[0], t[1]) for t in list_of_means])
# Finally, build the list of melt dfs that will be passed to the split function
melts_sorted = []
for mean, run in sorted_means:
for df in melt_dfs:
if df['run'].unique()[0] == run:
melts_sorted.append(df)
# Split the list into four almost-equally-sized chunks, for later plotting in a 2x2 grid of subplots
concat_split_melt_dfs = [pd.concat(x) for x in split(melts_sorted, 4)]
return concat_split_melt_dfs
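# Illustrative call (the glob pattern and directory layout below are hypothetical,
# not taken from this repository). The run name is parsed from
# dir_path.split('/')[-3].split('_')[0], so each pkl is expected to sit two path
# components below the run directory name.
# >>> chunks = get_diff_pkls('1_run/mmgbsa/*.pkl')
# >>> len(chunks)  # one concatenated DataFrame per subplot of the 2x2 box plot
# 4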
def plot_dssp_results(dssp_array, mask, top_fn, ts=2, ylabel=None,
cmap='viridis', simplified=True):
"""
Plots the dssp assignments in dssp_array as a function of time.
Parameters
----------
dssp_array: np.array of strings, DSSP assignment arrays as calculated from
mdtraj.compute_dssp
mask: list of ints, The list of residues that will be sliced from the dssp
array
top_fn: str, The path to the topology file belonging to the trajectory
the DSSP assignments have been computed from.
ts: float, Conversion factor between the frames in the trajectory and time
in nanoseconds
ylabel: str, Label of vertical axis
cmap: str, A matplotlib color map name
simplified: bool, Whether the dssp_array contains simplified DSSP codes or not
Returns
-------
fig, ax: matplotlib figure and axes
"""
if simplified:
dssp_array[dssp_array == 'H'] = 0
dssp_array[dssp_array == 'E'] = 1
dssp_array[dssp_array == 'C'] = 2
else:
dssp_array[dssp_array == 'H'] = 0
dssp_array[dssp_array == 'B'] = 1
dssp_array[dssp_array == 'E'] = 2
dssp_array[dssp_array == 'G'] = 3
dssp_array[dssp_array == 'I'] = 4
dssp_array[dssp_array == 'T'] = 5
dssp_array[dssp_array == 'S'] = 6
dssp_array[dssp_array == ' '] = 7
dssp = dssp_array.astype(int)
mask1 = renumber_mask(mask, top_fn=top_fn)
if simplified:
n = 2
else:
n = 8
df = pd.DataFrame(
dssp[:, mask],
columns=mask1,
index=[int(x * ts) for x in range(len(dssp))]
)
fig, ax = pp.subplots(figsize=figure_dims(1400))
cmap = pp.cm.get_cmap(cmap, n)
# Get the middle position for the ticks in the colorbar
# If we split a segment of length n into n+1 chunks, the middle position
# between chunks is given by the following formula:
# (n / (2(n+1))) * (2i + 1) where i goes from 0 to n
tick_positions = [
(n / (2 * (n + 1))) * (2 * i + 1) for i in range(n + 1)
]
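# Worked example (illustrative): with simplified codes n = 2, so the three
# tick positions are (2/6)*1 = 0.33, (2/6)*3 = 1.0 and (2/6)*5 = 1.67, i.e.
# one tick per colour band of the discrete colormap.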
sns.heatmap(
df.T,
cmap=cmap,
ax=ax,
linewidths=0,
cbar_kws={
'ticks': tick_positions,
'spacing': 'proportional'
}
)
cax = fig.axes[-1]
if simplified:
cax.set_yticklabels(['Helix', 'Strand', 'Coil'])
else:
cax.set_yticklabels([r'$\alpha$ helix',
r'Isolated $\beta$-bridge',
'Extended strand',
r'$3_{10}$ helix',
r'$\pi$ helix',
'H-bonded turn',
'Bend',
'Coil'])
pp.xticks(rotation=75)
ax.set(xlabel='Time (ns)')
if ylabel is not None:
ax.set(ylabel=ylabel)
fig.tight_layout()
return fig, ax
def plot_box_mmgbsa_results(ligand_data, figsize=(12, 12), title=None):
"""
Returns a 2x2 figure with box plot distributions of the MMGBSA energy of every run
:param ligand_data: list, A list with pd.DataFrames of the MMGBSA energies for each run
:param figsize: tuple, The figure size (in inches)
:param title: str, The title of the figure
:return f: a matplotlib figure
"""
if len(ligand_data) != 4:
raise ValueError('ligand_data must be a list of length 4')
f, ((ax0, ax1), (ax2, ax3)) = pp.subplots(2, 2, figsize=figsize, sharey=True)
for ax, df in zip((ax0, ax1, ax2, ax3), ligand_data):
sns.boxplot(x='run', y='MMGBSA (kcal/mol)', data=df, ax=ax)
ax.set_xlabel('')
ax.xaxis.set_tick_params(rotation=70)
if title is not None:
f.suptitle(title, size=22)
ax0.set_ylabel(ylabel=r'$\Delta$G binding (kcal/mol)', size=14)
ax2.set_ylabel(ylabel=r'$\Delta$G binding (kcal/mol)', size=14)
ax1.set_ylabel('')
ax3.set_ylabel('')
return f
def bar_plot_mmgbsa_results(excel_file, sort=True, titles=None):
"""
Load data from an Excel file with the summary of the MMGBSA results, in a sheet which has to be called "MMGBSA".
Create a plot for each ligand that is found under the 'Ligand' column in the table.
:param excel_file: str, Name of the Excel file with the data
:param sort: bool, Whether to sort the plot by increasing MMGBSA values
:param titles: list, Name for each of the plots (as many as there are ligands in the table)
:return f_list: list, A list of matplotlib figures
"""
# sns.set_style('whitegrid')
df = pd.read_excel(excel_file, sheetname="MMGBSA")
df = df.reindex(index=order_by_index(df.index, index_natsorted(df.Run)))
ligands = df.Ligand.unique()
f_list = []
if titles is None:
titles = [None for _ in ligands]
elif len(titles) != len(ligands):
raise ValueError('len of ligands and titles is not equal.')
for lig, title in zip(ligands, titles):
lig_df = df[df.Ligand == lig]
if sort:
lig_df.sort_values(by='MMGBSA (mean)', inplace=True)
ax = lig_df.plot(x="Run", y="MMGBSA (mean)", yerr='MMGBSA (Std)', kind='bar',
legend=False, figsize=figure_dims(1400), title=title)
overall_mean = lig_df['MMGBSA (mean)'].mean()
overall_std = lig_df['MMGBSA (mean)'].std()
print("{} {:02f} {:02f}".format(lig, overall_mean, overall_std))
xmin, xmax = ax.get_xlim()
# Mean line
ax.plot(
[xmin, xmax], [overall_mean, overall_mean],
linewidth=1.5,
color='blue'
)
# Upper std bar
ax.plot(
[xmin, xmax],
[overall_mean + overall_std, overall_mean + overall_std],
linestyle='dashed',
linewidth=1,
color='blue'
)
# Lower std bar
ax.plot(
[xmin, xmax],
[overall_mean - overall_std, overall_mean - overall_std],
linestyle='dashed',
linewidth=1,
color='blue'
)
ax.set_ylim(top=0)
ax.set_ylabel(ylabel=r'$\Delta$G binding (kcal/mol)', size=14)
ax.set_xlabel(xlabel='Run', size=14)
f = pp.gcf()
f.tight_layout()
f_list.append(f)
return f_list
@msme_colors
def plot_tica_timescales(tica, meta, ax=None, color='cyan'):
"""
Plot the timescales of a tica object
:param tica: an msmbuilder tica object
:param meta: an msmbuilder metadata object
:param ax: a matplotlib axis
:param color: string, the color to plot
:return ax: drawn matplotlib axis
"""
if ax is None:
ax = pp.gca()
timestep = meta['step_ps'].unique()
assert len(timestep) == 1, timestep
timestep = float(timestep[0]) # ps
to_us = (
(1.0 / 1000) # ps -> ns
* (1.0 / 1000) # ns -> us
* (timestep / 1) # steps -> ps
)
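# Worked example (illustrative): for a 50 ps trajectory step, to_us = 5e-5, so a
# tICA timescale of 20000 steps is drawn at 1 microsecond.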
ax.hlines(
tica.timescales_ * to_us,
0, 1,
color=color
)
ax.set_ylabel(r'Timescales / $\mathrm{\mu s}$', fontsize=18)
ax.set_xticks([])
ax.set_xlim((0, 1))
return ax
def plot_ergodic_subspace(msm, clusterer, obs=(0, 1), ax=None, alpha=1.,
color='blue', label=None, xlabel=None, ylabel=None,
scatter_kwargs=None):
"""
Plot which cluster centers out of the clusterer object have been visited
in the msm object.
:param msm: A trained msmbuilder MSM object
:param clusterer: A trained msmbuilder clusterer object
:param obs: tuple, which dimensions to plot
:param ax: a matplotlib.axes object
:param alpha: float, transparency parameter for ax.scatter
:param color: string, parameter for ax.scatter
:param label: string, parameter for ax.scatter
:param xlabel: string, parameter for ax.scatter
:param ylabel: string, parameter for ax.scatter
:param scatter_kwargs: dict, any other parameters for ax.scatter
:return ax: a matplotlib.axes object
"""
if ax is None:
ax = pp.gca()
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if scatter_kwargs is None:
scatter_kwargs = {}
prune = clusterer.cluster_centers_[:, obs]
ax.scatter(
prune[msm.state_labels_][:, 0],
prune[msm.state_labels_][:, 1],
color=color,
label=label,
alpha=alpha,
**scatter_kwargs
)
return ax
def plot_singletic_trajs(ttrajs, meta, system, alpha=1, stride=1,
obs=(0, 1, 2), ylabels=None,
xlabel=None, title=None, figsize=None):
"""
Plot each tIC vs. time on its own axis and stack them vertically.
By default, the first three tICs are plotted, but fewer (or more) can be
chosen.
:param ttrajs: dict, with keys as integers and values of np.arrays (tICA converted trajectories)
:param meta: an msmbuilder metadata object
:param system: str, the system inside 'meta' to plot
:param obs: tuple (optional), the dimensions to plot
:param ylabels: list or tuple (optional) of str, the labels of y axis
:param xlabel: str (optional), the x label (to be shared)
:param title: str (optional), the title of the plot
:param figsize: tuple (optional), the figure dimensions
:return axarr: an array of matplotlib axis
"""
if ylabels is None:
ylabels = ['tIC1', 'tIC2', 'tIC3']
if len(obs) != len(ylabels):
raise ValueError('Length of obs and ylabels is not equal.')
def to_ns(x, pos):
timestep = meta['step_ps'].unique()
return "%d" % (x * timestep * stride / 1000)
# Get dictionary of specific sub system
ttrajs_subtypes = split_trajs_by_type(ttrajs, meta)
ttrajs_specific = ttrajs_subtypes[system]
# Create the figure
if figsize is None:
figsize = figure_dims(1200)
fig, axarr = pp.subplots(len(obs), 1, figsize=figsize, sharex=True, sharey=True)
if title is not None:
axarr[0].set(title=title)
if xlabel is not None:
axarr[-1].set(xlabel=xlabel)
formatter = FuncFormatter(to_ns)
indexes = meta[meta['type'] == system].index
for j, ylabel in zip(range(len(obs)), ylabels):
for index in indexes:
ax = axarr[j]
ax.plot(ttrajs_specific[index][::stride, obs[j]], alpha=alpha)
ax.set_ylabel(ylabel)
ax.xaxis.set_major_formatter(formatter)
return axarr
def plot_overlayed_types(ttrajs, meta, obs=(0, 1), ax=None, stride=100,
xlabel=None, ylabel=None, plot_free_energy_kwargs=None, plot_kwargs=None):
"""
Overlay each type of system inside the meta object onto the overall tICA
free energy landscape.
:param ttrajs: dict, with keys as integers and values of np.arrays (tICA converted trajectories)
:param meta: an msmbuilder metadata object
:param obs: tuple (optional), the dimensions to plot
:param ax: matplotlib axis to plot in (optional)
:param stride: int (optional, default=100), scatter every `stride` frames to avoid a cluttered plot
:param xlabel: str (optional), the x label
:param ylabel: str (optional), the y label
:param plot_free_energy_kwargs: dict (optional), extra parameters to pass to msme.plot_free_energy
:param plot_kwargs: dict (optional), extra parameters to pass to pyplot.plot
:return ax: drawn matplotlib axis
"""
if ax is None:
ax = pp.gca()
if plot_free_energy_kwargs is None:
plot_free_energy_kwargs = {}
if plot_kwargs is None:
plot_kwargs = {}
txx = np.concatenate(list(ttrajs.values()))
ttrajs_subtypes = split_trajs_by_type(ttrajs, meta)
msme.plot_free_energy(txx, obs=obs, ax=ax, **plot_free_energy_kwargs)
for traj_id, traj_dict in ttrajs_subtypes.items():
system_txx = np.concatenate(list(traj_dict.values()))
ax.scatter(system_txx[::stride, obs[0]], system_txx[::stride, obs[1]], label=traj_id, **plot_kwargs)
pp.legend(loc='best')
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
return ax
def plot_microstates(msm, txx, clusterer, obs=(0, 1), eigenvector=1, ax=None,
clabel=None):
"""
Taken from the msmbuilder template.
Plot the microstate centers of an msm object on top of a grey hexbin
tICA landscape. Color the microstates by the sign of the chosen eigenvector.
:param ax: matplotlib axis to plot in (optional)
:param msm: a fit msmbuilder MSM object
:param txx: np.array of concatenated tIC trajs, shape = (n_frames, n_features)
:param clusterer: a fit msmbuilder clusterer object
:param obs: tuple of ints, the dimensions to plot
:param eigenvector: int, the eigenvector used to color the microstates
:param clabel: str, label of the colorbar
:return ax: drawn matplotlib axis
"""
if ax is None:
ax = pp.gca()
txx = txx[:, obs]
ax.hexbin(txx[:, 0], txx[:, 1],
cmap='Greys',
mincnt=1,
bins='log',
)
scale = 100 / np.max(msm.populations_)
add_a_bit = 5
prune = clusterer.cluster_centers_[:, obs]
c = ax.scatter(prune[msm.state_labels_, 0],
prune[msm.state_labels_, 1],
s=scale * msm.populations_ + add_a_bit,
c=msm.left_eigenvectors_[:, eigenvector],
cmap='RdBu'
)
ax.set_xlabel("tIC 1", fontsize=16)
ax.set_ylabel("tIC 2", fontsize=16)
pp.colorbar(c, label=clabel)
return ax
def plot_src_sink(msm, clusterer, ev, txx, src, sink, clabel=None, title='', ax=None):
"""
Plot the microstates colored by an eigenvector and highlight the source and
sink states of that eigenvector with diamond markers.
:param msm: a fit msmbuilder MSM object
:param clusterer: a fit msmbuilder clusterer object
:param ev: int, the eigenvector used for coloring and for choosing source/sink
:param txx: np.array of concatenated tIC trajs, shape = (n_frames, n_features)
:param src: int, index of the source cluster center
:param sink: int, index of the sink cluster center
:param clabel: str, label of the colorbar
:param title: str, title of the plot
:param ax: matplotlib axis to plot in (optional)
:return ax: drawn matplotlib axis
"""
if ax is None:
ax = pp.gca()
ax.set_title(title)
plot_microstates(msm=msm, eigenvector=ev, clabel=clabel, txx=txx, clusterer=clusterer, ax=ax)
# source
ax.scatter(
clusterer.cluster_centers_[src][0],
clusterer.cluster_centers_[src][1],
marker='D',
color='red',
s=200,
zorder=10
)
# sink
ax.scatter(
clusterer.cluster_centers_[sink][0],
clusterer.cluster_centers_[sink][1],
marker='D',
color='blue',
s=200,
zorder=10
)
return ax
def plot_efhand_dists_src_sinks(src_glob, sink_glob, title=None, ax=None):
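"""
Plot the mean and standard deviation of EF-hand distance data files for the
source and sink states, one errorbar series each.
The file lists are built from the two glob patterns; the atom name is parsed
from each file name, and the 'E280' labels produced by cpptraj are renamed
to 'E32'.
:param src_glob: str, glob pattern matching the source-state distance files
:param sink_glob: str, glob pattern matching the sink-state distance files
:param title: str (optional), the title of the plot
:param ax: a matplotlib axes object (optional)
:return ax: drawn matplotlib axis
"""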
if ax is None:
ax = pp.gca()
src = glob(src_glob)
snk = glob(sink_glob)
data = {}
for f in src:
atom = os.path.basename(f).split('-')[1].split('_')[0]
data[atom] = [None, None]
for f1, f2 in zip(src, snk):
atom = os.path.basename(f1).split('-')[1].split('_')[0]
src_mean = np.loadtxt(f1).mean(axis=0)[1]
src_std = np.loadtxt(f1).std(axis=0)[1]
snk_mean = np.loadtxt(f2).mean(axis=0)[1]
snk_std = np.loadtxt(f2).std(axis=0)[1]
data[atom][0] = (src_mean, src_std)
data[atom][1] = (snk_mean, snk_std)
# cpptraj names E32 of cTni as E280, rename that
data['E32O'] = data.pop('E280O')
data['E32OE1'] = data.pop('E280OE1')
data['E32OE2'] = data.pop('E280OE2')
# now do plot
ax.set_title(title)
n_feats_plot = len(data)
xx = np.arange(1, n_feats_plot + 1)
ax.errorbar(xx, [x[0][0] for x in data.values()],
yerr=[x[0][1] for x in data.values()],
label='source', linestyle='None', marker='s', color='red')
ax.errorbar(xx, [x[1][0] for x in data.values()],
yerr=[x[1][1] for x in data.values()],
label='sink', linestyle='None', marker='*', color='blue')
pp.legend()
ax.set_xticks(xx)
ax.set_xlim((0, n_feats_plot + 1))
ax.set_xticklabels(
list(data.keys())
)
for tick in ax.get_xticklabels():
tick.set_rotation(60)
ax.tick_params(labelsize=14)
ax.set_ylabel("Distance (Å)", fontsize=16)
return ax
def plot_cluster_centers(clusterer, centers, txx, surface=True, ax=None,
obs=(0, 1), from_clusterer=True, msm=None,
add_bit=20, scatter_kwargs=None):
"""
Plots cluster centers as scatter points on top of a tICA landscape.
Center IDs can be either in MSM labeling
or directly from the clustering labelling.
Parameters
----------
clusterer: a fit clusterer object, with .cluster_centers_ attribute
centers: list of ints, the IDs of the cluster centers to plot
txx: np.array of concatenated tIC trajs, shape = (n_frames, n_features)
surface: bool, Control if energy surface is plotted or not
ax: a matplotlib axes object (optional)
obs: tuple of ints, dimensions to plot (optional)
from_clusterer: bool, whether the center IDs use the clusterer
indexing (True, the default) or the MSM-internal indexing (False)
msm: a MarkovStateModel object which has been fit
add_bit: int, control size of scatter plots
scatter_kwargs: dict, Extra parameters to pass to scatter plot
Returns
-------
ax: a matplotlib axes object
Raises
------
ValueError: if from_clusterer is False (i.e. the center IDs are in
MSM-internal labeling) and no MSM object is provided to map them
back to the clusterer naming
"""
if ax is None:
ax = pp.gca()
if scatter_kwargs is None:
scatter_kwargs = {}
if surface:
ax = msme.plot_free_energy(txx, obs=obs, n_samples=5000,
gridsize=100, vmax=5.,
n_levels=8, cut=5, xlabel='tIC1',
ylabel='tIC2'
)
prune = clusterer.cluster_centers_[:, obs]
if from_clusterer:
chosen_centers = prune[centers]
ax.scatter(chosen_centers[:, 0], chosen_centers[:, 1])
else:
if msm is None:
raise ValueError('if from_clusterer is False please provide a fit MSM in the msm parameter')
else:
# Retrieve cluster centers from clusterer objects that have been used in this MSM
centers_in_clusterer = []
for k, v in msm.mapping_.items():
if v in centers:
centers_in_clusterer.append(k)
scale = 100 / np.max(msm.populations_)
chosen_centers = prune[centers_in_clusterer]
ax.scatter(
chosen_centers[:, 0], chosen_centers[:, 1],
s=add_bit + (scale * msm.populations_),
**scatter_kwargs
)
return ax
def plot_tpt(msm, clusterer, txx, ev=1, ax=None, title=None,
obs=(0, 1), num_paths=1):
"""
Automatically draw an msmexplorer TPT plot by selecting the microstates that
have the lowest (source) and highest (sink) values of the provided eigenvector.
:param msm: an MSM object
:param clusterer: a clusterer object
:param txx: np.array of concatenated tIC trajs,
shape = (n_frames, n_features)
:param ev: int, the eigenvector to plot the transition for
:param ax: a matplotlib axes object (optional)
:param title: str, the title
:param obs: tuple of ints, dimensions to plot (optional)
:param num_paths: int, the number of paths to plot
:return ax:
"""
if ax is None:
    ax = pp.gca()
prune = clusterer.cluster_centers_[:, obs]
msm_states = prune[msm.state_labels_]
pos = dict(zip(range(len(msm_states)), msm_states))
w = (msm.left_eigenvectors_[:, ev] - msm.left_eigenvectors_[:, ev].min())
w /= w.max()
src, snk = get_source_sink(
msm, clusterer=clusterer, eigenvector=ev, out_naming='msm'
)
# ax.hexbin(txx[:, 0], txx[:, 1],
# cmap='Greys',
# mincnt=1,
# bins='log',
# )
cmap = msme.utils.make_colormap(['pomegranate', 'lightgrey', 'rawdenim'])
ax = msme.plot_tpaths(
msm, [src], [snk], pos=pos, node_color=cmap(w),
alpha=1, edge_color='black', ax=ax, num_paths=num_paths,
with_labels=False
)
if title is not None:
ax.set_title(title)
return ax
def plot_tic_loadings(tica, ax=None, n_tics=3, alpha=1):
"""
Plot the tICA weights on each feature
"""
if ax is None:
ax = pp.gca()
for i in range(n_tics):
ax.plot(
tica.components_[i, :],
alpha=alpha,
label='tIC{}'.format(i + 1)
)
# ax.legend(loc='best')
ax.set(ylabel='tIC weight', xlabel='Feature index')
return ax
# ----------
# FOR GEPHI
# ----------
def add_colors(graph, color_arr):
"""
Adds colors to the nodes in a graph
Parameters
----------
graph: a networkx graph (directed or undirected)
color_arr: np.array, shape=(n_nodes, 4)
An RGBA in 255 scale, indicating the color for each node
Returns
-------
graph: the networkx graph
Example
-------
cmap = plt.cm.inferno
colors = np.asarray(cmap(pcca.microstate_mapping_ / pcca.microstate_mapping_.max())*255, dtype=int)
g = nx.DiGraph(tmat)
g = add_colors(g, colors)
"""
for n in graph.nodes:
node = graph.nodes[n]
if 'viz' not in node:
node['viz'] = {}
node['viz']['color'] = {
'r': color_arr[n][0],
'g': color_arr[n][1],
'b': color_arr[n][2],
'a': int(color_arr[n][3] / 255)
}
return graph
def add_counts(graph, count_matrix):
"""
Adds a 'count' attribute to each node in the graph, and also sets the node
size proportional to this count.
Parameters
----------
graph: a networkx graph (directed or undirected)
count_matrix: np.array of shape=(n_nodes, ) indicating the count
or population of each node, as obtained from the stationary
distribution of an MSM
Returns
-------
graph: the networkx graph
"""
count_matrix = count_matrix / count_matrix.max()
for n in graph.nodes:
node = graph.nodes[n]
if 'viz' not in node:
node['viz'] = {}
node['viz']['size'] = float(count_matrix[n]) * 10
node['count'] = float(count_matrix[n])
return graph
def add_positions(graph, position_matrix):
"""
Set an xyz coordinate for each node in a graph
Parameters
----------
graph: a networkx graph (directed or undirected)
position_matrix: a np.array of shape=(n_nodes, 2 or 3) indicating the xy(z)
coordinates for every node in the graph. If only 2 columns are passed,
the z coordinate is set to 0 for all the nodes.
Returns
-------
graph: the networkx graph
"""
if position_matrix.shape[1] < 3:
zeros = np.zeros(shape=(len(position_matrix), 1))
position_matrix = np.hstack((position_matrix, zeros))
for n in graph.nodes:
node = graph.nodes[n]
if 'viz' not in node:
node['viz'] = {}
node['viz']['position'] = {
'x': float(position_matrix[n, 0]),
'y': float(position_matrix[n, 1]),
'z': float(position_matrix[n, 2])
}
return graph
def rgba_colors_from_array(array, cmap):
"""
Returns an RGBA array on a 255 scale mapped to an input array of scalars
Parameters
----------
array: np.array, shape=(n,) scalars onto which we map cmap
cmap: mpl.cmap instance
Returns
-------
colors: np.array, shape=(n, 4), rgba colors on 255 scale
"""
colors = np.asarray(cmap(array / array.max()) * 255, dtype=int)
return colors
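# --- Illustrative end-to-end use of the Gephi helpers above. Everything in this
# sketch is an assumption for demonstration (random transition matrix, random
# populations and layout; networkx is assumed to be installed although it is not
# imported at the top of this module); it only shows how add_colors, add_counts,
# add_positions and rgba_colors_from_array compose before exporting to Gephi.
def export_example_msm_to_gephi(out_file='example_msm.gexf', n_nodes=5, seed=0):
    import networkx as nx
    rng = np.random.RandomState(seed)
    tmat = rng.rand(n_nodes, n_nodes)   # placeholder transition matrix
    populations = rng.rand(n_nodes)     # placeholder stationary populations
    xy = rng.rand(n_nodes, 2)           # placeholder 2D layout (z padded to 0)
    graph = nx.DiGraph(tmat)
    colors = rgba_colors_from_array(populations, pp.cm.viridis)
    graph = add_colors(graph, colors)
    graph = add_counts(graph, populations)
    graph = add_positions(graph, xy)
    nx.write_gexf(graph, out_file)
    return graph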
|
jeiros/Scripts
|
AnalysisMDTraj/plot_utils.py
|
Python
|
mit
| 26,888
|
[
"MDTraj"
] |
a3a7681d47b30c27e777f0a66d928fab7433af459ae851728e5e82b4c81659b7
|
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2017-2018 Francois Beaune, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from appleseed.studio import *
def register():
# In the future, this plugin will be able to add menus or panels to appleseed.studio.
pass
def list_objects():
"""Print names of all objects in the scene."""
scene = current_project().get_scene()
assemblies = scene.assemblies()
for ass_key in assemblies:
list_objects_in_assembly(assemblies[ass_key])
def list_objects_in_assembly(ass):
"""Print names of objects in a given assembly and all its child assemblies."""
# Print names of objects inside this assembly.
for obj in ass.objects():
print(obj.get_name())
# Recurse into child assemblies.
child_assemblies = ass.assemblies()
for sub_ass_key in child_assemblies:
list_objects_in_assembly(child_assemblies[sub_ass_key])
|
Biart95/appleseed
|
sandbox/samples/python/studio/plugins/basicenumerator/__init__.py
|
Python
|
mit
| 2,107
|
[
"VisIt"
] |
ae4c5bf7de50fd8def4fc291623217b56b2ffd1be958027a64667bb312f6651d
|
from __future__ import annotations
import collections
import glob
import os
import procrunner
import pytest
from cctbx import uctbx
from dxtbx.model import ExperimentList
from dxtbx.serialize import load
from dials.array_family import flex
def unit_cells_are_similar(
uc1, uc2, relative_length_tolerance=0.01, absolute_angle_tolerance=1
):
# see also uctbx.cpp unit_cell::is_similar_to()
l1 = uc1.parameters()
l2 = uc2.parameters()
for i in range(3):
if abs(min(l1[i], l2[i]) / max(l1[i], l2[i]) - 1) > relative_length_tolerance:
return False
for i in range(3, 6):
if abs(l1[i] - l2[i]) > absolute_angle_tolerance:
if abs(l1[i] - (180 - l2[i])) > absolute_angle_tolerance:
return False
return True
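# Illustrative check (example cells only, not from a real dataset): with the
# default tolerances, (78.0, 78.0, 38.0, 90, 90, 90) and
# (78.5, 78.5, 38.2, 90, 90, 90) compare as similar (all length ratios within
# 1%), whereas (78.0, 78.0, 38.0, 90, 90, 90) vs (80.0, 78.0, 38.0, 90, 90, 90)
# do not, since 78/80 deviates from 1 by 2.5%.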
_indexing_result = collections.namedtuple(
"indexing", ["indexed_reflections", "experiments", "rmsds"]
)
def run_indexing(
reflections,
experiment,
working_directory,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
n_expected_lattices=1,
relative_length_tolerance=0.005,
absolute_angle_tolerance=0.5,
):
commands = ["dials.index"]
if isinstance(reflections, list):
commands.extend(reflections)
else:
commands.append(reflections)
if isinstance(experiment, list):
commands.extend(experiment)
else:
commands.append(experiment)
commands.extend(extra_args)
result = procrunner.run(commands, working_directory=working_directory)
assert not result.returncode and not result.stderr
out_expts = working_directory.join("indexed.expt")
out_refls = working_directory.join("indexed.refl")
assert out_expts.check()
assert out_refls.check()
experiments_list = load.experiment_list(out_expts.strpath, check_format=False)
assert len(experiments_list.crystals()) == n_expected_lattices
indexed_reflections = flex.reflection_table.from_file(out_refls.strpath)
indexed_reflections.assert_experiment_identifiers_are_consistent(experiments_list)
rmsds = None
for i, experiment in enumerate(experiments_list):
assert unit_cells_are_similar(
experiment.crystal.get_unit_cell(),
expected_unit_cell,
relative_length_tolerance=relative_length_tolerance,
absolute_angle_tolerance=absolute_angle_tolerance,
), (
experiment.crystal.get_unit_cell().parameters(),
expected_unit_cell.parameters(),
)
sg = experiment.crystal.get_space_group()
assert sg.type().hall_symbol() == expected_hall_symbol, (
sg.type().hall_symbol(),
expected_hall_symbol,
)
reflections = indexed_reflections.select(indexed_reflections["id"] == i)
mi = reflections["miller_index"]
assert (mi != (0, 0, 0)).count(False) == 0
reflections = reflections.select(mi != (0, 0, 0))
reflections = reflections.select(
reflections.get_flags(reflections.flags.used_in_refinement)
)
assert len(reflections) > 0
obs_x, obs_y, obs_z = reflections["xyzobs.mm.value"].parts()
calc_x, calc_y, calc_z = reflections["xyzcal.mm"].parts()
rmsd_x = flex.mean(flex.pow2(obs_x - calc_x)) ** 0.5
rmsd_y = flex.mean(flex.pow2(obs_y - calc_y)) ** 0.5
rmsd_z = flex.mean(flex.pow2(obs_z - calc_z)) ** 0.5
rmsds = (rmsd_x, rmsd_y, rmsd_z)
for actual, expected in zip(rmsds, expected_rmsds):
assert actual <= expected, f"{rmsds} {expected_rmsds}"
assert experiment.identifier != ""
expt = ExperimentList()
expt.append(experiment)
reflections.assert_experiment_identifiers_are_consistent(expt)
return _indexing_result(indexed_reflections, experiments_list, rmsds)
def test_index_i04_weak_data_fft3d(dials_regression, tmpdir):
# thaumatin
data_dir = os.path.join(dials_regression, "indexing_test_data", "i04_weak_data")
pickle_path = os.path.join(data_dir, "full.pickle")
sequence_path = os.path.join(data_dir, "experiments_import.json")
extra_args = [
"bin_size_fraction=0.25",
"image_range=1,20",
"image_range=250,270",
"image_range=520,540",
]
expected_unit_cell = uctbx.unit_cell((57.7, 57.7, 149.8, 90, 90, 90))
expected_rmsds = (0.05, 0.04, 0.0005)
expected_hall_symbol = " P 1"
run_indexing(
pickle_path,
sequence_path,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
def test_index_trypsin_four_lattice_P212121(dials_regression, tmpdir):
# synthetic trypsin multi-lattice dataset (4 lattices)
data_dir = os.path.join(dials_regression, "indexing_test_data", "trypsin")
pickle_path = os.path.join(data_dir, "P1_X6_1_2_3_4.pickle")
sequence_path = os.path.join(data_dir, "experiments_P1_X6_1_2_3_4.json")
extra_args = [
"indexing.method=real_space_grid_search",
"reflections_per_degree=10",
"n_macro_cycles=5",
"known_symmetry.unit_cell=54.3,58.3,66.5,90,90,90",
"known_symmetry.space_group=P212121",
"image_range=0,10",
"beam.fix=all",
"detector.fix=all",
"max_cell=70",
]
expected_unit_cell = uctbx.unit_cell((54.3, 58.3, 66.5, 90, 90, 90))
expected_rmsds = (0.28, 0.30, 0.006)
expected_hall_symbol = " P 2ac 2ab"
n_expected_lattices = 1
run_indexing(
pickle_path,
sequence_path,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
n_expected_lattices=n_expected_lattices,
relative_length_tolerance=0.02,
absolute_angle_tolerance=1,
)
def test_index_i04_weak_data_fft1d(dials_regression, tmpdir):
# thaumatin
data_dir = os.path.join(dials_regression, "indexing_test_data", "i04_weak_data")
pickle_path = os.path.join(data_dir, "full.pickle")
sequence_path = os.path.join(data_dir, "experiments_import.json")
extra_args = [
"n_macro_cycles=2",
"indexing.method=fft1d",
"bin_size_fraction=0.25",
"image_range=1,20",
"image_range=250,270",
"image_range=520,540",
]
expected_unit_cell = uctbx.unit_cell((57.7, 57.7, 149.9, 90, 90, 90))
expected_rmsds = (0.06, 0.05, 0.0005)
expected_hall_symbol = " P 1"
run_indexing(
pickle_path,
sequence_path,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
def test_index_trypsin_index_assignment_local(dials_regression, tmpdir):
# synthetic trypsin multi-lattice dataset (3 lattices)
data_dir = os.path.join(dials_regression, "indexing_test_data", "trypsin")
pickle_path = os.path.join(data_dir, "P1_X6_1_2_3.pickle")
sequence_path = os.path.join(data_dir, "experiments_P1_X6_1_2_3.json")
extra_args = [
"indexing.method=real_space_grid_search",
"d_min_start=3",
"n_macro_cycles=3",
"known_symmetry.unit_cell=54.3,58.3,66.5,90,90,90",
"known_symmetry.space_group=P212121",
"image_range=0,10",
"beam.fix=all",
"detector.fix=all",
"max_lattices=3",
"index_assignment.method=local",
"nearest_neighbours=50",
]
expected_unit_cell = uctbx.unit_cell((54.3, 58.3, 66.5, 90, 90, 90))
expected_rmsds = (0.33, 0.40, 0.0024)
expected_hall_symbol = " P 2ac 2ab"
n_expected_lattices = 3
run_indexing(
pickle_path,
sequence_path,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
n_expected_lattices=n_expected_lattices,
relative_length_tolerance=0.02,
absolute_angle_tolerance=1,
)
def test_index_peak_search_clean(dials_regression, tmpdir):
# test indexing from single image of i04_weak_data
data_dir = os.path.join(dials_regression, "indexing_test_data", "i04_weak_data")
pickle_path = os.path.join(data_dir, "first_image.pickle")
sequence_path = os.path.join(data_dir, "experiments_import.json")
extra_args = [
"indexing.method=fft3d",
"known_symmetry.space_group=P4",
"known_symmetry.unit_cell=57.8,57.8,150,90,90,90",
"peak_search=clean",
"min_samples=15",
"n_macro_cycles=4",
"reciprocal_space_grid.d_min=4",
]
expected_unit_cell = uctbx.unit_cell((57.8, 57.8, 150, 90, 90, 90))
expected_rmsds = (0.06, 0.07, 0.003)
expected_hall_symbol = " P 4"
run_indexing(
pickle_path,
sequence_path,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
@pytest.mark.parametrize("specify_unit_cell", [False, True])
def test_index_imosflm_tutorial(dials_regression, tmpdir, specify_unit_cell):
# test on spots derived from imosflm tutorial data:
# http://www.ccp4.ac.uk/courses/BCA2005/tutorials/dataproc-tutorial.html
data_dir = os.path.join(dials_regression, "indexing_test_data", "imosflm_hg_mar")
pickle_path = os.path.join(data_dir, "strong.pickle")
sequence_path = os.path.join(data_dir, "experiments.json")
unit_cell = uctbx.unit_cell((58.373, 58.373, 155.939, 90, 90, 120))
hall_symbol = '-R 3 2"'
extra_args = [
"bin_size_fraction=0.25",
'known_symmetry.space_group="Hall: %s"' % hall_symbol.replace('"', '\\"'),
]
if specify_unit_cell:
extra_args.append(
'known_symmetry.unit_cell="%s %s %s %s %s %s"' % unit_cell.parameters()
)
expected_unit_cell = unit_cell
expected_hall_symbol = hall_symbol
expected_rmsds = (0.08, 0.11, 0.004)
run_indexing(
pickle_path,
sequence_path,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
@pytest.fixture(scope="session")
def insulin_spotfinding(dials_data, tmpdir_factory):
"""Return experiment and reflection files for 2 images of the insulin dataset"""
data_dir = dials_data("insulin")
tmpdir = tmpdir_factory.mktemp("insulin")
command = ["dials.import"]
for i, image_path in enumerate(("insulin_1_001.img", "insulin_1_045.img")):
target = "image_00%i.img" % (i + 1)
data_dir.join(image_path).copy(tmpdir.join(target))
command.append(target)
result = procrunner.run(command, working_directory=tmpdir)
assert not result.returncode and not result.stderr
experiment = tmpdir.join("imported.expt")
assert experiment.check()
command = ["dials.find_spots", "nproc=1", experiment]
result = procrunner.run(command, working_directory=tmpdir)
assert not result.returncode and not result.stderr
reflections = tmpdir.join("strong.refl")
assert reflections.check()
return experiment, reflections
@pytest.mark.parametrize("method", ["fft3d", "fft1d", "real_space_grid_search"])
def test_index_insulin_multi_sequence(insulin_spotfinding, tmpdir, method):
experiment, reflections = insulin_spotfinding
expected_unit_cell = uctbx.unit_cell(
(78.163, 78.163, 78.163, 90.000, 90.000, 90.000)
)
expected_hall_symbol = " I 2 2 3"
expected_rmsds = (0.05, 0.06, 0.01)
extra_args = [
'known_symmetry.unit_cell="%s %s %s %s %s %s"'
% expected_unit_cell.parameters(),
f'known_symmetry.space_group="Hall: {expected_hall_symbol}"',
f"indexing.method={method}",
"treat_single_image_as_still=False",
]
run_indexing(
reflections,
experiment,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
@pytest.fixture(scope="session")
def insulin_spotfinding_stills(dials_data, tmpdir_factory):
"""Return experiment and reflection files for 1 image of the insulin
dataset treated as still image"""
data_dir = dials_data("insulin")
tmpdir = tmpdir_factory.mktemp("insulin")
command = [
"dials.import",
"convert_sequences_to_stills=True",
data_dir.join("insulin_1_001.img"),
]
result = procrunner.run(command, working_directory=tmpdir)
assert not result.returncode and not result.stderr
experiment = tmpdir.join("imported.expt")
assert experiment.check()
command = ["dials.find_spots", "nproc=1", experiment]
result = procrunner.run(command, working_directory=tmpdir)
assert not result.returncode and not result.stderr
reflections = tmpdir.join("strong.refl")
assert reflections.check()
return experiment, reflections
@pytest.mark.parametrize("method", ["fft3d", "fft1d", "real_space_grid_search"])
def test_index_insulin_force_stills(insulin_spotfinding_stills, tmpdir, method):
experiment, reflections = insulin_spotfinding_stills
expected_unit_cell = uctbx.unit_cell(
(78.163, 78.163, 78.163, 90.000, 90.000, 90.000)
)
expected_hall_symbol = " I 2 2 3"
expected_rmsds = (0.05, 0.06, 0.01)
extra_args = [
"stills.indexer=stills",
'known_symmetry.unit_cell="%s %s %s %s %s %s"'
% expected_unit_cell.parameters(),
f'known_symmetry.space_group="Hall: {expected_hall_symbol}"',
f"indexing.method={method}",
]
run_indexing(
reflections,
experiment,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
def test_multiple_experiments(dials_regression, tmpdir):
# Test indexing 4 lysozyme still shots in a single dials.index job
# - the first image doesn't index
# - the last three images do index
data_dir = os.path.join(
dials_regression, "indexing_test_data", "i24_lysozyme_stills"
)
pickle_path = os.path.join(data_dir, "strong.pickle")
experiments_json = os.path.join(data_dir, "imported_experiments.json")
expected_unit_cell = uctbx.unit_cell((38.06, 78.78, 78.91, 90, 90, 90))
expected_hall_symbol = " P 1"
expected_rmsds = (0.1, 0.07, 0.0)
extra_args = [
"stills.indexer=sequences",
"joint_indexing=False",
"outlier.algorithm=sauter_poon",
]
run_indexing(
pickle_path,
experiments_json,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
n_expected_lattices=3,
relative_length_tolerance=0.01,
)
def test_index_4rotation(dials_regression, tmpdir):
data_dir = os.path.join(dials_regression, "indexing_test_data", "4rotation")
pickle_path = os.path.join(data_dir, "strong.pickle")
sequence_path = os.path.join(data_dir, "experiments.json")
extra_args = [
"max_refine=10",
"reflections_per_degree=50",
"known_symmetry.space_group=R3",
"n_macro_cycles=3",
]
expected_unit_cell = uctbx.unit_cell((48.397, 48.397, 284.767, 90, 90, 120))
expected_rmsds = (0.06, 0.08, 0.22)
expected_hall_symbol = " R 3"
result = run_indexing(
pickle_path,
sequence_path,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
assert len(result.indexed_reflections) > 276800, len(result.indexed_reflections)
def test_index_small_molecule_multi_sequence_4(dials_regression, tmpdir):
# test for small molecule multi-sequence indexing, 4 sequences with different values
# of goniometer.fixed_rotation()
data_dir = os.path.join(dials_regression, "indexing_test_data", "multi_sweep")
pickle_paths = [
glob.glob(
os.path.join(data_dir, "SWEEP%i" % (i + 1), "index", "*_strong.pickle")
)[0]
for i in range(4)
]
sequence_paths = [
glob.glob(
os.path.join(data_dir, "SWEEP%i" % (i + 1), "index", "experiments.json")
)[0]
for i in range(4)
]
extra_args = ["known_symmetry.space_group=I4", "filter_ice=False"]
expected_unit_cell = uctbx.unit_cell((7.310, 7.310, 6.820, 90.000, 90.000, 90.000))
expected_rmsds = (0.10, 0.7, 0.5)
expected_hall_symbol = " I 4"
result = run_indexing(
pickle_paths,
sequence_paths,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
assert len(result.indexed_reflections) > 1250, len(result.indexed_reflections)
def test_index_small_molecule_multi_sequence_3(dials_regression, tmpdir):
# test for small molecule multi-sequence indexing, 3 sequences with different values
# of goniometer setting rotation (i.e. phi scans)
data_dir = os.path.join(dials_regression, "dials-191")
pickle_paths = [
glob.glob(os.path.join(data_dir, "*SWEEP%i*_strong.pickle" % (i + 1)))[0]
for i in range(3)
]
sequence_paths = [
glob.glob(os.path.join(data_dir, "*SWEEP%i*_experiments.json" % (i + 1)))[0]
for i in range(3)
]
extra_args = ["filter_ice=False"]
expected_unit_cell = uctbx.unit_cell(
(9.440, 15.313, 17.126, 90.073, 90.106, 79.248)
)
expected_rmsds = (0.32, 0.34, 0.005)
expected_hall_symbol = " P 1"
result = run_indexing(
pickle_paths,
sequence_paths,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
assert len(result.indexed_reflections) > 12000, len(result.indexed_reflections)
# expect at least 2000 indexed reflections per experiment
for i in range(3):
assert (result.indexed_reflections["id"] == i).count(True) > 2000
def test_index_small_molecule_ice_max_cell(dials_regression, tmpdir):
# test for small molecule indexing: presence of ice rings makes max-cell
# estimation tricky
data_dir = os.path.join(dials_regression, "indexing_test_data", "MXSW-904")
pickle_path = os.path.join(data_dir, "1_SWEEP1_strong.pickle")
experiments = os.path.join(data_dir, "1_SWEEP1_experiments.json")
extra_args = ["filter_ice=False"]
expected_unit_cell = uctbx.unit_cell((11.72, 11.72, 11.74, 109.08, 109.24, 108.99))
expected_rmsds = (0.06, 0.05, 0.04)
expected_hall_symbol = " P 1"
result = run_indexing(
pickle_path,
experiments,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
assert len(result.indexed_reflections) > 1300, len(result.indexed_reflections)
@pytest.mark.xfail
def test_refinement_failure_on_max_lattices_a15(dials_regression, tmpdir):
"""Problem: Sometimes there is enough data to index, but not enough to
refine. If this happens in the (N>1)th crystal of max_lattices, then
all existing solutions are also dropped."""
data_dir = os.path.join(dials_regression, "indexing_test_data", "lattice_failures")
result = procrunner.run(
[
"dials.index",
os.path.join(data_dir, "lpe4-2-a15_strong.pickle"),
os.path.join(data_dir, "lpe4-2-a15_datablock.json"),
"max_lattices=3",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("indexed.refl").check() and tmpdir.join("indexed.expt").check()
experiments_list = load.experiment_list(
tmpdir.join("indexed.expt").strpath, check_format=False
)
assert len(experiments_list) == 2
# now try to reindex with existing model
result = procrunner.run(
[
"dials.index",
tmpdir.join("indexed.expt").strpath,
os.path.join(data_dir, "lpe4-2-a15_strong.pickle"),
"max_lattices=2",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("indexed.refl").check() and tmpdir.join("indexed.expt").check()
experiments_list = load.experiment_list(
tmpdir.join("indexed.expt").strpath, check_format=False
)
assert len(experiments_list) == 2
def test_stills_indexer_multi_lattice_bug_MosaicSauter2014(dials_regression, tmpdir):
"""Problem: In stills_indexer, before calling the refine function, the
experiment list contains a list of dxtbx crystal models (that are not
MosaicSauter2014 models). The conversion to MosaicSauter2014 is made
during the refine step when functions from nave_parameters is called.
If the experiment list contains more than 1 experiment, e.g.
multiple lattices, only the first crystal gets assigned mosaicity. In
actuality, all crystal models should be assigned mosaicity. This test
only compares whether or not all crystal models have been assigned a
MosaicSauter2014 model."""
import dxtbx.model
from dxtbx.model import Crystal
from dxtbx.model.experiment_list import (
Experiment,
ExperimentList,
ExperimentListFactory,
)
from dials.algorithms.indexing.stills_indexer import StillsIndexer
from dials.array_family import flex
from dials.command_line.stills_process import (
phil_scope as stills_process_phil_scope,
)
experiment_data = os.path.join(
dials_regression,
"refinement_test_data",
"cspad_refinement",
"cspad_refined_experiments_step6_level2_300.json",
)
reflection_data = os.path.join(
dials_regression,
"refinement_test_data",
"cspad_refinement",
"cspad_reflections_step7_300.pickle",
)
refl = flex.reflection_table.from_file(reflection_data)
explist = ExperimentListFactory.from_json_file(experiment_data, check_format=False)[
0:2
]
reflist = refl.select(refl["id"] < 2) # Only use the first 2 for convenience
# Construct crystal models that don't have mosaicity. These A,B,C values are the same
# as read in from the dials_regression folder
# Crystal-0
cs0 = Crystal(explist[0].crystal)
exp0 = Experiment(
imageset=explist[0].imageset,
beam=explist[0].beam,
detector=explist[0].detector,
goniometer=None,
scan=None,
crystal=cs0,
)
# Crystal-1
cs1 = Crystal(explist[1].crystal)
exp1 = Experiment(
imageset=explist[1].imageset,
beam=explist[1].beam,
detector=explist[1].detector,
goniometer=None,
scan=None,
crystal=cs1,
)
# Construct a new experiment_list that will be passed on for refinement
unrefined_explist = ExperimentList([exp0, exp1])
# Get default params from stills_process and construct StillsIndexer, then run refinement
params = stills_process_phil_scope.extract()
SI = StillsIndexer(reflist, unrefined_explist, params=params)
refined_explist, new_reflist = SI.refine(unrefined_explist, reflist)
# Now check whether the models have mosaicity after stills_indexer refinement
# Also check that mosaicity values are within expected limits
for ii, crys in enumerate(refined_explist.crystals()):
assert isinstance(crys, dxtbx.model.MosaicCrystalSauter2014)
if ii == 0:
assert crys.get_domain_size_ang() == pytest.approx(2242.0, rel=0.1)
if ii == 1:
assert crys.get_domain_size_ang() == pytest.approx(2689.0, rel=0.1)
@pytest.mark.parametrize(
"indexer_type,fix_cell", (("sequences", False), ("stills", True))
)
def test_index_ED_still_low_res_spot_match(dials_data, tmpdir, indexer_type, fix_cell):
# test indexing from a single simulated lysozyme ED still
image_path = dials_data("image_examples").join("simtbx_FormatSMVJHSim_001.img")
command = ["dials.import", image_path]
result = procrunner.run(command, working_directory=tmpdir)
assert not result.returncode and not result.stderr
experiment = tmpdir.join("imported.expt")
assert experiment.check()
command = ["dials.find_spots", "nproc=1", experiment]
result = procrunner.run(command, working_directory=tmpdir)
assert not result.returncode and not result.stderr
reflections = tmpdir.join("strong.refl")
extra_args = [
"indexing.method=low_res_spot_match",
"known_symmetry.space_group=P43212",
"known_symmetry.unit_cell=78.84,78.84,38.29,90,90,90",
"stills.indexer=" + indexer_type,
"n_macro_cycles=2",
"detector.fix_list=Dist",
]
if fix_cell:
extra_args += ["crystal.fix=cell"]
expected_unit_cell = uctbx.unit_cell((78.84, 78.84, 38.29, 90, 90, 90))
expected_rmsds = (0.0065, 0.0065, 0.000)
expected_hall_symbol = " P 4nw 2abw"
run_indexing(
reflections,
experiment,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
@pytest.mark.parametrize(
"cell_params",
[
(44.47, 52.85, 62.23, 115.14, 101.72, 90.01),
(52.85, 62.23, 44.47, 101.72, 90.01, 115.14),
],
)
def test_unconventional_P1_cell(dials_data, tmpdir, cell_params):
"""
Indexing in P1 should succeed even if the cell parameters are provided in
a non-conventional setting
"""
data_dir = dials_data("mpro_x0305_processed", pathlib=True)
experiment = data_dir / "imported.expt"
reflections = data_dir / "strong.refl"
cell_params_str = ",".join([str(x) for x in cell_params])
extra_args = [
"indexing.method=fft3d",
"known_symmetry.space_group=P1",
"known_symmetry.unit_cell=" + cell_params_str,
]
expected_unit_cell = uctbx.unit_cell(cell_params)
expected_rmsds = (1, 1, 1)
expected_hall_symbol = " P 1"
run_indexing(
reflections,
experiment,
tmpdir,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
def test_real_space_grid_search_no_unit_cell(dials_regression, tmpdir):
data_dir = os.path.join(dials_regression, "indexing_test_data", "i04_weak_data")
experiments_json = os.path.join(data_dir, "experiments_import.json")
pickle_path = os.path.join(data_dir, "full.pickle")
commands = [
"dials.index",
experiments_json,
pickle_path,
"indexing.method=real_space_grid_search",
]
result = procrunner.run(commands, working_directory=tmpdir)
assert result.stderr
assert (
result.stderr.strip()
== b"Target unit cell must be provided for real_space_grid_search"
)
def test_index_known_orientation(dials_data, tmpdir):
data_dir = dials_data("vmxi_proteinase_k_sweeps")
experiments_json = data_dir.join("experiments_0.json").strpath
reflections = data_dir.join("reflections_0.pickle").strpath
expected_unit_cell = uctbx.unit_cell((68.395, 68.395, 104, 90, 90, 90))
expected_rmsds = (0.013, 0.012, 0.008)
expected_hall_symbol = " P 4"
run_indexing(
reflections,
experiments_json,
tmpdir,
[],
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
def test_all_expt_ids_have_expts(dials_data, tmpdir):
result = procrunner.run(
[
"dials.index",
dials_data("vmxi_thaumatin_grid_index").join("split_07602.expt"),
dials_data("vmxi_thaumatin_grid_index").join("split_07602.refl"),
"stills.indexer=sequences",
"indexing.method=real_space_grid_search",
"space_group=P4",
"unit_cell=58,58,150,90,90,90",
"max_lattices=8",
"beam.fix=all",
"detector.fix=all",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("indexed.expt").check(file=1)
assert tmpdir.join("indexed.refl").check(file=1)
refl = flex.reflection_table.from_file(tmpdir / "indexed.refl")
expt = ExperimentList.from_file(tmpdir / "indexed.expt", check_format=False)
assert flex.max(refl["id"]) + 1 == len(expt)
|
dials/dials
|
tests/algorithms/indexing/test_index.py
|
Python
|
bsd-3-clause
| 28,216
|
[
"CRYSTAL"
] |
ce5e82e6d04918833991fcdf1382c911d13f38ff4f126313772504f3fdce401f
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-15 20:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stein', '0006_auto_20151115_1744'),
]
operations = [
migrations.AddField(
model_name='mineraltype',
name='cleavage2',
field=models.CharField(blank=True, choices=[('PE', 'Perfect'), ('LP', 'Less perfect'), ('GO', 'Good'),
('DI', 'Distinct'), ('ID', 'Indistinct'), ('NO', 'None')],
max_length=2, verbose_name='cleavage 2'),
),
migrations.AddField(
model_name='mineraltype',
name='density',
field=models.CharField(default=0, max_length=20, verbose_name='density'),
),
migrations.AddField(
model_name='mineraltype',
name='fracture2',
field=models.CharField(blank=True, choices=[('CF', 'Conchoidal'), ('EF', 'Earthy'), ('HF', 'Hackly'),
('SF', 'Splintery'), ('UF', 'Uneven')], max_length=2,
verbose_name='fracture 2'),
),
migrations.AddField(
model_name='mineraltype',
name='lustre2',
field=models.CharField(blank=True, choices=[('AM', 'Adamantine'), ('DL', 'Dull'), ('GR', 'Greasy'),
('MT', 'Metallic'), ('PY', 'Pearly'), ('SL', 'Silky'),
('SM', 'Submetallic'), ('VT', 'Vitreous'), ('WY', 'Waxy')],
max_length=2, verbose_name='lustre 2'),
),
migrations.AddField(
model_name='mineraltype',
name='other',
field=models.CharField(blank=True, max_length=100, verbose_name='other'),
),
migrations.AddField(
model_name='mineraltype',
name='resource_mindat',
field=models.CharField(blank=True, max_length=100, verbose_name='MinDat ID'),
),
migrations.AddField(
model_name='mineraltype',
name='resource_mineralienatlas',
field=models.CharField(blank=True, max_length=100, verbose_name='MineralienAtlas ID'),
),
migrations.AddField(
model_name='mineraltype',
name='systematics',
field=models.CharField(
choices=[('EL', 'Elements'), ('SF', 'Sulfides & Sulfosalts'), ('HG', 'Halogenides'),
('OH', 'Oxides and Hydroxides'), ('CN', 'Carbonates and Nitrates'), ('BR', 'Borates'),
('SL', 'Sulfates'), ('PV', 'Phosphates, Arsenates & Vanadates'),
('SG', 'Silicates & Germanates'), ('OC', 'Organic Compounds')], default='EL', max_length=2,
verbose_name='systematics'),
),
migrations.AlterField(
model_name='handpiece',
name='current_location',
field=models.CharField(blank=True, max_length=200, verbose_name='current location'),
),
migrations.AlterField(
model_name='handpiece',
name='finding_place',
field=models.CharField(blank=True, max_length=200, verbose_name='place of discovery'),
),
migrations.AlterField(
model_name='mineraltype',
name='crystal_system',
field=models.CharField(blank=True,
choices=[('TC', 'Triclinic'), ('MC', 'Monoclinic'), ('OR', 'Orthorhombic'),
('TT', 'Tetragonal'), ('TR', 'Trigonal'), ('HG', 'Hexagonal'),
('CB', 'Cubic')], max_length=2, verbose_name='crystal system'),
),
migrations.AlterField(
model_name='mineraltype',
name='minerals',
field=models.CharField(blank=True, max_length=100, verbose_name='minerals'),
),
migrations.AlterField(
model_name='mineraltype',
name='trivial_name',
field=models.CharField(blank=True, max_length=100, verbose_name='trivial name'),
),
migrations.AlterField(
model_name='mineraltype',
name='variety',
field=models.CharField(blank=True, max_length=100, verbose_name='variety'),
),
]
|
GeoMatDigital/django-geomat
|
geomat/stein/migrations/0007_auto_20160615_2017.py
|
Python
|
bsd-3-clause
| 4,515
|
[
"CRYSTAL"
] |
e932c881f635a4b99b97364a9d9a65f4e8bb78e687d4553c6ad11437ea63cee9
|
"""
Module: audit_tie_breaker
Author: Berj K. Chilingirian
Date: 10 August 2016
Description:
The Australian Senate Election (ASE) may require an Australian Election
Official (AEO) to break a tie. There are three cases in which ties must
be broken manually by an AEO:
Case 1: Election Order Tie
If multiple candidates hold the same number of votes and that
number is greater than the quota, then a previous round in which
those candidates held a differing number of votes is used to
determine the election order. If no such round exists, the AEO
determines a permutation of candidate IDs, specifying the order
in which those candidates are elected.
Case 2: Election Tie
If there are two final continuing candidates, with one remaining
vacancy, and both candidates hold the same number of votes, the
AEO decides which candidate is elected.
Case 3: Exclusion Tie
If a candidate must be excluded, then the lowest number of votes
held by any candidate is found. If multiple candidates hold that
number of votes, the same tie breaker system as in (1) is used.
If that fails, the AEO decides what candidate is excluded.
We want our auditing procedures to be as consistent as possible with the
real ASE. However, the Commonwealth Electoral Act of 1981 does not specify
the tie-breaking procedure of an AEO. Thus, we use tie-breaking information
from the real election to resolve ties encountered during our audit.
To do this, we:
(1) Ingest tie-breaking information from the real ASE.
(2) Construct a directed, acyclic graph where a directed edge from A to B
represents situations where A is elected over B (cases 1, 2) or B is
excluded (case 3). Note that more than one edge may be created by a
given tie-breaking event.
(3) Sort the vertices into a linear order using a random topological sort.
(4) Use the linear ordering discovered in Step 3 to break all ties
encountered during the audit. In other words, given candidate IDs A
and B, prefer electing/not-excluding the candidate earlier in the
linear order.
Usage:
.. code-block:: python3
import audit_tie_breaker
atb = AuditTieBreaker(['A', 'B', 'C', 'D', 'E', 'F', 'G'], verbose=True)
atb.load_election(
[ # Election Order Tie Events
[
[
['A', 'B', 'C'],
['A', 'C', 'B'],
['B', 'A', 'C'],
['B', 'C', 'A'],
['C', 'A', 'B'],
['C', 'B', 'A'],
],
['B', 'A', 'C'],
],
],
[ # Election Tie Events
[
['D', 'G'],
'G',
],
],
[ # Exclusion Tie Events
[
['D', 'E', 'F'],
'E',
],
],
)
# (print out)
# = Building Audit Tie-Breaking Graph...
# - Added edge B -> C because B preferred to C in resolution for election order tie.
# - Added edge B -> A because B preferred to A in resolution for election order tie.
# - Added edge C -> A because C preferred to A in resolution for election order tie.
# - Added edge G -> D because G elected over D.
# - Added edge D -> E because E excluded over D.
# - Added edge F -> E because E excluded over F.
# --> Linear order determined as B, C, A, F, G, D, E.
# = Verifying linear order is consistent with real election's tie-breaking events...
# - Election order tie between ['A', 'B', 'C'] broken with permutation ['B', 'C', 'A'].
# - Election tie between ['D', 'G'] broken by electing G.
# - Exclusion tie between ['D', 'E', 'F'] broken by excluding E.
# --> Linear order is consistent with real election's tie-breaking events.
atb.break_tie(['A', 'B', 'C'], 1)
# - Election order tie between ['A', 'B', 'C'] broken with permutation ['B', 'C', 'A'].
"""
import itertools
import random
import sys
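# Illustrative sketch (not part of the original module): a minimal random topological
# sort over a small adjacency-list graph, mirroring steps (2)-(4) of the module
# docstring. The function name and the example graph in the comment below are
# hypothetical and only demonstrate the idea that the AuditTieBreaker class implements.
def _random_topological_order_sketch(adjacency, seed=1):
    """ Returns one linear order consistent with the directed edges in `adjacency` (assumed acyclic). """
    rng = random.Random(seed)
    order = []
    def visit(vertex):
        # A vertex already placed in the order has been fully explored.
        if vertex in order:
            return
        neighbors = list(adjacency.get(vertex, []))
        rng.shuffle(neighbors)
        for neighbor in neighbors:
            visit(neighbor)
        # Everything reachable from `vertex` is already in the order, so placing
        # `vertex` at the head keeps it ahead of all vertices it points to.
        order.insert(0, vertex)
    vertices = sorted(adjacency)
    rng.shuffle(vertices)
    for vertex in vertices:
        visit(vertex)
    return order
# Example (hypothetical graph): with edges B->A, B->C and C->A the only consistent order
# is ['B', 'C', 'A'], i.e. _random_topological_order_sketch({'A': [], 'B': ['A', 'C'], 'C': ['A']}).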
class AuditTieBreaker(object):
""" Implements a class for breaking ties encountered during an audit.
:ivar dict _vertices: A mapping from a vertex in the audit tie breaking graph to its neighbors.
:ivar function _print_fn: A function which takes a string as input and writes it to the appropriate file.
:ivar dict _linear_order: A linear ordering of the candidate IDs of all candidates in the contest being audited. The
linear ordering is represented as a mapping from a candidate ID to its position in the linear order.
"""
WRITE_OPT = 'w'
COMMA_DELIM = ', '
# Tie-Breaking Event IDs.
ELECTION_ORDER_TIE_ID = 1
ELECTION_TIE_ID = 2
EXCLUSION_TIE_ID = 3
# Messages to be printed during verbose mode.
BUILDING_GRAPH_MSG = '= Building Audit Tie-Breaking Graph...'
VERIFY_LINEAR_ORDER_MSG = '\n= Verifying linear order is consistent with real election\'s tie-breaking events...'
IS_CONSISTENT_MSG = ' --> Linear order is consistent with real election\'s tie-breaking events.\n'
# Messages to be printed during verbose mode - require formatting.
ADDED_ELECTION_ORDER_TIE_EDGE \
= ' - Added edge {0} -> {1} because {0} preferred to {1} in resolution for election order tie.'
ADDED_ELECTION_TIE_EDGE = ' - Added edge {0} -> {1} because {0} elected over {1}.'
ADDED_EXCLUSION_TIE_EDGE = ' - Added edge {0} -> {1} because {1} excluded over {0}.'
LINEAR_ORDER_MSG = ' --> Linear order determined as {0}.'
ELECTION_ORDER_TIE_BREAK = ' - Election order tie between {0} broken with permutation {1}.'
ELECTION_TIE_BREAK = ' - Election tie between {0} broken by electing {1}.'
EXCLUSION_TIE_BREAK = ' - Exclusion tie between {0} broken by excluding {1}.'
def __init__(self, candidate_ids, seed=1, verbose=False, out_f=None):
""" Initializes the `AuditTieBreaker` object.
:param list candidate_ids: A list of the candidate IDs of all candidates in the election contest.
:param bool verbose: A flag indicating whether or not the `AuditTieBreaker` object should be verbose when
loading data/breaking ties.
:param int seed: An integer specifying the random seed to use when determining the random topological sort of
the candidate IDs of all candidates in the election (default: 1).
:param str out_f: A string representing the name of a file to write all debug information to (default: stdout).
Only used when `verbose` is true.
"""
random.seed(seed)
self._vertices = {candidate_id: [] for candidate_id in candidate_ids}
self._print_fn = AuditTieBreaker._setup_print_fn(out_f) if verbose else lambda x: None
self._linear_order = {}
@staticmethod
def _setup_print_fn(out_f):
""" Returns a function to be used for writing verbose information.
:param str out_f: A string representing the name of a file to write all debug information to.
:return: A function which takes a string as input and writes it to the appropriate file.
:rtype: function
"""
def print_wrapper(x):
print(x)
if out_f is not None:
sys.stdout = open(out_f, AuditTieBreaker.WRITE_OPT)
return print_wrapper
def _visit(self, v, linear_order):
""" Explores the neighbors of the given node recursively and then adds the explored node to the head of the
linear order.
:param int v: A candidate ID.
:param list linear_order: The linear order of candidate IDs so far.
"""
if v in linear_order:
# Do not explore a node twice.
return
random.shuffle(self._vertices[v])
for u in self._vertices[v]:
self._visit(u, linear_order)
linear_order.insert(0, v)
def load_events(self, election_order_ties, election_ties, exclusion_ties):
""" Loads all tie-breaking events specified in `events_f`.
:param list election_order_ties: A list of 2-tuples representing election order tie events where the first entry
is the permutations of election orders and the second entry is the permutation that resolved the tie.
:param list election_ties: A list of 2-tuples representing election tie events where the first entry is the IDs
of the candidates tied for election and the second entry is the candidate ID that resolved the tie.
:param list exclusion_ties: A list of 2-tuples representing exclusion tie events where the first entry is the
IDs of the candidates tied for exclusion and the second entry is the candidate ID that resolved the tie.
"""
# Construct audit tie-breaking graph.
self._print_fn(AuditTieBreaker.BUILDING_GRAPH_MSG)
# Process Election Order Tie Events.
for _, resolution in election_order_ties:
for src_cid, dest_cid in itertools.combinations(resolution, 2):
self._vertices[src_cid].append(dest_cid)
self._print_fn(AuditTieBreaker.ADDED_ELECTION_ORDER_TIE_EDGE.format(src_cid, dest_cid))
# Process Election Tie Events.
for candidate_ids, resolution in election_ties:
for cid in candidate_ids:
if cid != resolution:
self._vertices[resolution].append(cid)
self._print_fn(AuditTieBreaker.ADDED_ELECTION_TIE_EDGE.format(resolution, cid))
# Process Exclusion Tie Events.
for candidate_ids, resolution in exclusion_ties:
for cid in candidate_ids:
if cid != resolution:
self._vertices[cid].append(resolution)
self._print_fn(AuditTieBreaker.ADDED_EXCLUSION_TIE_EDGE.format(cid, resolution))
# Determine a random topological sorting of the vertices in the audit tie-breaking graph.
vertices = sorted(self._vertices.keys())
random.shuffle(vertices)
linear_order = []
for v in vertices:
self._visit(v, linear_order)
self._linear_order = {linear_order[i]: i for i in range(len(linear_order))}
self._print_fn(AuditTieBreaker.LINEAR_ORDER_MSG.format(
AuditTieBreaker.COMMA_DELIM.join([str(x) for x in linear_order]))
)
# Verify linear order is consistent with the real election's tie-breaking events.
self._print_fn(AuditTieBreaker.VERIFY_LINEAR_ORDER_MSG)
for permutations, resolution in election_order_ties:
assert self.break_election_order_tie(permutations) == permutations.index(resolution)
for candidate_ids, resolution in election_ties:
assert self.break_election_tie(candidate_ids) == candidate_ids.index(resolution)
for candidate_ids, resolution in exclusion_ties:
assert self.break_exclusion_tie(candidate_ids) == candidate_ids.index(resolution)
self._print_fn(AuditTieBreaker.IS_CONSISTENT_MSG)
def break_tie(self, candidate_ids, case_num):
""" Returns the resolution for the given candidate IDs and case number.
:param list candidate_ids: A list of candidate IDs.
:param int case_num: An integer identifying the tie-breaking case.
:return: The resolution for the given candidate IDs and case number.
:rtype: A single candidate ID (cases 2,3) or a permutation of the given candidate IDs (case 1).
"""
cids_to_order = {cid: self._linear_order[cid] for cid in candidate_ids}
resolution = sorted(cids_to_order, key=cids_to_order.__getitem__)
if case_num == AuditTieBreaker.ELECTION_ORDER_TIE_ID:
result = resolution
self._print_fn(AuditTieBreaker.ELECTION_ORDER_TIE_BREAK.format(candidate_ids, result))
elif case_num == AuditTieBreaker.ELECTION_TIE_ID:
result = resolution[0]
self._print_fn(AuditTieBreaker.ELECTION_TIE_BREAK.format(candidate_ids, result))
else:
result = resolution[-1]
self._print_fn(AuditTieBreaker.EXCLUSION_TIE_BREAK.format(candidate_ids, result))
return result
def break_election_order_tie(self, permutations):
""" Convenience wrapper for breaking election order ties. """
return permutations.index(tuple(self.break_tie(permutations[0], AuditTieBreaker.ELECTION_ORDER_TIE_ID)))
def break_election_tie(self, candidate_ids):
""" Convenience wrapper for breaking election ties. """
return candidate_ids.index(self.break_tie(candidate_ids, AuditTieBreaker.ELECTION_TIE_ID))
def break_exclusion_tie(self, candidate_ids):
""" Convenience wrapper for breaking exclusion ties. """
return candidate_ids.index(self.break_tie(candidate_ids, AuditTieBreaker.EXCLUSION_TIE_ID))
def test_audit_tie_breaker():
""" Tests the `AuditTieBreaker` implementation. """
# Test `AuditTieBreaker` implementation.
audit_tb = AuditTieBreaker(['A', 'B', 'C', 'D', 'E', 'F', 'G'], verbose=True)
audit_tb.load_events(
[ # Election Order Tie Events
[
[
['A', 'B', 'C'],
['A', 'C', 'B'],
['B', 'A', 'C'],
['B', 'C', 'A'],
['C', 'A', 'B'],
['C', 'B', 'A'],
],
['B', 'A', 'C'],
],
],
[ # Election Tie Events
[
['D', 'G'],
'G',
],
],
[ # Exclusion Tie Events
[
['D', 'E', 'F'],
'E',
],
],
)
audit_tb._print_fn('= Running AuditTieBreaker tests...')
assert audit_tb.break_tie(['A', 'B', 'C'], 1) == ['B', 'A', 'C']
assert audit_tb.break_tie(['D', 'E', 'F'], 3) == 'E'
assert audit_tb.break_tie(['D', 'G'], 2) == 'G'
assert audit_tb.break_tie(['B', 'F'], 2) == 'B' # Test depends on random.seed of 1.
assert audit_tb.break_tie(['B', 'F'], 3) == 'F' # Test depends on random.seed of 1.
audit_tb._print_fn(' --> Tests PASSED!')
if __name__ == '__main__':
# Runs AuditTieBreaker Tests.
test_audit_tie_breaker()
|
berjc/aus-senate-audit
|
aus_senate_audit/audit_tie_breaker.py
|
Python
|
apache-2.0
| 14,693
|
[
"ASE"
] |
eae5eeb2f8fe6eae498c5e0d7b9c8008154b19494eeb4ce61a2e96718cca977a
|
# Tests for basic Tkinter widgets.
import Tkinter
import Test
Test.initialise()
testData = ()
if Tkinter.TkVersion >= 8.0:
button_num = 31
frame_num = 16
menu_num = 20
menubutton_num = 32
else:
button_num = 30
frame_num = 15
menu_num = 19
menubutton_num = 31
c = Tkinter.Button
tests = (
(c.pack, ()),
(Test.num_options, (), button_num),
('text', 'Hello World'),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
('command', Test.callback),
(c.flash, ()),
(c.invoke, (), '1'),
)
testData = testData + ((c, ((tests, {}),)),)
c = Tkinter.Canvas
tests = (
(c.pack, ()),
(Test.num_options, (), 27),
('background', 'aliceblue'),
(c.create_oval, (100, 100, 200, 200),
{'fill' : 'lightsteelblue1', 'tags' : 'circle'}, 1),
(c.create_rectangle, (200, 100, 300, 200),
{'fill' : 'lightsteelblue2', 'tags' : 'square'}, 2),
(c.create_text, (0, 200),
{'text' : 'Hello, world', 'tags' : 'words', 'anchor' : 'w'}, 3),
(c.addtag_withtag, ('lightsteelblue1', 'circle')),
(c.bbox, ('circle', 'square'), (99, 99, 301, 201)),
(c.tag_bind, ('circle', '<1>', Test.callback)),
(c.tag_bind, 'circle', '<Button-1>'),
(c.tag_unbind, ('circle', '<1>')),
(c.canvasx, 100, 100.0),
(c.canvasy, 100, 100.0),
(c.coords, 'circle', [100.0, 100.0, 200.0, 200.0]),
(c.coords, ('circle', 0, 0, 300, 300), []),
(c.coords, 'circle', [0.0, 0.0, 300.0, 300.0]),
(c.find_withtag, 'lightsteelblue1', (1,)),
(c.focus, 'circle', ''),
(c.gettags, 'circle', ('circle', 'lightsteelblue1')),
(c.icursor, ('words', 7)),
(c.index, ('words', 'insert'), 7),
(c.insert, ('words', 'insert', 'cruel ')),
(c.itemconfigure, 'circle', {'fill': 'seagreen4'}),
(c.itemcget, ('circle', 'fill'), 'seagreen4'),
(c.lower, 'words'),
(c.move, ('square', -50, -50)),
(c.tkraise, ('words', 'circle')),
(c.scale, ('circle', 150, 150, 1.0, 0.5)),
(c.select_from, ('words', 0)),
(c.select_to, ('words', 'end')),
(c.delete, 'square'),
(c.type, 'circle', 'oval'),
(c.dtag, 'lightsteelblue1'),
)
testData = testData + ((c, ((tests, {}),)),)
c = Tkinter.Checkbutton
tests = (
(c.pack, ()),
(Test.num_options, (), 36),
('text', 'Hello World'),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
('command', Test.callback),
(c.flash, ()),
(c.invoke, (), '1'),
)
testData = testData + ((c, ((tests, {}),)),)
c = Tkinter.Entry
tests = (
(c.pack, ()),
(Test.num_options, (), 28),
('background', 'lightsteelblue1'),
(c.insert, ('insert', 'Hello, Brian!')),
(c.delete, (7, 12)),
(c.icursor, 7),
(c.insert, ('insert', 'world')),
(c.get, (), 'Hello, world!'),
(c.index, 'insert', 12),
(c.selection_from, 7),
(c.selection_to, '12'),
)
testData = testData + ((c, ((tests, {}),)),)
c = Tkinter.Frame
tests = (
(c.pack, ()),
(Test.num_options, (), frame_num),
('background', 'lightsteelblue1'),
('width', 300),
('height', 50),
('background', 'lightsteelblue1'),
)
testData = testData + ((c, ((tests, {}),)),)
c = Tkinter.Label
tests = (
(c.pack, ()),
(Test.num_options, (), 25),
('text', 'Hello World'),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
('image', Test.earthris),
)
testData = testData + ((c, ((tests, {}),)),)
c = Tkinter.Listbox
tests = (
(c.pack, ()),
(Test.num_options, (), 23),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
(c.insert, (0, 'ABC', 'DEF', 'GHI', 'XXXXXXXXXXXX')),
(c.activate, 1),
(c.select_set, (2, 3)),
(c.curselection, (), ('2', '3')),
(c.delete, 1),
(c.get, 1, 'GHI'),
(c.get, (0, 1), ('ABC', 'GHI')),
(c.index, 'end', 3),
(c.nearest, 1, 0),
(c.see, 1),
(c.size, (), 3),
)
testData = testData + ((c, ((tests, {}),)),)
c = Tkinter.Menu
tests = (
(Test.num_options, (), menu_num),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
(c.add_command, (),
{'background': 'lightsteelblue2', 'label': 'Hello World'}),
(c.add_checkbutton, (),
{'background': 'lightsteelblue2', 'label': 'Charm'}),
(c.post, (100, 100)),
(c.activate, 1),
(c.entryconfigure, 'Hello World', {'background': 'aliceblue'}),
(c.entrycget, ('Hello World', 'background'), 'aliceblue'),
(c.index, 'end', 2),
('tearoff', 0),
(c.index, 'end', 1),
(c.insert_radiobutton, 'Charm',
{'background': 'lightsteelblue2', 'label': 'Niceness',
'command': Test.callback}),
(c.invoke, 'Niceness', '1'),
(c.delete, 'Charm'),
(c.type, 'Hello World', 'command'),
(c.yposition, 'Hello World', 2),
(c.unpost, ()),
)
testData = testData + ((c, ((tests, {}),)),)
c = Tkinter.Menubutton
tests = (
(c.pack, ()),
(Test.num_options, (), menubutton_num),
('text', 'Hello World'),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
)
testData = testData + ((c, ((tests, {}),)),)
c = Tkinter.Message
tests = (
(c.pack, ()),
(Test.num_options, (), 21),
('text', 'Hello World'),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
('text', 'Hello\nCruel Cruel World'),
('borderwidth', 100),
('justify', 'center'),
('justify', 'right'),
('justify', 'left'),
)
testData = testData + ((c, ((tests, {}),)),)
c = Tkinter.Radiobutton
tests = (
(c.pack, ()),
(Test.num_options, (), 35),
('text', 'Hello World'),
('value', 'Foo Bar'),
('variable', Test.stringvar),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
('text', 'Hello\nCruel Cruel World'),
('command', Test.callback),
(c.select, ()),
(Test.stringvar.get, (), 'Foo Bar'),
(c.flash, ()),
(c.invoke, (), '1'),
(c.deselect, ()),
(Test.stringvar.get, (), ''),
)
testData = testData + ((c, ((tests, {}),)),)
c = Tkinter.Scale
tests = (
(c.pack, ()),
(Test.num_options, (), 33),
('showvalue', 1),
('orient', 'horizontal'),
('from', 100.0),
('to', 200.0),
('variable', Test.floatvar),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
('command', Test.callback1),
(c.set, 150.0),
(c.get, (), 150.0),
(c.get, 123, 'TypeError: too many arguments; expected 1, got 2'),
)
testData = testData + ((c, ((tests, {}),)),)
c = Tkinter.Scrollbar
tests = (
(c.pack, (), {'fill': 'x'}),
(Test.num_options, (), 20),
('orient', 'horizontal'),
(Test.set_geom, (300, 50)),
(c.set, (0.3, 0.7)),
('background', 'lightsteelblue1'),
('troughcolor', 'aliceblue'),
(c.get, (), (0.3, 0.7)),
(c.activate, 'slider'),
(c.set, (0.5, 0.9)),
(c.delta, (0, 0), 0),
(c.fraction, (0, 0), 0),
)
testData = testData + ((c, ((tests, {}),)),)
c = Tkinter.Text
tests = (
(c.pack, ()),
(Test.num_options, (), 35),
('background', 'lightsteelblue1'),
(c.insert, ('end', 'This little piggy is bold.', 'bold', '\n')),
(c.insert, ('end', 'This little piggy is in green.', 'green', '\n')),
(c.insert, ('end', 'This line is a mistake.\n')),
(c.insert, ('end', 'This little piggy is crossed out.', 'overstrike', '\n')),
(c.insert, ('end', 'This little piggy is raised.', 'raised', '\n')),
(c.insert, ('end', 'This little piggy is underlined.', 'underline', '\n')),
(c.tag_configure, 'bold', {'font': Test.font['variable']}),
(c.tag_configure, 'green', {'background': 'seagreen1'}),
(c.tag_configure, 'overstrike', {'overstrike': 1}),
(c.tag_configure, 'raised',
{'background': 'aliceblue', 'borderwidth': 2, 'relief': 'raised'}),
(c.tag_configure, 'underline', {'underline': 1}),
(c.compare, ('2.0', '<', 'end'), 1),
(c.delete, ('3.0', '4.0')),
(c.get, ('1.0', '1.4'), 'This'),
(c.index, 'end', '7.0'),
(c.mark_set, ('my_mark', '4.9')),
(c.mark_gravity, ('my_mark', 'right'), ''),
(c.mark_gravity, 'my_mark', 'right'),
(c.mark_names, (), ('my_mark', 'insert', 'current')),
(c.mark_unset, 'my_mark'),
(c.insert, ('end', '\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n')),
(c.insert, ('end', 'This is the last line.')),
(c.scan_mark, (0, 20)),
(c.scan_dragto, (0, 0)),
(c.scan_dragto, (0, 20)),
(c.tag_add, ('green', '1.0', '1.4')),
(c.tag_cget, ('raised', 'background'), 'aliceblue'),
(c.tag_lower, 'green'),
(c.tag_names, (),
('green', 'sel', 'bold', 'overstrike', 'raised', 'underline')),
(c.tag_nextrange, ('raised', '0.0'), ('4.0', '4.28')),
(c.tag_raise, 'green'),
(c.tag_ranges, 'green', ('1.0', '1.4', '2.0', '2.30')),
(c.tag_remove, ('green', '1.0', '1.4')),
(c.tag_ranges, 'green', ('2.0', '2.30')),
(c.tag_delete, 'green'),
(c.search, ('Gre.n', '0.0'), {'regexp': 1, 'nocase': 1}, '2.24'),
(c.search, ('Gre.n', '3.0', 'end'), {'regexp': 1, 'nocase': 1}, ''),
(c.see, 'end'),
(c.see, '0.0'),
)
testData = testData + ((c, ((tests, {}),)),)
#=============================================================================
# Grid command
def _makeGridButtons():
w = Test.currentWidget()
b1 = Tkinter.Button(w, text = 'Button 1')
b2 = Tkinter.Button(w, text = 'Button 2')
b3 = Tkinter.Button(w, text = 'Button 3')
b4 = Tkinter.Button(w, text = 'Button 4')
b5 = Tkinter.Button(w, text = 'Button 5')
b6 = Tkinter.Button(w, text = 'Button 6')
b7 = Tkinter.Button(w, text = 'Button 7')
b8 = Tkinter.Button(w, text = 'Button 8')
b1.grid(column=0, row=0)
b2.grid(column=1, row=0)
b3.grid(column=2, row=0, ipadx=50, ipady=50, padx=50, pady=50, sticky='nsew')
b4.grid(column=3, row=0)
b5.grid(column=0, row=1)
b6.grid(column=2, row=1, columnspan=2, rowspan=2, sticky='nsew')
b7.grid(column=0, row=2)
b8.grid(column=0, row=3, columnspan=4, padx=50, sticky='ew')
def _checkGridSlaves():
w = Test.currentWidget()
return len(w.grid_slaves())
def _checkGridInfo():
w = Test.currentWidget()
b8 = w.grid_slaves(column=0, row=3)[0]
info = b8.grid_info()
if info['in'] == w:
rtn = {}
for key, value in info.items():
if key != 'in':
rtn[key] = value
return rtn
return 'BAD'
def _checkGridForget():
w = Test.currentWidget()
b8 = w.grid_slaves(column=0, row=3)[0]
b8.grid_forget()
return w.grid_size()
# The -pad grid option was added in Tk 4.2.
# Could not do columnconfigure(0) before Tk 4.2.
if Tkinter.TkVersion >= 4.2:
padTest = {'pad': 25}
colTest = {'minsize': 100, 'pad': 25, 'weight': 1}
rowTest = {'minsize': 100, 'pad': 0, 'weight': 1}
else:
padTest = {'minsize': 100}
colTest = 'TclError: wrong # args: should be "grid columnconfigure master index ?-option value...?"'
rowTest = 'TclError: wrong # args: should be "grid rowconfigure master index ?-option value...?"'
c = Tkinter.Frame
tests = (
(c.pack, (), {'fill': 'both', 'expand': 1}),
(_makeGridButtons, ()),
# (c.grid_bbox, (1, 2), (85, 268, 85, 34)),
(c.grid_columnconfigure, (0, 'minsize'), 0),
(c.grid_columnconfigure, (0, 'weight'), 0),
(c.grid_columnconfigure, 0, {'minsize': 100, 'weight': 1}),
(c.grid_columnconfigure, 0, padTest),
(c.grid_columnconfigure, 0, {}, colTest),
(c.grid_columnconfigure, (0, 'minsize'), 100),
(c.grid_columnconfigure, (0, 'weight'), 1),
(c.location, (200, 100), (2, 0)),
(c.grid_propagate, (), 1),
(c.grid_propagate, 0),
(c.grid_propagate, (), 0),
(c.grid_rowconfigure, (0, 'minsize'), 0),
(c.grid_rowconfigure, (0, 'weight'), 0),
(c.grid_rowconfigure, 0, {'minsize': 100, 'weight': 1}),
(c.grid_rowconfigure, 0, {}, rowTest),
(c.grid_size, (), (4, 4)),
(_checkGridSlaves, (), 8),
(_checkGridInfo, (), {}, {'column': '0', 'columnspan': '4',
'ipadx': '0', 'ipady': '0', 'padx': '50', 'pady': '0',
'row': '3', 'rowspan': '1', 'sticky': 'ew',
}),
(_checkGridForget, (), (4, 3)),
(_checkGridSlaves, (), 7),
)
testData = testData + ((c, ((tests, {}),)),)
if __name__ == '__main__':
#Test.setverbose(1)
#Test.setdelay(1000)
Test.runTests(testData)
|
CasataliaLabs/biscuit_drishtiman
|
Pmw-2.0.0/build/lib.linux-x86_64-2.7/Pmw/Pmw_1_3_3/tests/Tkinter_test.py
|
Python
|
gpl-3.0
| 12,105
|
[
"Brian"
] |
4cd0991c892a3cc25df6c9462cd86deb6884742cca3c9fd24ee14bc6b80752ab
|
""" Unit test for ConsistencyInspector
"""
import unittest
import datetime
from mock import MagicMock
from DIRAC import gLogger
from DIRAC.Resources.Catalog.test.mock_FC import fc_mock
# sut
from DIRAC.DataManagementSystem.Client.ConsistencyInspector import ConsistencyInspector
class UtilitiesTestCase( unittest.TestCase ):
def setUp( self ):
gLogger.setLevel('DEBUG')
self.lfnDict = {'aa.raw': { 'aa.raw':{'FileType': 'RAW', 'RunNumber': 97019},
'/lhcb/1_2_1.Semileptonic.dst': {'FileType': 'SEMILEPTONIC.DST'}},
'cc.raw': { 'cc.raw':{'FileType': 'RAW', 'RunNumber': 97019},
'/lhcb/1_1.semileptonic.dst': {'FileType': 'SEMILEPTONIC.DST'}}
}
dmMock = MagicMock()
dicMock = MagicMock()
self.ci = ConsistencyInspector( transClient = MagicMock(), dm = dmMock, fc = fc_mock, dic = dicMock )
self.ci.fileType = ['SEMILEPTONIC.DST', 'LOG', 'RAW']
self.ci.fileTypesExcluded = ['LOG']
self.ci.prod = 0
self.maxDiff = None
class ConsistencyInspectorSuccess( UtilitiesTestCase ):
def test_getReplicasPresence(self):
res = self.ci.getReplicasPresence(['/this/is/dir1/file1.txt', '/this/is/dir1/file2.foo.bar'])
self.assertEqual( res, (['/this/is/dir1/file1.txt', '/this/is/dir1/file2.foo.bar'], []) )
def test__selectByFileType( self ):
lfnDict = {'aa.raw': {'bb.raw':{'FileType': 'RAW', 'RunNumber': 97019},
'bb.log':{'FileType': 'LOG'},
'/bb/pippo/aa.dst':{'FileType': 'DST'},
'/lhcb/1_2_1.Semileptonic.dst':{'FileType': 'SEMILEPTONIC.DST'}},
'cc.raw': {'dd.raw':{'FileType': 'RAW', 'RunNumber': 97019},
'bb.log':{'FileType': 'LOG'},
'/bb/pippo/aa.dst':{'FileType': 'LOG'},
'/lhcb/1_1.semileptonic.dst':{'FileType': 'SEMILEPTONIC.DST'}}
}
res = self.ci._selectByFileType( lfnDict )
lfnDictExpected = {'aa.raw': {'/lhcb/1_2_1.Semileptonic.dst': {'FileType': 'SEMILEPTONIC.DST'},
'bb.raw': {'RunNumber': 97019, 'FileType': 'RAW'}
},
'cc.raw': {'dd.raw': {'RunNumber': 97019, 'FileType': 'RAW'},
'/lhcb/1_1.semileptonic.dst': {'FileType': 'SEMILEPTONIC.DST'}
}
}
self.assertEqual( res, lfnDictExpected )
lfnDict = {'aa.raw': {'/bb/pippo/aa.dst':{'FileType': 'LOG'},
'bb.log':{'FileType': 'LOG'}
}
}
res = self.ci._selectByFileType( lfnDict )
lfnDictExpected = {}
self.assertEqual( res, lfnDictExpected )
def test__getFileTypesCount( self ):
lfnDict = {'aa.raw': {'bb.log':{'FileType': 'LOG'},
'/bb/pippo/aa.dst':{'FileType': 'DST'}}}
res = self.ci._getFileTypesCount( lfnDict )
resExpected = {'aa.raw': {'DST':1, 'LOG':1}}
self.assertEqual( res, resExpected )
lfnDict = {'aa.raw': {'bb.log':{'FileType': 'LOG'},
'/bb/pippo/aa.dst':{'FileType': 'DST'},
'/bb/pippo/cc.dst':{'FileType': 'DST'}}}
res = self.ci._getFileTypesCount( lfnDict )
resExpected = {'aa.raw': {'DST':2, 'LOG':1}}
self.assertEqual( res, resExpected )
# def test__catalogDirectoryToSE(self):
# lfnDir = ['/this/is/dir1/', '/this/is/dir2/']
#
# res = self.ci.catalogDirectoryToSE(lfnDir)
# self.assertTrue(res['OK'])
def test__getCatalogDirectoryContents(self):
lfnDirs = ['/this/is/dir1/', '/this/is/dir2/']
res = self.ci._getCatalogDirectoryContents(lfnDirs)
self.assertTrue(res['OK'])
resExpected = {'Metadata': {'/this/is/dir1/file1.txt': { 'MetaData': { 'Checksum': '7149ed85',
'ChecksumType': 'Adler32',
'CreationDate': datetime.datetime(2014, 12, 4, 12, 16, 56),
'FileID': 156301805L,
'GID': 2695L,
'GUID': '6A5C6C86-AD7B-E411-9EDB-AC162DA8C2B0',
'Mode': 436,
'ModificationDate': datetime.datetime(2014, 12, 4, 12, 16, 56),
'Owner': 'phicharp',
'OwnerGroup': 'lhcb_prod',
'Size': 206380531L,
'Status': 'AprioriGood',
'Type': 'File',
'UID': 19503L}},
'/this/is/dir1/file2.foo.bar': {'MetaData': {'Checksum': '7149ed86',
'ChecksumType': 'Adler32',
'CreationDate': datetime.datetime(2014, 12, 4, 12, 16, 56),
'FileID': 156301805L,
'GID': 2695L,
'GUID': '6A5C6C86-AD7B-E411-9EDB-AC162DA8C2B1',
'Mode': 436,
'ModificationDate': datetime.datetime(2014, 12, 4, 12, 16, 56),
'Owner': 'phicharp',
'OwnerGroup': 'lhcb_prod',
'Size': 206380532L,
'Status': 'AprioriGood',
'Type': 'File',
'UID': 19503L}},
'/this/is/dir2/subdir1/file3.pippo': {'MetaData': {'Checksum': '7149ed86',
'ChecksumType': 'Adler32',
'CreationDate': datetime.datetime(2014, 12, 4, 12, 16, 56),
'FileID': 156301805L,
'GID': 2695L,
'GUID': '6A5C6C86-AD7B-E411-9EDB-AC162DA8C2B1',
'Mode': 436,
'ModificationDate': datetime.datetime(2014, 12, 4, 12, 16, 56),
'Owner': 'phicharp',
'OwnerGroup': 'lhcb_prod',
'Size': 206380532L,
'Status': 'AprioriGood',
'Type': 'File',
'UID': 19503L}}},
'Replicas': {'/this/is/dir1/file1.txt': {'SE1': 'smr://srm.SE1.ch:8443/srm/v2/server?SFN=/this/is/dir1/file1.txt',
'SE2': 'smr://srm.SE2.fr:8443/srm/v2/server?SFN=/this/is/dir1/file1.txt'},
'/this/is/dir1/file2.foo.bar': {'SE1': 'smr://srm.SE1.ch:8443/srm/v2/server?SFN=/this/is/dir1/file2.foo.bar',
'SE3': 'smr://srm.SE3.es:8443/srm/v2/server?SFN=/this/is/dir1/file2.foo.bar'}}}
self.assertEqual(res['Value'], resExpected)
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( UtilitiesTestCase )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( ConsistencyInspectorSuccess ) )
testResult = unittest.TextTestRunner( verbosity = 3 ).run( suite )
|
Andrew-McNab-UK/DIRAC
|
DataManagementSystem/Client/test/Test_Client_DataManagementSystem.py
|
Python
|
gpl-3.0
| 9,190
|
[
"DIRAC"
] |
765d71200a05bb98f5776f7efd42efb1f74195005e1bf70d268a81133b613717
|
#!/usr/bin/env python
"""
login to my wifi
"""
from splinter.browser import Browser
from time import sleep
URL = 'https://controller...'
def main():
br = Browser('chrome')
br.visit(URL)
sleep(3)
if br.is_text_present('Connection', wait_time=7):
br.fill('login', '...')
br.fill('password', '...')
br.find_by_css('#logonForm_connect_button').first.click()
#############################################################################
if __name__ == "__main__":
main()
|
jabbalaci/jabbapylib
|
demos/browser_automation/splinter_wifi.py
|
Python
|
gpl-3.0
| 523
|
[
"VisIt"
] |
9d54d59e084d17cd25655b1064b706933b3f3896a6f9e57f212327006351b7b9
|
#! /usr/bin/env python
# -*- coding:utf-8
# Convolutional Neural Network (CNN) for recognizing handwritten digits, using tflearn
# Special thanks: Harisson@pythonprogramming.net
'''
File name: cnn_simple.py
Author: chimney37
Date created: 12/09/2017
Python Version: 3.62
'''
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tflearn.datasets.mnist as mnist
import argparse
import numpy as np
import os.path
'''
We will use the MNIST data, which has 60000 training samples and 10000 testing samples of
handwritten and labeled digits, 0 through 9, i.e. 10 total "classes". Actual deep
learning applications may require half a billion samples for accuracy; MNIST is small enough to work on
any computer. The MNIST dataset consists of 28x28 images, i.e. 784 pixels. Each pixel is either "blank", i.e.
0, or there is something there, i.e. a value greater than 0. We will predict the number we're looking at
(0, 1, 2, ... 8 or 9).
In a traditional neural network, input data is sent to hidden layer 1, where it is weighted. It then undergoes an
activation function, so the neuron can decide whether to fire and output data to either the output
layer or another hidden layer.
In a Convolutional Neural Network (CNN), the processing goes: Convolution =>
Pooling => Convolution => Pooling => Fully connected layer => Output. Each term
between the arrows is a layer. Convolution is the act of taking the original data and creating feature maps from
it. Pooling is down-sampling, most often in the form of "max-pooling", where we
select a region and then take the maximum value in that region, and that
becomes the new value for the region. Each convolution and pooling step is a
hidden layer. Fully connected layers are typically neural
networks (multilayer perceptrons), where all nodes are "fully connected". Convolutional layers are not
fully connected like a traditional neural network.
Reference: https://pythonprogramming.net/convolutional-neural-network-cnn-machine-learning-tutorial/?completed=/rnn-tensorflow-python-machine-learning-tutorial/
Are convolutional networks related to the convolution of two functions in mathematics?
For people who come from a signal processing, image processing, or computer vision background,
convolution may be very familiar. In fact, a CNN utilizes the convolution
operation. The only difference is that in a convolutional (filtering and encoding
by transformation) neural network (CNN), each network layer acts as a
detection filter for the presence of specific features or patterns present in
the data. The first layers in a CNN detect large features that can be recognized
easily, while later layers detect increasingly abstract features that are
present in many of the larger features detected by earlier layers.
In a way, the CNN detects a specific feature present in the input data of the
layer by convolving the filter with the input data (which is why
mathematical convolution and CNNs are similar).
Reference: https://www.quora.com/Are-convolutional-neural-networks-related-to-the-convolution-of-two-functions-in-mathematics
for more understanding on convolutional_neural_network and the act of
convolution, refer to:
https://www.analyticsvidhya.com/blog/2017/06/architecture-of-convolutional-neural-networks-simplified-demystified/
Another concept introduced is "dropout". The idea is to mimic dead neurons in the brain. Its actual impact
is that it seems to decrease the chance of over-weighted or otherwise biased neurons in the
artificial neural network.
We will use a cost function (loss function) to determine how wrong we are. We
will use an optimizer function, the Adam optimizer, to minimize the cost. Cost is
minimized by tinkering with the weights. How quickly we want to lower the cost is determined by the
learning rate. The lower the learning rate, the slower we will learn, and the
more likely we are to get better results.
One training cycle over the entire dataset is called an epoch. We can pick any number of epochs. After each epoch, we've
hopefully further fine-tuned our weights, lowering our cost and improving accuracy.
This implementation is based on TFLearn, a higher-level library that is less error-prone to use.
'''
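# Illustrative sketch (not part of the original tutorial): what the 2x2 "max-pooling"
# described above does to a small 2D array. The helper name is hypothetical and the
# function is unused by the training script; `np` is the numpy import above.
def _max_pool_2x2_sketch(image):
    """ Down-samples a 2D array by taking the max of each non-overlapping 2x2 block. """
    h, w = image.shape
    # Trim odd rows/columns, split the array into 2x2 blocks, and keep each block's maximum.
    blocks = image[:h - h % 2, :w - w % 2].reshape(h // 2, 2, w // 2, 2)
    return blocks.max(axis=(1, 3))
# e.g. _max_pool_2x2_sketch(np.arange(16).reshape(4, 4)) -> [[ 5,  7], [13, 15]]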
model_file = 'quicktest.model'
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Train a neural network of predicting digits from images')
parser.add_argument('-e', '--epochs', default=10, help='an integer specifying\
number of epochs for training.',
type=int)
parser.add_argument('-t', '--train', action='store_true', help='training mode')
parser.add_argument('-p', '--predict', action='store_true', help='prediction mode')
args = parser.parse_args()
# one_hot means one element out of all the others is literally "hot" or on. This is useful for
# multi-class classification, from 0, 1, ... to 9. So we want the output to be like
#
# 0 = [1,0,0,0,0,0,0,0,0,0]
# 1 = [0,1,0,0,0,0,0,0,0,0]
# ...
# 9 = [0,0,0,0,0,0,0,0,0,1]
X, Y, test_x, test_y = mnist.load_data(one_hot=True)
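# Illustrative example (not part of the original script): the same one-hot format
# built by hand with numpy for the digit 3; the variable name is hypothetical.
example_one_hot = np.eye(10)[3]  # -> array([0., 0., 0., 1., 0., 0., 0., 0., 0., 0.])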
# input: the images come flattened as 784 pixel values (28x28), so we reshape them
# back to 28x28x1 for the convolutional layers. This is also known as the shape.
X = X.reshape([-1, 28, 28, 1])
test_x = test_x.reshape([-1, 28, 28, 1])
# input layer
# If an input data point doesn't fit the shape, this specification will ignore the
# data without throwing an error.
convnet = input_data(shape=[None, 28, 28, 1], name='input')
# 2 layers of convolution and pooling
convnet = conv_2d(convnet, 32, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)
# add a fully connected layer and dropout
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
# output layer
convnet = fully_connected(convnet, 10, activation='softmax')
convnet = regression(convnet, optimizer='adam',
learning_rate=0.01,
loss='categorical_crossentropy',
name='targets')
# create the model
model = tflearn.DNN(convnet)
if args.train:
# train the model
model.fit({'input': X},
          {'targets': Y},
          n_epoch=args.epochs,
          validation_set=({'input': test_x},
                          {'targets': test_y}),
          snapshot_step=500,
          show_metric=True,
          run_id='mnist')
# save the model
model.save(model_file)
if os.path.isfile(model_file):
model.load(model_file)
print("loaded model:", model_file)
else:
print('cannot load model. train it first')
if args.predict:
print(np.round(model.predict([test_x[1]])[0]))
print(test_y[1])
|
chimney37/ml-snippets
|
cnn_simple.py
|
Python
|
mit
| 6,827
|
[
"NEURON"
] |
35c84a24cbe67986934c32090e61b4a5398602587d0aa7014f8e63015966f519
|
# -*- coding: utf-8 -*-
#
# librosa documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 25 13:12:33 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If librosa cache is enabled, disable it for now
try:
os.environ.pop('LIBROSA_CACHE_DIR')
except:
pass
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../librosa'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'librosa'
copyright = u'2014, Dawen Liang, Brian McFee, Matt McVicar, Colin Raffel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4.0pre'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_bootstrap_theme
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'bootswatch_theme': 'yeti',
'bootstrap_version': '3',
'navbar_title': 'LibROSA',
'source_link_position': None,
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'librosadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'librosa.tex', u'librosa Documentation',
u'Dawen Liang, Brian McFee, Matt McVicar, Colin Raffel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'librosa', u'librosa Documentation',
[u'Dawen Liang, Brian McFee, Matt McVicar, Colin Raffel'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'librosa', u'librosa Documentation',
u'Dawen Liang, Brian McFee, Matt McVicar, Colin Raffel', 'librosa', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
ebattenberg/librosa
|
docs/conf.py
|
Python
|
isc
| 8,244
|
[
"Brian"
] |
4b7741fef757c43792c347d69a57aac37fc2eada28dd2566f7d130c708d13a91
|
from DIRAC.Core.Base import Script
Script.parseCommandLine()
import unittest
import itertools
from DIRAC.DataManagementSystem.DB.FileCatalogDB import FileCatalogDB
from DIRAC.Core.Security.Properties import FC_MANAGEMENT
seName = "mySE"
testUser = 'atsareg'
testGroup = 'dirac_user'
testDir = '/vo.formation.idgrilles.fr/user/a/atsareg/testdir'
parentDir = '/vo.formation.idgrilles.fr/user/a/atsareg'
nonExistingDir = "/I/Dont/exist/dir"
testFile = '/vo.formation.idgrilles.fr/user/a/atsareg/testdir/testfile'
nonExistingFile = "/I/Dont/exist"
credDict = {'DN': '/DC=ch/DC=cern/OU=computers/CN=volhcb12.cern.ch',
'extraCredentials': 'hosts',
'group': 'visitor',
'CN': 'volhcb12.cern.ch',
'x509Chain': "<X509Chain 3 certs [/DC=ch/DC=cern/OU=computers/CN=volhcb12.cern.ch][/DC=ch/DC=cern/CN=CERN Trusted Certification Authority][/DC=ch/DC=cern/CN=CERN Root CA]>",
'username': 'anonymous',
'isLimitedProxy': False,
'properties': [FC_MANAGEMENT],
'isProxy': False}
# TESTS WERE DESIGNED WITH THIS CONFIGURATION
# DATABASE_CONFIG = { 'UserGroupManager' : 'UserAndGroupManagerDB',
# 'SEManager' : 'SEManagerDB',
# 'SecurityManager' : 'NoSecurityManager',
# 'DirectoryManager' : 'DirectoryLevelTree',
# 'FileManager' : 'FileManager',
# 'DirectoryMetadata' : 'DirectoryMetadata',
# 'FileMetadata' : 'FileMetadata',
# 'DatasetManager' : 'DatasetManager',
# 'UniqueGUID' : False,
# 'GlobalReadAccess' : True,
# 'LFNPFNConvention' : 'Strong',
# 'ResolvePFN' : True,
# 'DefaultUmask' : 0775,
# 'ValidFileStatus' : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
# 'ValidReplicaStatus' : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
# 'VisibleFileStatus' : ['AprioriGood'],
# 'VisibleReplicaStatus': ['AprioriGood'] }
DATABASE_CONFIG = { 'UserGroupManager' : 'UserAndGroupManagerDB', # UserAndGroupManagerDB, UserAndGroupManagerCS
'SEManager' : 'SEManagerDB', # SEManagerDB, SEManagerCS
'SecurityManager' : 'NoSecurityManager', # NoSecurityManager, DirectorySecurityManager, FullSecurityManager
'DirectoryManager' : 'DirectoryLevelTree', # DirectorySimpleTree, DirectoryFlatTree, DirectoryNodeTree, DirectoryLevelTree
'FileManager' : 'FileManager', # FileManagerFlat, FileManager
'DirectoryMetadata' : 'DirectoryMetadata',
'FileMetadata' : 'FileMetadata',
'DatasetManager' : 'DatasetManager',
'UniqueGUID' : True,
'GlobalReadAccess' : True,
'LFNPFNConvention' : 'Strong',
'ResolvePFN' : True,
'DefaultUmask' : 0775,
'ValidFileStatus' : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
'ValidReplicaStatus' : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
'VisibleFileStatus' : ['AprioriGood'],
'VisibleReplicaStatus': ['AprioriGood'] }
ALL_MANAGERS = { "UserGroupManager" : ["UserAndGroupManagerDB", "UserAndGroupManagerCS"],
"SEManager" : ["SEManagerDB", "SEManagerCS"],
"SecurityManager" : ["NoSecurityManager", "DirectorySecurityManager", "FullSecurityManager"],
"DirectoryManager" : ["DirectorySimpleTree", "DirectoryFlatTree", "DirectoryNodeTree", "DirectoryLevelTree"],
"FileManager" : ["FileManagerFlat", "FileManager"],
}
ALL_MANAGERS_NO_CS = { "UserGroupManager" : ["UserAndGroupManagerDB"],
"SEManager" : ["SEManagerDB"],
"SecurityManager" : ["NoSecurityManager", "DirectorySecurityManager", "FullSecurityManager"],
"DirectoryManager" : ["DirectorySimpleTree", "DirectoryFlatTree", "DirectoryNodeTree", "DirectoryLevelTree"],
"FileManager" : ["FileManagerFlat", "FileManager"],
}
DEFAULT_MANAGER = { "UserGroupManager" : ["UserAndGroupManagerDB"],
"SEManager" : ["SEManagerDB"],
"SecurityManager" : ["DirectorySecurityManagerWithDelete"],
"DirectoryManager" : ["DirectoryClosure"],
"FileManager" : ["FileManagerPs"],
}
DEFAULT_MANAGER_2 = { "UserGroupManager" : ["UserAndGroupManagerDB"],
"SEManager" : ["SEManagerDB"],
"SecurityManager" : ["NoSecurityManager"],
"DirectoryManager" : ["DirectoryLevelTree"],
"FileManager" : ["FileManager"],
}
MANAGER_TO_TEST = DEFAULT_MANAGER
class FileCatalogDBTestCase( unittest.TestCase ):
""" Base class for the FileCatalogDB test cases
"""
def setUp( self ):
self.db = FileCatalogDB()
# for table in self.db._query( "Show tables;" )["Value"]:
# self.db.deleteEntries( table[0] )
self.db.setConfig( DATABASE_CONFIG )
def tearDown(self):
pass
# for table in self.db._query( "Show tables;" )["Value"]:
# self.db.deleteEntries( table[0] )
class SECase ( FileCatalogDBTestCase ):
def test_seOperations( self ):
"""Testing SE related operation"""
# create SE
ret = self.db.addSE( seName, credDict )
self.assert_( ret["OK"], "addSE failed when adding new SE: %s" % ret )
seId = ret["Value"]
# create it again
ret = self.db.addSE( seName, credDict )
self.assertEqual( ret["Value"], seId, "addSE failed when adding existing SE: %s" % ret )
# remove it
ret = self.db.deleteSE( seName, credDict )
self.assert_( ret["OK"], "deleteE failed %s" % ret )
class UserGroupCase( FileCatalogDBTestCase ):
def test_userOperations( self ):
"""Testing the user related operations"""
# Add the user
result = self.db.addUser( testUser, credDict )
self.assert_( result['OK'], "AddUser failed when adding new user: %s" % result )
# Add an existing user
result = self.db.addUser( testUser, credDict )
self.assert_( result['OK'], "AddUser failed when adding existing user: %s" % result )
# Fetch the list of user
result = self.db.getUsers( credDict )
self.assert_( result['OK'], "getUsers failed: %s" % result )
# Check if our user is present
self.assert_( testUser in result['Value'], "getUsers failed: %s" % result )
# remove the user we created
result = self.db.deleteUser( testUser, credDict )
self.assert_( result['OK'], "deleteUser failed: %s" % result )
def test_groupOperations( self ):
"""Testing the group related operations"""
# Create new group
result = self.db.addGroup( testGroup, credDict )
self.assert_( result['OK'], "AddGroup failed when adding new user: %s" % result )
result = self.db.addGroup( testGroup, credDict )
self.assert_( result['OK'], "AddGroup failed when adding existing user: %s" % result )
result = self.db.getGroups( credDict )
self.assert_( result['OK'], "getGroups failed: %s" % result )
self.assert_( testGroup in result['Value'] )
result = self.db.deleteGroup( testGroup, credDict )
self.assert_( result['OK'], "deleteGroup failed: %s" % result )
class FileCase( FileCatalogDBTestCase ):
def test_fileOperations( self ):
"""
Tests the File related Operations
this test requires the SE to be properly defined in the CS -> NO IT DOES NOT!!
"""
# Adding a new file
result = self.db.addFile( { testFile: { 'PFN': 'testfile',
'SE': 'testSE' ,
'Size':123,
'GUID':'1000',
'Checksum':'0' } }, credDict )
self.assert_( result['OK'], "addFile failed when adding new file %s" % result )
result = self.db.exists( testFile , credDict )
self.assert_( result['OK'] )
self.assertEqual( result['Value'].get( 'Successful', {} ).get( testFile ),
testFile, "exists( testFile) should be the same lfn %s" % result )
result = self.db.exists( {testFile:'1000'} , credDict )
self.assert_( result['OK'] )
self.assertEqual( result['Value'].get( 'Successful', {} ).get( testFile ),
testFile, "exists( testFile : 1000) should be the same lfn %s" % result )
result = self.db.exists( {testFile:{'GUID' : '1000', 'PFN' : 'blabla'}} , credDict )
self.assert_( result['OK'] )
self.assertEqual( result['Value'].get( 'Successful', {} ).get( testFile ),
testFile, "exists( testFile : 1000) should be the same lfn %s" % result )
# In fact, we don't check if the GUID is correct...
result = self.db.exists( {testFile:'1001'}, credDict )
self.assert_( result['OK'] )
self.assertEqual( result['Value'].get( 'Successful', {} ).get( testFile ),
testFile, "exists( testFile : 1001) should be the same lfn %s" % result )
result = self.db.exists( {testFile + '2' : '1000'}, credDict )
self.assert_( result['OK'] )
self.assertEqual( result['Value'].get( 'Successful', {} ).get( testFile + '2' ),
testFile, "exists( testFile2 : 1000) should return testFile %s" % result )
# Re-adding the same file
result = self.db.addFile( { testFile: { 'PFN': 'testfile',
'SE': 'testSE' ,
'Size':123,
'GUID':'1000',
'Checksum':'0' } }, credDict )
self.assert_( result["OK"], "addFile failed when adding existing file with same param %s" % result )
self.assert_( testFile in result["Value"]["Successful"], "addFile failed: it should be possible to add an existing lfn with same param %s" % result )
# Adding same file with different param
result = self.db.addFile( { testFile: { 'PFN': 'testfile',
'SE': 'testSE' ,
'Size':123,
'GUID':'1000',
'Checksum':'1' } }, credDict )
self.assert_( result["OK"], "addFile failed when adding existing file with different parem %s" % result )
self.assert_( testFile in result["Value"]["Failed"], "addFile failed: it should not be possible to add an existing lfn with different param %s" % result )
result = self.db.addFile( { testFile + '2': { 'PFN': 'testfile',
'SE': 'testSE' ,
'Size':123,
'GUID':'1000',
'Checksum':'0' } }, credDict )
self.assert_( result["OK"], "addFile failed when adding existing file %s" % result )
self.assert_( testFile + '2' in result["Value"]["Failed"], "addFile failed: it should not be possible to add a new lfn with existing GUID %s" % result )
##################################################################################
# Setting existing status of existing file
result = self.db.setFileStatus( {testFile:"AprioriGood"}, credDict )
self.assert_( result["OK"], "setFileStatus failed when setting existing status of existing file %s" % result )
self.assert_( testFile in result["Value"]["Successful"], "setFileStatus failed: %s should be in successful (%s)" % ( testFile, result ) )
# Setting unexisting status of existing file
result = self.db.setFileStatus( {testFile:"Happy"}, credDict )
self.assert_( result["OK"], "setFileStatus failed when setting un-existing status of existing file %s" % result )
self.assert_( testFile in result["Value"]["Failed"], "setFileStatus should have failed %s" % result )
# Setting existing status of unexisting file
result = self.db.setFileStatus( {nonExistingFile:"Trash"}, credDict )
self.assert_( result["OK"], "setFileStatus failed when setting existing status of non-existing file %s" % result )
self.assert_( nonExistingFile in result["Value"]["Failed"], "setFileStatus failed: %s should be in failed (%s)" % ( nonExistingFile, result ) )
##################################################################################
result = self.db.isFile( [testFile, nonExistingFile], credDict )
self.assert_( result["OK"], "isFile failed: %s" % result )
self.assert_( testFile in result["Value"]["Successful"], "isFile : %s should be in Successful %s" % ( testFile, result ) )
self.assert_( result["Value"]["Successful"][testFile], "isFile : %s should be seen as a file %s" % ( testFile, result ) )
self.assert_( nonExistingFile in result["Value"]["Successful"], "isFile : %s should be in Successful %s" % ( nonExistingFile, result ) )
self.assert_( result["Value"]["Successful"][nonExistingFile] == False, "isFile : %s should be seen as a file %s" % ( nonExistingFile, result ) )
result = self.db.setFileOwner( {testFile : "toto", nonExistingFile : "tata"}, credDict )
self.assert_( result["OK"], "setFileOwner failed: %s" % result )
self.assert_( testFile in result["Value"]["Successful"], "setFileOwner : %s should be in Successful %s" % ( testFile, result ) )
self.assert_( nonExistingFile in result["Value"]["Failed"], "setFileOwner : %s should be in Failed %s" % ( nonExistingFile, result ) )
result = self.db.setFileGroup( {testFile : "toto", nonExistingFile :"tata"}, credDict )
self.assert_( result["OK"], "setFileGroup failed: %s" % result )
self.assert_( testFile in result["Value"]["Successful"], "setFileGroup : %s should be in Successful %s" % ( testFile, result ) )
self.assert_( nonExistingFile in result["Value"]["Failed"], "setFileGroup : %s should be in Failed %s" % ( nonExistingFile, result ) )
result = self.db.setFileMode( {testFile : 044, nonExistingFile : 044}, credDict )
self.assert_( result["OK"], "setFileMode failed: %s" % result )
self.assert_( testFile in result["Value"]["Successful"], "setFileMode : %s should be in Successful %s" % ( testFile, result ) )
self.assert_( nonExistingFile in result["Value"]["Failed"], "setFileMode : %s should be in Failed %s" % ( nonExistingFile, result ) )
result = self.db.getFileSize( [testFile, nonExistingFile], credDict )
self.assert_( result["OK"], "getFileSize failed: %s" % result )
self.assert_( testFile in result["Value"]["Successful"], "getFileSize : %s should be in Successful %s" % ( testFile, result ) )
self.assertEqual( result["Value"]["Successful"][testFile], 123, "getFileSize got incorrect file size %s" % result )
self.assert_( nonExistingFile in result["Value"]["Failed"], "getFileSize : %s should be in Failed %s" % ( nonExistingFile, result ) )
result = self.db.getFileMetadata( [testFile, nonExistingFile], credDict )
self.assert_( result["OK"], "getFileMetadata failed: %s" % result )
self.assert_( testFile in result["Value"]["Successful"], "getFileMetadata : %s should be in Successful %s" % ( testFile, result ) )
self.assertEqual( result["Value"]["Successful"][testFile]["Owner"], "toto", "getFileMetadata got incorrect Owner %s" % result )
self.assertEqual( result["Value"]["Successful"][testFile]["Status"], "AprioriGood", "getFileMetadata got incorrect status %s" % result )
self.assert_( nonExistingFile in result["Value"]["Failed"], "getFileMetadata : %s should be in Failed %s" % ( nonExistingFile, result ) )
# DOES NOT FOLLOW THE SUCCESSFUL/FAILED CONVENTION
# result = self.db.getFileDetails( [testFile, nonExistingFile], credDict )
# self.assert_( result["OK"], "getFileDetails failed: %s" % result )
# self.assert_( testFile in result["Value"]["Successful"], "getFileDetails : %s should be in Successful %s" % ( testFile, result ) )
# self.assertEqual( result["Value"]["Successful"][testFile]["Owner"], "toto", "getFileDetails got incorrect Owner %s" % result )
# self.assert_( nonExistingFile in result["Value"]["Failed"], "getFileDetails : %s should be in Failed %s" % ( nonExistingFile, result ) )
# ADD SOMETHING ABOUT FILE ANCESTORS AND DESCENDENTS
result = self.db.removeFile( [testFile, nonExistingFile], credDict )
self.assert_( result["OK"], "removeFile failed: %s" % result )
self.assert_( testFile in result["Value"]["Successful"], "removeFile : %s should be in Successful %s" % ( testFile, result ) )
self.assert_( result["Value"]["Successful"][testFile], "removeFile : %s should be in True %s" % ( testFile, result ) )
self.assert_( result["Value"]["Successful"][nonExistingFile], "removeFile : %s should be in True %s" % ( nonExistingFile, result ) )
class ReplicaCase( FileCatalogDBTestCase ):
def test_replicaOperations( self ):
"""
this test requires the SE to be properly defined in the CS -> NO IT DOES NOT!!
"""
# Adding a new file
result = self.db.addFile( { testFile: { 'PFN': 'testfile',
'SE': 'testSE' ,
'Size':123,
'GUID':'1000',
'Checksum':'0' } }, credDict )
self.assert_( result['OK'], "addFile failed when adding new file %s" % result )
# Adding new replica
result = self.db.addReplica( {testFile : {"PFN" : "testFile", "SE" : "otherSE"}}, credDict )
self.assert_( result['OK'], "addReplica failed when adding new Replica %s" % result )
self.assert_( testFile in result['Value']["Successful"], "addReplica failed when adding new Replica %s" % result )
# Adding the same replica
result = self.db.addReplica( {testFile : {"PFN" : "testFile", "SE" : "otherSE"}}, credDict )
self.assert_( result['OK'], "addReplica failed when adding new Replica %s" % result )
self.assert_( testFile in result['Value']["Successful"], "addReplica failed when adding new Replica %s" % result )
# Adding replica of a non existing file
result = self.db.addReplica( {nonExistingFile : {"PFN" : "Idontexist", "SE" : "otherSE"}}, credDict )
self.assert_( result['OK'], "addReplica failed when adding Replica to non existing Replica %s" % result )
self.assert_( nonExistingFile in result['Value']["Failed"], "addReplica for non existing file should go in Failed %s" % result )
# Setting existing status of existing Replica
result = self.db.setReplicaStatus( {testFile: {"Status" : "Trash", "SE" : "otherSE"}}, credDict )
self.assert_( result["OK"], "setReplicaStatus failed when setting existing status of existing Replica %s" % result )
self.assert_( testFile in result["Value"]["Successful"], "setReplicaStatus failed: %s should be in successful (%s)" % ( testFile, result ) )
# Setting non existing status of existing Replica
result = self.db.setReplicaStatus( {testFile: {"Status" : "randomStatus", "SE" : "otherSE"}}, credDict )
self.assert_( result["OK"], "setReplicaStatus failed when setting non-existing status of existing Replica %s" % result )
self.assert_( testFile in result["Value"]["Failed"], "setReplicaStatus failed: %s should be in Failed (%s)" % ( testFile, result ) )
# Setting existing status of non-existing Replica
result = self.db.setReplicaStatus( {testFile: {"Status" : "Trash", "SE" : "nonExistingSe"}}, credDict )
self.assert_( result["OK"], "setReplicaStatus failed when setting existing status of non-existing Replica %s" % result )
self.assert_( testFile in result["Value"]["Failed"], "setReplicaStatus failed: %s should be in Failed (%s)" % ( testFile, result ) )
# Setting existing status of non-existing File
result = self.db.setReplicaStatus( {nonExistingFile: {"Status" : "Trash", "SE" : "nonExistingSe"}}, credDict )
self.assert_( result["OK"], "setReplicaStatus failed when setting existing status of non-existing File %s" % result )
self.assert_( nonExistingFile in result["Value"]["Failed"], "setReplicaStatus failed: %s should be in Failed (%s)" % ( nonExistingFile, result ) )
# Getting existing status of existing Replica but not visible
result = self.db.getReplicaStatus( {testFile: "testSE"}, credDict )
self.assert_( result["OK"], "getReplicaStatus failed when getting existing status of existing Replica %s" % result )
self.assert_( testFile in result["Value"]["Successful"], "getReplicaStatus failed: %s should be in Successful (%s)" % ( testFile, result ) )
# Getting existing status of existing Replica but not visible
result = self.db.getReplicaStatus( {testFile : "otherSE"}, credDict )
self.assert_( result["OK"], "getReplicaStatus failed when getting existing status of existing Replica but not visible %s" % result )
self.assert_( testFile in result["Value"]["Successful"], "getReplicaStatus failed: %s should be in Successful (%s)" % ( testFile, result ) )
# Getting status of non-existing File but not visible
result = self.db.getReplicaStatus( {nonExistingFile: "testSE"}, credDict )
self.assert_( result["OK"], "getReplicaStatus failed when getting status of non existing File %s" % result )
self.assert_( nonExistingFile in result["Value"]["Failed"], "getReplicaStatus failed: %s should be in failed (%s)" % ( nonExistingFile, result ) )
# Getting replicas of existing File and non existing file, seeing all replicas
result = self.db.getReplicas( [testFile, nonExistingFile], allStatus = True, credDict = credDict )
self.assert_( result["OK"], "getReplicas failed %s" % result )
self.assert_( testFile in result["Value"]["Successful"], "getReplicas failed, %s should be in Successful %s" % ( testFile, result ) )
self.assertEqual( result["Value"]["Successful"][testFile], {"otherSE" : "", "testSE" : ""}, "getReplicas failed, %s should be in Successful %s" % ( testFile, result ) )
self.assert_( nonExistingFile in result["Value"]["Failed"], "getReplicas failed, %s should be in Failed %s" % ( nonExistingFile, result ) )
# removing master replica
result = self.db.removeReplica( {testFile : { "SE" : "testSE"}}, credDict )
self.assert_( result['OK'], "removeReplica failed when removing master Replica %s" % result )
self.assert_( testFile in result['Value']["Successful"], "removeReplica failed when removing master Replica %s" % result )
# removing non existing replica of existing File
result = self.db.removeReplica( {testFile : { "SE" : "nonExistingSe2"}}, credDict )
self.assert_( result['OK'], "removeReplica failed when removing non existing Replica %s" % result )
self.assert_( testFile in result['Value']["Successful"], "removeReplica failed when removing new Replica %s" % result )
# removing non existing replica of non existing file
result = self.db.removeReplica( {nonExistingFile : { "SE" : "nonExistingSe3"}}, credDict )
self.assert_( result['OK'], "removeReplica failed when removing replica of non existing File %s" % result )
self.assert_( nonExistingFile in result['Value']["Successful"], "removeReplica of non existing file, %s should be in Successful %s" % ( nonExistingFile, result ) )
# removing last replica
result = self.db.removeReplica( {testFile : { "SE" : "otherSE"}}, credDict )
self.assert_( result['OK'], "removeReplica failed when removing last Replica %s" % result )
self.assert_( testFile in result['Value']["Successful"], "removeReplica failed when removing last Replica %s" % result )
# Cleaning after us
result = self.db.removeFile( testFile, credDict )
self.assert_( result["OK"], "removeFile failed: %s" % result )
class DirectoryCase( FileCatalogDBTestCase ):
def test_directoryOperations( self ):
"""
Tests the Directory related Operations
Note: this test does not require the SE to be properly defined in the CS.
"""
# Adding a new directory
result = self.db.createDirectory( testDir, credDict )
self.assert_( result['OK'], "addDirectory failed when adding new directory %s" % result )
result = self.db.addFile( { testFile: { 'PFN': 'testfile',
'SE': 'testSE' ,
'Size':123,
'GUID':'1000',
'Checksum':'0' } }, credDict )
self.assert_( result['OK'], "addFile failed when adding new file %s" % result )
# Re-adding the same directory (CAUTION, different from addFile)
result = self.db.createDirectory( testDir, credDict )
self.assert_( result["OK"], "addDirectory failed when adding existing directory %s" % result )
self.assert_( testDir in result["Value"]["Successful"], "addDirectory failed: it should be possible to add an existing lfn %s" % result )
result = self.db.isDirectory( [testDir, nonExistingDir], credDict )
self.assert_( result["OK"], "isDirectory failed: %s" % result )
self.assert_( testDir in result["Value"]["Successful"], "isDirectory : %s should be in Successful %s" % ( testDir, result ) )
self.assert_( result["Value"]["Successful"][testDir], "isDirectory : %s should be seen as a directory %s" % ( testDir, result ) )
self.assert_( nonExistingDir in result["Value"]["Successful"], "isDirectory : %s should be in Successful %s" % ( nonExistingDir, result ) )
self.assert_( result["Value"]["Successful"][nonExistingDir] == False, "isDirectory : %s should be seen as a directory %s" % ( nonExistingDir, result ) )
result = self.db.getDirectorySize( [testDir, nonExistingDir], False, False, credDict )
self.assert_( result["OK"], "getDirectorySize failed: %s" % result )
self.assert_( testDir in result["Value"]["Successful"], "getDirectorySize : %s should be in Successful %s" % ( testDir, result ) )
self.assertEqual( result["Value"]["Successful"][testDir], {'LogicalFiles': 1, 'LogicalDirectories': 0, 'LogicalSize': 123}, "getDirectorySize got incorrect directory size %s" % result )
self.assert_( nonExistingDir in result["Value"]["Failed"], "getDirectorySize : %s should be in Failed %s" % ( nonExistingDir, result ) )
result = self.db.getDirectorySize( [testDir, nonExistingDir], False, True, credDict )
self.assert_( result["OK"], "getDirectorySize (calc) failed: %s" % result )
self.assert_( testDir in result["Value"]["Successful"], "getDirectorySize (calc): %s should be in Successful %s" % ( testDir, result ) )
self.assertEqual( result["Value"]["Successful"][testDir], {'LogicalFiles': 1, 'LogicalDirectories': 0, 'LogicalSize': 123}, "getDirectorySize got incorrect directory size %s" % result )
self.assert_( nonExistingDir in result["Value"]["Failed"], "getDirectorySize (calc) : %s should be in Failed %s" % ( nonExistingDir, result ) )
result = self.db.listDirectory( [parentDir, testDir, nonExistingDir], credDict )
self.assert_( result["OK"], "listDirectory failed: %s" % result )
self.assert_( parentDir in result["Value"]["Successful"], "listDirectory : %s should be in Successful %s" % ( parentDir, result ) )
self.assertEqual( result["Value"]["Successful"][parentDir]["SubDirs"].keys(), [testDir], \
"listDir : incorrect content for %s (%s)" % ( parentDir, result ) )
self.assert_( testDir in result["Value"]["Successful"], "listDirectory : %s should be in Successful %s" % ( testDir, result ) )
self.assertEqual( result["Value"]["Successful"][testDir]["Files"].keys(), [testFile.split( "/" )[-1]], \
"listDir : incorrect content for %s (%s)" % ( testDir, result ) )
self.assert_( nonExistingDir in result["Value"]["Failed"], "listDirectory : %s should be in Failed %s" % ( nonExistingDir, result ) )
# Cleaning after us
result = self.db.removeFile( testFile, credDict )
self.assert_( result["OK"], "removeFile failed: %s" % result )
# result = self.db.removeDirectory( [testDir, nonExistingDir], credDict )
# self.assert_( result["OK"], "removeDirectory failed: %s" % result )
# self.assert_( testDir in result["Value"]["Successful"], "removeDirectory : %s should be in Successful %s" % ( testDir, result ) )
# self.assert_( result["Value"]["Successful"][testDir], "removeDirectory : %s should be in True %s" % ( testDir, result ) )
# self.assert_( nonExistingDir in result["Value"]["Successful"], "removeDirectory : %s should be in Successful %s" % ( nonExistingDir, result ) )
# self.assert_( result["Value"]["Successful"][nonExistingDir], "removeDirectory : %s should be in True %s" % ( nonExistingDir, result ) )
class DirectoryUsageCase ( FileCatalogDBTestCase ):
def getPhysicalSize(self, sizeDict, dirName, seName):
""" Extract the information from a ret dictionary
and return the tuple (files, size) for a given
directory and an SE
"""
try:
val = sizeDict[dirName]['PhysicalSize'][seName]
files = val['Files']
size = val['Size']
return ( files, size )
except Exception as e:
raise
def getLogicalSize( self, sizeDict, dirName ):
""" Extract the information from a ret dictionary
and return the tuple (files, size) for a given
directory
"""
try:
files = sizeDict[dirName]['LogicalFiles']
size = sizeDict[dirName]['LogicalSize']
return ( files, size )
except Exception as e:
# print e
# print "sizeDict %s" % sizeDict
# print "dirName %s" % dirName
raise
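# Editor's note (inferred from the assertions in this test case rather than from the
# FileCatalogDB documentation): when called as getDirectorySize( dirs, True, False, credDict ),
# the per-directory entry in 'Successful' is expected to look roughly like
#   { 'LogicalFiles': 2, 'LogicalSize': 246,
#     'PhysicalSize': { 'se1': { 'Files': 1, 'Size': 123 }, ... } }
# which is exactly what getPhysicalSize and getLogicalSize above unpack.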
def test_directoryUsage( self ):
"""Testing DirectoryUsage related operation"""
# create SE
d1 = '/sizeTest/d1'
d2 = '/sizeTest/d2'
f1 = d1 + '/f1'
f2 = d1 + '/f2'
f3 = d2 + '/f3'
f1Size = 3000000000
f2Size = 3000000001
f3Size = 3000000002
# f1Size = 1
# f2Size = 2
# f3Size = 5
for sen in ['se1', 'se2', 'se3']:
ret = self.db.addSE( sen, credDict )
self.assert_( ret["OK"] )
for din in [d1, d2]:
ret = self.db.createDirectory( din, credDict )
self.assert_( ret["OK"] )
ret = self.db.addFile( { f1: { 'PFN': 'f1se1',
'SE': 'se1' ,
'Size':f1Size,
'GUID':'1000',
'Checksum':'1' },
f2: { 'PFN': 'f2se2',
'SE': 'se2' ,
'Size':f2Size,
'GUID':'1001',
'Checksum':'2' } }, credDict )
self.assert_( ret["OK"] )
ret = self.db.getDirectorySize( [d1, d2], True, False, credDict )
self.assert_( ret["OK"] )
val = ret['Value']['Successful']
d1s1 = self.getPhysicalSize( val, d1, 'se1' )
d1s2 = self.getPhysicalSize( val, d1, 'se2' )
d1l = self.getLogicalSize( val, d1 )
self.assertEqual( d1s1 , ( 1, f1Size ), "Unexpected size %s, expected %s" % ( d1s1, ( 1, f1Size ) ) )
self.assertEqual( d1s2 , ( 1, f2Size ), "Unexpected size %s, expected %s" % ( d1s2, ( 1, f2Size ) ) )
self.assertEqual( d1l , ( 2, f1Size + f2Size ), "Unexpected size %s, expected %s" % ( d1l, ( 2, f1Size + f2Size ) ) )
ret = self.db.addReplica( {f1 : {"PFN" : "f1se2", "SE" : "se2"},
f2 : {"PFN" : "f1se3", "SE" : "se3"}},
credDict )
self.assert_( ret['OK'] )
ret = self.db.getDirectorySize( [d1, d2], True, False, credDict )
self.assert_( ret["OK"] )
val = ret['Value']['Successful']
d1s1 = self.getPhysicalSize( val, d1, 'se1' )
d1s2 = self.getPhysicalSize( val, d1, 'se2' )
d1s3 = self.getPhysicalSize( val, d1, 'se3' )
d1l = self.getLogicalSize( val, d1 )
self.assertEqual( d1s1 , ( 1, f1Size ), "Unexpected size %s, expected %s" % ( d1s1, ( 1, f1Size ) ) )
self.assertEqual( d1s2 , ( 2, f1Size + f2Size ), "Unexpected size %s, expected %s" % ( d1s2, ( 2, f1Size + f2Size ) ) )
self.assertEqual( d1s3 , ( 1, f2Size ), "Unexpected size %s, expected %s" % ( d1s3, ( 1, f2Size ) ) )
self.assertEqual( d1l , ( 2, f1Size + f2Size ), "Unexpected size %s, expected %s" % ( d1l, ( 2, f1Size + f2Size ) ) )
ret = self.db.removeFile( [f1], credDict )
self.assert_( ret['OK'] )
ret = self.db.getDirectorySize( [d1, d2], True, False, credDict )
self.assert_( ret["OK"] )
val = ret['Value']['Successful']
# Here we should get a KeyError, since there are no files left on se1 in principle
try:
d1s1 = self.getPhysicalSize( val, d1, 'se1' )
except KeyError:
d1s1 = ( 0, 0 )
d1s2 = self.getPhysicalSize( val, d1, 'se2' )
d1s3 = self.getPhysicalSize( val, d1, 'se3' )
d1l = self.getLogicalSize( val, d1 )
self.assertEqual( d1s1 , ( 0, 0 ), "Unexpected size %s, expected %s" % ( d1s1, ( 0, 0 ) ) )
self.assertEqual( d1s2 , ( 1, f2Size ), "Unexpected size %s, expected %s" % ( d1s2, ( 1, f2Size ) ) )
self.assertEqual( d1s3 , ( 1, f2Size ), "Unexpected size %s, expected %s" % ( d1s3, ( 1, f2Size ) ) )
self.assertEqual( d1l , ( 1, f2Size ), "Unexpected size %s, expected %s" % ( d1l, ( 1, f2Size ) ) )
ret = self.db.removeReplica( {f2 : { "SE" : "se2"}}, credDict )
self.assert_( ret['OK'] )
ret = self.db.getDirectorySize( [d1, d2], True, False, credDict )
self.assert_( ret["OK"] )
val = ret['Value']['Successful']
# Here we should get a KeyError, since there are no files left on se2 in principle
try:
d1s2 = self.getPhysicalSize( val, d1, 'se2' )
except KeyError:
d1s2 = ( 0, 0 )
d1s3 = self.getPhysicalSize( val, d1, 'se3' )
d1l = self.getLogicalSize( val, d1 )
self.assertEqual( d1s2 , ( 0, 0 ), "Unexpected size %s, expected %s" % ( d1s2, ( 0, 0 ) ) )
self.assertEqual( d1s3 , ( 1, f2Size ), "Unexpected size %s, expected %s" % ( d1s3, ( 1, f2Size ) ) )
self.assertEqual( d1l , ( 1, f2Size ), "Unexpected size %s, expected %s" % ( d1l, ( 1, f2Size ) ) )
ret = self.db.addFile( { f1: { 'PFN': 'f1se1',
'SE': 'se1' ,
'Size':f1Size,
'GUID':'1000',
'Checksum':'1' },
f3: { 'PFN': 'f3se3',
'SE': 'se3' ,
'Size':f3Size,
'GUID':'1002',
'Checksum':'3' } }, credDict )
self.assert_( ret["OK"] )
ret = self.db.getDirectorySize( [d1, d2], True, False, credDict )
self.assert_( ret["OK"] )
val = ret['Value']['Successful']
d1s1 = self.getPhysicalSize( val, d1, 'se1' )
d1s3 = self.getPhysicalSize( val, d1, 'se3' )
d2s3 = self.getPhysicalSize( val, d2, 'se3' )
d1l = self.getLogicalSize( val, d1 )
d2l = self.getLogicalSize( val, d2 )
self.assertEqual( d1s1 , ( 1, f1Size ), "Unexpected size %s, expected %s" % ( d1s1, ( 1, f1Size ) ) )
self.assertEqual( d1s3 , ( 1, f2Size ), "Unexpected size %s, expected %s" % ( d1s3, ( 1, f2Size ) ) )
self.assertEqual( d2s3 , ( 1, f3Size ), "Unexpected size %s, expected %s" % ( d2s3, ( 1, f3Size ) ) )
self.assertEqual( d1l , ( 2, f1Size + f2Size ), "Unexpected size %s, expected %s" % ( d1l, ( 2, f1Size + f2Size ) ) )
self.assertEqual( d2l , ( 1, f3Size ), "Unexpected size %s, expected %s" % ( d2l, ( 1, f3Size ) ) )
ret = self.db.removeReplica( {f1 : { "SE" : "se1"}}, credDict )
self.assert_( ret['OK'] )
ret = self.db.getDirectorySize( [d1, d2], True, False, credDict )
self.assert_( ret["OK"] )
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize( val, d1, 'se1' )
except KeyError:
d1s1 = ( 0, 0 )
d1s3 = self.getPhysicalSize( val, d1, 'se3' )
d2s3 = self.getPhysicalSize( val, d2, 'se3' )
d1l = self.getLogicalSize( val, d1 )
d2l = self.getLogicalSize( val, d2 )
self.assertEqual( d1s1 , ( 0, 0 ), "Unexpected size %s, expected %s" % ( d1s1, ( 0, 0 ) ) )
self.assertEqual( d1s3 , ( 1, f2Size ), "Unexpected size %s, expected %s" % ( d1s3, ( 1, f2Size ) ) )
self.assertEqual( d2s3 , ( 1, f3Size ), "Unexpected size %s, expected %s" % ( d2s3, ( 1, f3Size ) ) )
# This one is silly... there are no replicas of f1, but since the file is still there,
# the logical size does not change
self.assertEqual( d1l , ( 2, f1Size + f2Size ), "Unexpected size %s, expected %s" % ( d1l, ( 2, f1Size + f2Size ) ) )
self.assertEqual( d2l , ( 1, f3Size ), "Unexpected size %s, expected %s" % ( d2l, ( 1, f3Size ) ) )
ret = self.db.removeFile( [f1], credDict )
self.assert_( ret['OK'] )
ret = self.db.getDirectorySize( [d1, d2], True, False, credDict )
self.assert_( ret["OK"] )
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize( val, d1, 'se1' )
except KeyError:
d1s1 = ( 0, 0 )
d1s3 = self.getPhysicalSize( val, d1, 'se3' )
d2s3 = self.getPhysicalSize( val, d2, 'se3' )
d1l = self.getLogicalSize( val, d1 )
d2l = self.getLogicalSize( val, d2 )
self.assertEqual( d1s1 , ( 0, 0 ), "Unexpected size %s, expected %s" % ( d1s1, ( 0, 0 ) ) )
self.assertEqual( d1s3 , ( 1, f2Size ), "Unexpected size %s, expected %s" % ( d1s3, ( 1, f2Size ) ) )
self.assertEqual( d2s3 , ( 1, f3Size ), "Unexpected size %s, expected %s" % ( d2s3, ( 1, f3Size ) ) )
self.assertEqual( d1l , ( 1, f2Size ), "Unexpected size %s, expected %s" % ( d1l, ( 1, f2Size ) ) )
self.assertEqual( d2l , ( 1, f3Size ), "Unexpected size %s, expected %s" % ( d2l, ( 1, f3Size ) ) )
ret = self.db.removeReplica( {f2 : { "SE" : "se3"},
f3 : { "SE" : "se3"}}, credDict )
self.assert_( ret['OK'] )
ret = self.db.getDirectorySize( [d1, d2], True, False, credDict )
self.assert_( ret["OK"] )
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize( val, d1, 'se1' )
except KeyError:
d1s1 = ( 0, 0 )
try:
d1s3 = self.getPhysicalSize( val, d1, 'se3' )
except KeyError:
d1s3 = ( 0, 0 )
try:
d2s3 = self.getPhysicalSize( val, d2, 'se3' )
except KeyError:
d2s3 = ( 0, 0 )
d1l = self.getLogicalSize( val, d1 )
d2l = self.getLogicalSize( val, d2 )
self.assertEqual( d1s1 , ( 0, 0 ), "Unexpected size %s, expected %s" % ( d1s1, ( 0, 0 ) ) )
self.assertEqual( d1s3 , ( 0, 0 ), "Unexpected size %s, expected %s" % ( d1s3, ( 0, 0 ) ) )
self.assertEqual( d2s3 , ( 0, 0 ), "Unexpected size %s, expected %s" % ( d2s3, ( 0, 0 ) ) )
# There are no replicas of f2 and f3 left, but since the files are still registered,
# the logical sizes do not change
self.assertEqual( d1l , ( 1, f2Size ), "Unexpected size %s, expected %s" % ( d1l, ( 1, f2Size ) ) )
self.assertEqual( d2l , ( 1, f3Size ), "Unexpected size %s, expected %s" % ( d2l, ( 1, f3Size ) ) )
ret = self.db.removeFile( [f2, f3], credDict )
self.assert_( ret['OK'] )
ret = self.db.getDirectorySize( [d1, d2], True, False, credDict )
self.assert_( ret["OK"] )
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize( val, d1, 'se1' )
except KeyError:
d1s1 = ( 0, 0 )
try:
d1s3 = self.getPhysicalSize( val, d1, 'se3' )
except KeyError:
d1s3 = ( 0, 0 )
try:
d2s3 = self.getPhysicalSize( val, d2, 'se3' )
except KeyError:
d2s3 = ( 0, 0 )
d1l = self.getLogicalSize( val, d1 )
d2l = self.getLogicalSize( val, d2 )
self.assertEqual( d1s1 , ( 0, 0 ), "Unexpected size %s, expected %s" % ( d1s1, ( 0, 0 ) ) )
self.assertEqual( d1s3 , ( 0, 0 ), "Unexpected size %s, expected %s" % ( d1s3, ( 0, 0 ) ) )
self.assertEqual( d2s3 , ( 0, 0 ), "Unexpected size %s, expected %s" % ( d2s3, ( 0, 0 ) ) )
# Now that the files f2 and f3 have been removed as well, the logical sizes drop to zero
self.assertEqual( d1l , ( 0, 0 ), "Unexpected size %s, expected %s" % ( d1l, ( 0, 0 ) ) )
self.assertEqual( d2l , ( 0, 0 ), "Unexpected size %s, expected %s" % ( d2l, ( 0, 0 ) ) )
# self.assert_( testDir in result["Value"]["Successful"], "getDirectorySize : %s should be in Successful %s" % ( testDir, result ) )
# self.assertEqual( result["Value"]["Successful"][testDir], {'LogicalFiles': 1, 'LogicalDirectories': 0, 'LogicalSize': 123}, "getDirectorySize got incorrect directory size %s" % result )
# self.assert_( nonExistingDir in result["Value"]["Failed"], "getDirectorySize : %s should be in Failed %s" % ( nonExistingDir, result ) )
#
#
# result = self.db.getDirectorySize( [testDir, nonExistingDir], False, True, credDict )
# self.assert_( result["OK"], "getDirectorySize (calc) failed: %s" % result )
# self.assert_( testDir in result["Value"]["Successful"], "getDirectorySize (calc): %s should be in Successful %s" % ( testDir, result ) )
# self.assertEqual( result["Value"]["Successful"][testDir], {'LogicalFiles': 1, 'LogicalDirectories': 0, 'LogicalSize': 123}, "getDirectorySize got incorrect directory size %s" % result )
# self.assert_( nonExistingDir in result["Value"]["Failed"], "getDirectorySize (calc) : %s should be in Failed %s" % ( nonExistingDir, result ) )
if __name__ == '__main__':
managerTypes = MANAGER_TO_TEST.keys()
all_combinations = list( itertools.product( *MANAGER_TO_TEST.values() ) )
numberOfManager = len( managerTypes )
for setup in all_combinations:
print "Running with:"
print ( "".join( ["\t %s : %s\n" % ( managerTypes[i], setup[i] ) for i in range( numberOfManager )] ) )
for i in range( numberOfManager ):
DATABASE_CONFIG[managerTypes[i]] = setup[i]
suite = unittest.defaultTestLoader.loadTestsFromTestCase( SECase )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( UserGroupCase ) )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( FileCase ) )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( ReplicaCase ) )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( DirectoryCase ) )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( DirectoryUsageCase ) )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
|
coberger/DIRAC
|
DataManagementSystem/DB/test/TestFileCatalogDB.py
|
Python
|
gpl-3.0
| 43,159
|
[
"DIRAC"
] |
e9b082139833e1511d4cae3138a8303a1dc29fe3fb9e562cb29fa17c55742a19
|
import argparse
import os
import numpy as np
import pylab as pl
import itertools
from astropy.coordinates import Angle
from numpy.polynomial.chebyshev import chebval
from scipy.interpolate import interp1d
import SEDMr.Wavelength as Wavelength
import SEDMr.Spectra as SedSpec
import SEDMr.Version as Version
# Nadia imports
from scipy.interpolate import griddata
import scipy.optimize as opt
import skimage.feature as feature
drp_ver = Version.ifu_drp_version()
def plot_drp_ver():
ax = pl.gca()
ax.annotate('DRP: ' + drp_ver, xy=(0.0, 0.01), xytext=(0, 0),
xycoords=('axes fraction', 'figure fraction'),
textcoords='offset points', size=6,
ha='center', va='bottom')
def get_ellipse_xys(ell):
a = ell[0]
b = ell[1]
pts = np.zeros((361, 2))
beta = -ell[4] * np.pi / 180.
sin_beta = np.sin(beta)
cos_beta = np.cos(beta)
alpha = np.radians(np.r_[0.:360.:1j * 361])
sin_alpha = np.sin(alpha)
cos_alpha = np.cos(alpha)
pts[:, 0] = ell[2] + (a * cos_alpha * cos_beta - b * sin_alpha * sin_beta)
pts[:, 1] = ell[3] + (a * cos_alpha * sin_beta + b * sin_alpha * cos_beta)
return pts
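# Editor's illustration (arbitrary values): for a unit circle (a = b = 1, centred at the
# origin) the points returned above all lie at radius 1, regardless of the rotation angle.
def _demo_get_ellipse_xys():
    pts = get_ellipse_xys((1.0, 1.0, 0.0, 0.0, 30.0))
    return np.allclose(np.hypot(pts[:, 0], pts[:, 1]), 1.0)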
def gaussian_2d(xdata_tuple, amplitude, xo, yo,
sigma_x, sigma_y, theta, offset):
"""
Produces a 2D gaussian centered in xo, yo with the parameters specified.
xdata_tuple: coordinates of the points where the 2D Gaussian is computed.
"""
(x, y) = xdata_tuple
xo = float(xo)
yo = float(yo)
a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
g = offset + amplitude*np.exp(-(a * ((x-xo)**2) +
2 * b * (x-xo) * (y-yo) +
c * ((y-yo)**2)))
return g.ravel()
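# Editor's sketch (illustration only, not used by the pipeline): gaussian_2d can be fed
# directly to scipy.optimize.curve_fit, mirroring the call pattern used in
# identify_spectra_gauss_fit below; all numbers here are made up.
def _demo_gaussian_2d_fit():
    x, y = np.mgrid[-10.:10.:100j, -10.:10.:100j]
    truth = (100., 1.0, -2.0, 3.0, 2.0, 0.3, 5.0)   # amplitude, xo, yo, sigma_x, sigma_y, theta, offset
    img = gaussian_2d((x, y), *truth)                # already raveled by gaussian_2d
    guess = (80., 0., 0., 2., 2., 0., 0.)
    popt, pcov = opt.curve_fit(gaussian_2d, (x, y), img, p0=guess)
    return popt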
def identify_spectra_gauss_fit(spectra, prlltc=None, lmin=400., lmax=900.,
airmass=1.0, sigfac=3.0, plotobj=False):
"""
Returns index of spectra picked by Guassian fit.
NOTE: Index is counted against the array, not seg_id
"""
status = 0
pl.ioff()
kt = SedSpec.Spectra(spectra)
# Get X,Y positions (arcsec) and summed values between lmin and lmax
xs, ys, vs = kt.to_xyv(lmin=lmin, lmax=lmax)
xi = np.linspace(np.nanmin(xs), np.nanmax(xs), 200)
yi = np.linspace(np.nanmin(ys), np.nanmax(ys), 200)
x, y = np.mgrid[np.nanmin(xs):np.nanmax(xs):200j,
np.nanmin(ys):np.nanmax(ys):200j]
points = zip(xs, ys)
values = vs
gscl = (np.nanmax(xs) - np.nanmin(xs)) / 200.
# Create image, print(stats)
grid_vs = griddata(points, values, (x, y), method='linear')
grid_vs[np.isnan(grid_vs)] = np.nanmean(grid_vs)
grid_med = np.nanmedian(grid_vs)
print("grid_vs min, max, mean, median: %f, %f, %f, %f\n" %
(float(np.nanmin(grid_vs)), float(np.nanmax(grid_vs)),
float(np.nanmean(grid_vs)), float(grid_med)))
# Find features in image
blobs = feature.blob_log(grid_vs-grid_med, min_sigma=10, max_sigma=20,
threshold=100.0)
print("Found %d blobs" % len(blobs))
goodblob = 0
# Loop over found blobs
objs = []
for blob in blobs:
# Extract blob properties
bx, by, br = blob
br *= gscl
bx = int(bx)
by = int(by)
# How bright is this blob?
gv = grid_vs[bx, by]-grid_med
# Exclude edge blobs and faint blobs
if 0 < bx < 199 and 0 < by < 199 and gv > 100.:
goodblob += 1
print("%3d, z, x, y, dra, ddec: %8.1f, %5d, %5d, %6.2f, %6.2f" %
(goodblob, float(gv), bx, by, xi[bx], yi[by]))
objs.append((gv, xi[bx], yi[by], br, goodblob))
print("Found %d good objects" % len(objs))
if len(objs) <= 0:
objs = [(1000., 0., 0., 2., goodblob)]
# Make sure the brightest object is last
objs.sort()
# Perform 2-D Gaussian fit of good (real) objects
for obj in objs:
# Fill initial fit params
amplitude = obj[0]
xo = obj[1]
yo = obj[2]
ro = obj[3]
objno = obj[4]
print("\nFitting object %d" % objno)
print("initial guess : z,a,b,x,y,theta:"
" %9.1f, %6.2f, %6.2f, %6.2f, %6.2f, %7.2f" %
(amplitude, ro, ro, xo, yo, 0.))
# create initial data
initial_guess = (amplitude, xo, yo, ro, ro, 0, grid_med)
try:
popt, pcov = opt.curve_fit(gaussian_2d, (x, y),
grid_vs.flatten(), p0=initial_guess)
except RuntimeError:
print("ERROR: unable to fit Gaussian")
print("Using initial guess")
status = 3
popt = initial_guess
# Fitted position
xc = popt[1]
yc = popt[2]
a = popt[3]
b = popt[4]
if xc < -30. or xc > 30. or yc < -30. or yc > 30.:
print("ERROR: X,Y out of bounds: %f, %f" % (xc, yc))
print("Using initial guess")
popt = initial_guess
status = 1
# Fitted 3-sigma extent
if a > 14. or b > 14. or a <= 0. or b <= 0.:
print("ERROR: A,B out of bounds: %f, %f" % (a, b))
print("Using initial guess")
popt = initial_guess
status = 2
# Extract values to use
xc = popt[1]
yc = popt[2]
if status == 0:
a = popt[3] * sigfac
b = popt[4] * sigfac
else:
a = popt[3] * 2.0
b = popt[4] * 2.0
pos = (xc, yc)
theta = popt[5]
z = popt[0]
# report position and shape
ellipse = (a, b, xc, yc, theta * (180. / np.pi))
print("PSF FIT on IFU: z,a,b,x,y,theta:"
" %9.1f, %6.2f, %6.2f, %6.2f, %6.2f, %7.2f\n" %
(z, a, b, xc, yc, theta*180./np.pi))
positions = [pos]
# Gather spaxels
all_kix = []
for the_pos in positions:
all_kix.append(list(find_positions_ellipse(kt.KT.data,
the_pos[0], the_pos[1],
a, b, -theta)))
all_kix = list(itertools.chain(*all_kix))
kix = list(set(all_kix))
print("found this many spaxels: %d" % len(kix))
if status == 0 and goodblob == 0:
print("ERROR: no good objects found in image")
status = 4
return kt.good_positions[kix], pos, positions, ellipse, status
def find_positions_ellipse(xy, h, k, a, b, theta):
"""
xy: array of coordinate pairs [[x0, y0], [x1, y1], ...].
h: X coordinate of the ellipse centre.
k: Y coordinate of the ellipse centre.
a: semi-major axis of the ellipse along X.
b: semi-minor axis of the ellipse along Y.
theta: rotation angle of the ellipse in radians (clockwise).
"""
positions = np.arange(len(xy))
x = xy[:, 0]
y = xy[:, 1]
dist = ((x-h) * np.cos(theta) + (y-k) * np.sin(theta)) ** 2 / (a ** 2) + \
((x-h) * np.sin(theta) - (y-k) * np.cos(theta)) ** 2 / (b ** 2)
return positions[dist < 1]
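# Editor's illustration of the ellipse-membership test above (arbitrary toy values):
def _demo_find_positions_ellipse():
    pts = np.array([[0.0, 0.0],    # inside
                    [1.9, 0.0],    # inside, close to the semi-major edge
                    [0.0, 1.5],    # outside the semi-minor edge
                    [3.0, 3.0]])   # outside
    # axis-aligned ellipse centred at the origin with a=2, b=1 -> indices [0, 1]
    return find_positions_ellipse(pts, h=0.0, k=0.0, a=2.0, b=1.0, theta=0.0)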
def identify_sky_spectra(spectra, pos, ellipse=None, lmin=650., lmax=700.):
status = 0
kt = SedSpec.Spectra(spectra)
# outer = inner + 3.
skys = kt.good_positions.tolist()
a = ellipse[0]*1.25
if a > 10:
a = 10.
b = a * (ellipse[1] / ellipse[0])
xc = ellipse[2]
yc = ellipse[3]
theta = ellipse[4] * (np.pi / 180.)
all_kix = []
for the_pos in pos:
all_kix.append(list(find_positions_ellipse(kt.KT.data,
the_pos[0], the_pos[1],
a, b, -theta)))
all_kix = list(itertools.chain(*all_kix))
kix = list(set(all_kix))
objs = kt.good_positions[kix]
for o in objs:
if o in skys:
skys.remove(o)
if len(skys) > 0:
print("Number of starting pure sky spaxels is %d" % len(skys))
else:
print("ERROR: no sky spaxels in this image: using all spaxels")
skys = kt.good_positions.tolist()
status = 1
newspec = [spectra[i] for i in skys]
kt = SedSpec.Spectra(newspec)
xs, ys, vs = kt.to_xyv(lmin=lmin, lmax=lmax)
vmdn = np.nanmedian(vs)
vstd = np.nanstd(vs)
hi_thresh = vmdn + 1.25 * vstd
lo_thresh = vmdn - 2.0 * vstd
print("Median: %6.2f, STD: %6.2f, Hi Thresh: %6.2f, Lo Thresh: %6.2f" %
(float(vmdn), float(vstd), float(hi_thresh), float(lo_thresh)))
n_hi_rem = 0
n_lo_rem = 0
n_tot = 0
for s in list(skys):  # iterate over a copy so that skys.remove() below does not skip entries
el = spectra[s]
l, fl = el.get_flambda()
ok = (l > lmin) & (l <= lmax)
if np.nanmedian(el.spec[ok]) > hi_thresh:
skys.remove(s)
n_hi_rem += 1
if np.nanmedian(el.spec[ok]) < lo_thresh:
skys.remove(s)
n_lo_rem += 1
n_tot += 1
n_tot -= n_hi_rem + n_lo_rem
print("Removed %d high sky spaxels and %d low sky spaxels leaving %d "
"remaining spaxels" % (n_hi_rem, n_lo_rem, n_tot))
return skys, status
def c_to_nm(coefficients, pix, offset=0.):
t = coefficients[:]
t[0] += offset
return chebval(pix, t)
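# Editor's example (made-up coefficients, illustration only): chebval evaluates the
# Chebyshev series, so c_to_nm([700., 150.], [0, 1, 2]) returns [700., 850., 1000.] nm,
# and a non-zero offset simply shifts the zeroth coefficient, i.e. the whole solution:
def _demo_c_to_nm():
    pix = np.arange(3)
    return c_to_nm([700., 150.], pix), c_to_nm([700., 150.], pix, offset=10.)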
def interp_spectra(all_spectra, six, onto=None, sky=False):
"""Interp spectra onto common grid
Args:
all_spectra: list of extracted spectrum objects
six: indices (into all_spectra) of the spectra to combine
onto: wavelength grid (nm) to resample onto, or None to use the first spectrum's grid
sky: if True, skip spectra already flagged as object spectra (is_obj)
"""
l_grid = onto
s_grid = []
lamcoeff = None
# for ix,spectrum in enumerate(all_spectra):
for ix in six:
spectrum = all_spectra[ix]
if sky and all_spectra[ix].is_obj:
continue
l, s = spectrum.get_counts(the_spec='specf')
pix = np.arange(*spectrum.xrange)
# check for saturated traces
if np.max(s) > 1000000:
print("saturated extraction: %d with max of %d, skipping" %
(ix, np.max(s)))
continue
# This is correct: preference to lamcoeff over mdn_coeff
if spectrum.lamcoeff is not None:
cs = spectrum.lamcoeff
else:
cs = spectrum.mdn_coeff
# get wavelengths for spectrum
l = c_to_nm(cs, pix)
# skip short spectra (on or near edge of IFU)
if l.max() - l.min() < 300:
continue
# Check if our wavelength grid is defined,
if l_grid is None:
# use the first set of wavelengths and store
l_grid = l
fl = s
lamcoeff = spectrum.lamcoeff
else:
# Interpolate onto our wavelength grid and store
fun = interp1d(l, s, bounds_error=False, fill_value=0)
fl = fun(l_grid)
s_grid.append(fl)
# average of all spectra selected
medspec = np.nanmean(s_grid, axis=0)
# Package results
doc = """Result contains:
nm [N float]: Wavelength solution
ph_10m_nm [N float]: Spectral irradiance in units of phot / 10 min / nm
spectra [? x K float]: List of all the spectra that participated in
the formation of ph_10m_nm. By interpolating these spectra onto
a common wavelength grid and taking the mean, you produce ph_10m_nm
coefficients [3-5 element float]: Chebyshev coefficients that produce
nm. Can be evaluated with numpy chebval().
corrected-spec [N float]: ph_10m_nm * Atmospheric correction, if
available
doc: This doc string
"""
result = [{"nm": l_grid, "ph_10m_nm": medspec, "spectra": s_grid,
"coefficients": lamcoeff, "doc": doc}]
return result
def make_cog(infile, lmin=650., lmax=700., sigfac=7., interact=False,
no_stamp=False):
"""Loads IFU frame "imfile" and extracts spectra using "fine".
Args:
infile (string): input extractions file
lmin (float): lower wavelength limit for image generation
lmax (float): upper wavelength limit for image generation
sigfac (float): sigma multiplier for Gaussian extent of aperture
interact (bool): set for interactive plotting
no_stamp (bool): set to prevent printing DRP version stamp on plot
Returns:
None
Raises:
None
"""
# The spaxel extraction file must already exist, so load extractions in
if os.path.isfile(infile):
print("USING extractions in %s!" % infile)
ex, meta = np.load(infile)
# No file found
else:
print("File not found: %s" % infile)
return
outname = infile.split('.')[0]
# Automatic extraction using Gaussian fit for Standard Stars
sixa, posa, adcpos, ellipse, status = \
identify_spectra_gauss_fit(ex,
prlltc=Angle(meta['PRLLTC'], unit='deg'),
lmin=lmin, lmax=lmax,
airmass=meta['airmass'],
sigfac=sigfac)
# Use all sky spaxels in image
kixa, skystat = identify_sky_spectra(ex, adcpos, ellipse=ellipse)
for ix in sixa:
ex[ix].is_obj = True
for ix in kixa:
ex[ix].is_sky = True
# Get sky spectrum
if skystat == 0:
skya = interp_spectra(ex, kixa, sky=True)
else:
skya = interp_spectra(ex, kixa)
# Define our standard wavelength grid
ll = None
# Resample sky onto standard wavelength grid
try:
sky_a = interp1d(skya[0]['nm'], skya[0]['ph_10m_nm'],
bounds_error=False)
except Exception:
sky_a = None
# Set up curve of growth
kt = SedSpec.Spectra(ex)
elrat = ellipse[1] / ellipse[0]
xc = ellipse[2]
yc = ellipse[3]
theta = ellipse[4]*np.pi/180.
# Set up plot
pl.figure(1)
pl.clf()
if not no_stamp:
pl.title(outname)
xs = range(20)
rs = list(np.linspace(0, ellipse[0], 20))
rs.reverse()
print("max semi-major axis is %.2f asec" % ellipse[0])
c1 = []
c2 = []
c3 = []
c4 = []
c5 = []
px = []
resout = []
# Loop over semi-major axes
for ix in xs:
if rs[ix] <= 0.:
continue
a = rs[ix]
b = rs[ix] * elrat
kix = find_positions_ellipse(kt.KT.data, xc, yc, a, b, -theta)
sixa = kt.good_positions[kix]
print("%02d found %04d spaxels with a = %.3f" % (ix, len(sixa), a))
if len(sixa) > 0:
# get the summed spectrum over the selected spaxels
resa = interp_spectra(ex, sixa)
# get common wavelength scale
if ll is None:
ll = resa[0]['nm']
# Copy and resample object spectrum onto standard wavelength grid
res = {
"doc": resa[0]["doc"],
"ph_10m_nm": np.copy(resa[0]["ph_10m_nm"]),
"spectra": np.copy(resa[0]["spectra"]),
"coefficients": np.copy(resa[0]["coefficients"]),
"nm": np.copy(ll)
}
fl = interp1d(resa[0]['nm'], resa[0]['ph_10m_nm'],
bounds_error=False)
# Calculate output corrected spectrum
# Account for sky and aperture
if sky_a is not None:
res['ph_10m_nm'] = (fl(ll)-sky_a(ll)) * len(sixa)
else:
res['ph_10m_nm'] = fl(ll) * len(sixa)
cog = res['ph_10m_nm']
# 400 - 500 nm
f1 = np.nanmean(cog[(ll > 400) * (ll < 500)])
c1.append(f1)
# 500 - 600 nm
f2 = np.nanmean(cog[(ll > 500) * (ll < 600)])
c2.append(f2)
# 600 - 700 nm
f3 = np.nanmean(cog[(ll > 600) * (ll < 700)])
c3.append(f3)
# 700 - 800 nm
f4 = np.nanmean(cog[(ll > 700) * (ll < 800)])
c4.append(f4)
# 800 - 900 nm
f5 = np.nanmean(cog[(ll > 800) * (ll < 900)])
c5.append(f5)
# Semi-major axis in arcsec
px.append(a)
# Output results
res['a'] = a
res['ix'] = ix
res['Nspax'] = len(sixa)
res['fl_400_500nm'] = f1
res['fl_500_600nm'] = f2
res['fl_600_700nm'] = f3
res['fl_700_800nm'] = f4
res['fl_800_900nm'] = f5
# Store this iteration
resout.append(res)
maxph = np.nanmax([c1, c2, c3, c4, c5])
# Normalize and get minimum half-max radius
hmr = 1.e9
# 400-500 nm
c1 /= maxph
if np.max(c1) == c1[0] and np.max(c1) > 0.5:
c1f = interp1d(c1, px)
hmx = c1f(0.5)
if hmx < hmr:
hmr = hmx
# 500-600 nm
c2 /= maxph
if np.max(c2) == c2[0] and np.max(c2) > 0.5:
c2f = interp1d(c2, px)
hmx = c2f(0.5)
if hmx < hmr:
hmr = hmx
# 600-700 nm
c3 /= maxph
if np.max(c3) == c3[0] and np.max(c3) > 0.5:
c3f = interp1d(c3, px)
hmx = c3f(0.5)
if hmx < hmr:
hmr = hmx
# 700-800 nm
c4 /= maxph
if np.max(c4) == c4[0] and np.max(c4) > 0.5:
c4f = interp1d(c4, px)
hmx = c4f(0.5)
if hmx < hmr:
hmr = hmx
# 800-900 nm
c5 /= maxph
if np.max(c5) == c5[0] and np.max(c5) > 0.5:
c5f = interp1d(c5, px)
hmx = c5f(0.5)
if hmx < hmr:
hmr = hmx
if not no_stamp:
pl.plot(px, c1, label='400-500 nm')
pl.plot(px, c2, label='500-600 nm')
pl.plot(px, c3, label='600-700 nm')
pl.plot(px, c4, label='700-800 nm')
pl.plot(px, c5, label='800-900 nm')
else:
pl.plot(px, c1, label='400-500 nm', linestyle=':')
pl.plot(px, c2, label='500-600 nm', linestyle='-.')
pl.plot(px, c3, label='600-700 nm', linestyle='--')
pl.plot(px, c4, label='700-800 nm', linestyle='-')
pl.plot(px, c5, label='800-900 nm', linestyle=':')
if hmr < 1.e9:
pl.plot([hmr, hmr], [-0.05, 1.05], ls='--', c='black',
label='HalfLight')
# pl.plot([0.05, hmr], [0.5, 0.5], ls='--', c='gray')
pl.xlim(0.05, np.max(px)+0.05)
pl.ylim(-0.05, 1.05)
pl.xlabel('Semi-major axis (arcsec)', {'fontsize': 14})
pl.ylabel('Relative Irradiance', {'fontsize': 14})
pl.legend()
if not no_stamp:
plot_drp_ver()
else:
ax = pl.gca()
ax.tick_params(axis='both', which='major', labelsize=14)
ltext = ax.get_legend().get_texts()
pl.setp(ltext[0], fontsize=16)
pl.setp(ltext[1], fontsize=16)
pl.setp(ltext[2], fontsize=16)
pl.setp(ltext[3], fontsize=16)
pl.setp(ltext[4], fontsize=16)
pl.tight_layout()
if interact:
pl.show()
else:
pl.savefig('cog_' + outname + '.pdf')
print("Wrote cog_"+outname+".pdf")
np.save("cog_" + outname, resout)
print("Wrote cog_"+outname+".npy")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
"""Plot a curve of growth for the brightest object in the extractions.
""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--inname', type=str, help='Input extractions name')
parser.add_argument('--sigfac', type=float,
help='Gaussian Sigma multiplier for maximum extent of'
' aperture', default=7.0)
parser.add_argument('--interact', action='store_true', default=False,
help='Interactive plotting')
parser.add_argument('--no_stamp', action="store_true", default=False,
help='Set to prevent plotting DRP version stamp')
args = parser.parse_args()
print("")
make_cog(args.inname, sigfac=args.sigfac, interact=args.interact,
no_stamp=args.no_stamp)
|
scizen9/kpy
|
SEDMr/CurveOfGrowth.py
|
Python
|
gpl-2.0
| 20,078
|
[
"Gaussian"
] |
46432cf63926ae6b2257b440fbd6ce8f2fcf175bfd5f07ee9b843fe2971d23f4
|
#!/usr/bin/env python3
# Copyright 2017 Jan von Cosel
#
# This file is part of utility-scripts.
#
# utility-scripts is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# utility-scripts is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with utility-scripts. If not, see <http://www.gnu.org/licenses/>.
#
#
# Generate an ASCII file containing the VIPER pulse sequence
# suitable for use in an MCTDH calculation.
# The IR excitation pulse is a single-sided exponential with a Gaussian
# of FWHM of about 150 fs on the rising edge. The other two pulses are
# Gaussian shaped.
import sys
import numpy as np
fs2au = 41.34137333656
Eh2wn = 219474.6312
if len(sys.argv) == 2 and sys.argv[1] == "-h":
print("Usage: genviper sequence tau1 tau2 tau3 pw1 pw2 pw3 freq1 freq2 freq3 a1 a2 a3 tfinal step outputfile")
print("with times tauX, pwX, tfinal and step in fs")
print("and the frequencies freqX in cm-1")
sys.exit()
a_seq = int(sys.argv[1])
a_tau1 = float(sys.argv[2])
a_tau2 = float(sys.argv[3])
a_tau3 = float(sys.argv[4])
a_pw1 = float(sys.argv[5])
a_pw2 = float(sys.argv[6])
a_pw3 = float(sys.argv[7])
a_freq1 = float(sys.argv[8])
a_freq2 = float(sys.argv[9])
a_freq3 = float(sys.argv[10])
a_a1 = float(sys.argv[11])
a_a2 = float(sys.argv[12])
a_a3 = float(sys.argv[13])
a_tfinal = float(sys.argv[14])
a_step = float(sys.argv[15])
a_filename = sys.argv[16]
nsteps = int(a_tfinal / a_step) + 1
sig_1 = a_pw1 * fs2au / (2.0 * np.sqrt(2.0 * np.log(2.0)))
sig_2 = a_pw2 * fs2au / (2.0 * np.sqrt(2.0 * np.log(2.0)))
sig_3 = a_pw3 * fs2au / (2.0 * np.sqrt(2.0 * np.log(2.0)))
tau_1 = a_tau1 * fs2au
tau_2 = a_tau2 * fs2au
tau_3 = a_tau3 * fs2au
frq_1 = a_freq1 / Eh2wn
frq_2 = a_freq2 / Eh2wn
frq_3 = a_freq3 / Eh2wn
dt = a_step * fs2au
tfinal = a_tfinal * fs2au
# sequence encoding
#
# no. pulse 1 pulse 2 pulse 3
#
# 1 x
# 2 x
# 3 x x
# 4 x
# 5 x x
# 6 x x
# 7 x x x
#
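# Editor's note: the table above is simply a binary encoding of which pulses are active
# (bit 0 = pulse 1, bit 1 = pulse 2, bit 2 = pulse 3), so the explicit comparisons on
# a_seq below are equivalent to bit tests. The helper is added for illustration only and
# is not used by the loop that follows.
def pulse_enabled(seq, pulse):
    """Return True if pulse 1, 2 or 3 is active in sequence number seq (1-7)."""
    return bool(seq & (1 << (pulse - 1)))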
outfile = open(a_filename, "w")
for i in range(nsteps):
time = i * dt
value = 0.0
if a_seq == 1 or a_seq == 3 or a_seq == 5 or a_seq == 7:
if time < tau_1:
value += np.exp(-np.power(time - tau_1, 2) / (13869679.2)) * np.sin(frq_1 * time)  # 13869679.2 appears to be 2*sigma^2 in a.u. for the ~150 fs FWHM Gaussian rising edge
else:
value += np.exp(-(time - tau_1) / sig_1) * np.sin(frq_1 * time)
if a_seq == 2 or a_seq == 3 or a_seq == 6 or a_seq == 7:
value += np.exp(-np.power(time - tau_2, 2) / (2.0 * sig_2 * sig_2)) * np.sin(frq_2 * time)
if a_seq == 4 or a_seq == 5 or a_seq == 6 or a_seq == 7:
value += np.exp(-np.power(time - tau_3, 2) / (2.0 * sig_3 * sig_3)) * np.sin(frq_3 * time)
if abs(value) < 1.0e-8:
value = 0.0
outfile.write("{0:12.8f} {1:12.8f} {2:12.8f} {3:12.8f}\n".format(time, value, 0.0, value))
outfile.close()
|
janvc/utility-scripts
|
scripts/genviper.py
|
Python
|
gpl-3.0
| 3,352
|
[
"Gaussian"
] |
0db281c4ef2a69589ddb0d67f1a407545782f3385700ea7d29092cc5e2016ecb
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from .optproc import *
from .text import *
from .procutil import *
from .util import *
from .exceptions import *
from .inpsight import *
from .numpy_helper import *
from .p4regex import *
from .python_helpers import *
from .solvers import *
|
jH0ward/psi4
|
psi4/driver/p4util/__init__.py
|
Python
|
lgpl-3.0
| 1,155
|
[
"Psi4"
] |
81ae451eb5d4bdafafc594ae45f2623aa45f3195485915e4608b192ca9416687
|
#!/usr/bin/env python
# 12.01.2007, c
import os.path as op
import shutil
from optparse import OptionParser
import sfepy
from sfepy.base.base import *
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.applications import SimpleApp
##
# 26.03.2007, c
def print_terms():
import sfepy.terms as t
tt = t.term_table
ct = t.cache_table
print 'Terms: %d available:' % len( tt )
print sorted( tt.keys() )
print 'Term caches: %d available:' % len( ct )
print sorted( ct.keys() )
usage = """%prog [options] filename_in"""
help = {
'filename' :
'basename of output file(s) [default: <basename of input file>]',
'output_format' :
'output file format, one of: {vtk, h5, mesh} [default: vtk]',
'log' :
"log all messages to specified file (existing file will be overwritten!)",
'quiet' :
"do not print any messages to screen",
'save_ebc' :
"save problem state showing EBC (Dirichlet conditions)",
'save_regions' :
"save problem regions as meshes",
'save_regions_as_groups' :
"save problem regions in a single mesh but mark them by using different"
" element/node group numbers",
'save_field_meshes' :
"save meshes of problem fields (with extra DOF nodes)",
'save_region_field_meshes' :
"save meshes of regions of problem fields (with extra DOF nodes)",
'solve_not' :
"do not solve (use in connection with --save-*)",
'list' :
"list data, what can be one of: {terms}",
}
def main():
parser = OptionParser(usage = usage, version = "%prog " + sfepy.__version__)
parser.add_option( "-o", "", metavar = 'filename',
action = "store", dest = "output_filename_trunk",
default = None, help = help['filename'] )
parser.add_option( "", "--format", metavar = 'format',
action = "store", dest = "output_format",
default = None, help = help['output_format'] )
parser.add_option( "", "--log", metavar = 'file',
action = "store", dest = "log",
default = None, help = help['log'] )
parser.add_option( "-q", "--quiet",
action = "store_true", dest = "quiet",
default = False, help = help['quiet'] )
parser.add_option( "", "--save-ebc",
action = "store_true", dest = "save_ebc",
default = False, help = help['save_ebc'] )
parser.add_option( "", "--save-regions",
action = "store_true", dest = "save_regions",
default = False, help = help['save_regions'] )
parser.add_option( "", "--save-regions-as-groups",
action = "store_true", dest = "save_regions_as_groups",
default = False, help = help['save_regions_as_groups'] )
parser.add_option( "", "--save-field-meshes",
action = "store_true", dest = "save_field_meshes",
default = False, help = help['save_field_meshes'] )
parser.add_option( "", "--save-region-field-meshes",
action = "store_true", dest = "save_region_field_meshes",
default = False, help = help['save_region_field_meshes'] )
parser.add_option( "", "--solve-not",
action = "store_true", dest = "solve_not",
default = False, help = help['solve_not'] )
parser.add_option( "", "--list", metavar = 'what',
action = "store", dest = "_list",
default = None, help = help['list'] )
options, args = parser.parse_args()
# print options; pause()
if (len( args ) == 1):
filename_in = args[0];
else:
if options._list == 'terms':
print_terms()
else:
parser.print_help()
return
output.set_output(filename=options.log,
quiet=options.quiet,
combined=options.log is not None)
required, other = get_standard_keywords()
if options.solve_not:
required.remove( 'equations' )
required.remove( 'solver_[0-9]+|solvers' )
other.extend( ['equations'] )
conf = ProblemConf.from_file( filename_in, required, other )
opts = conf.options
output_prefix = get_default_attr( opts, 'output_prefix', 'sfepy:' )
app = SimpleApp( conf, options, output_prefix )
if hasattr( opts, 'parametric_hook' ): # Parametric study.
parametric_hook = getattr( conf, opts.parametric_hook )
app.parametrize( parametric_hook )
app()
if __name__ == '__main__':
main()
|
olivierverdier/sfepy
|
simple.py
|
Python
|
bsd-3-clause
| 4,701
|
[
"VTK"
] |
859b7223f7617092cf03af3a68a1196e92f6ce5f110b82af56fe906fbea47572
|
import pandas as pd
import numpy as np
import json
from datetime import datetime
from queries import *
from dataset import *
def price_quartiles(df):
if('price' in df.columns):
prices = df['price']
first, second, third = prices.quantile([.25, .5, .75])
return first, second, third
else:
raise NameError('price column does not exist')
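# Editor's illustration (toy data, not part of the original module):
def _example_price_quartiles():
    toy = pd.DataFrame({'price': [100, 200, 300, 400, 500]})
    # the .25/.5/.75 quantiles of 100..500 are (200.0, 300.0, 400.0)
    return price_quartiles(toy)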
def get_quartile_speeds(quartiles, selling_speeds):
first = selling_speeds[selling_speeds.price <= quartiles[0]]
first_average_speed = first['speed'].mean(axis=0)
second = selling_speeds[(selling_speeds.price > quartiles[0]) & (selling_speeds.price <= quartiles[1])]
print second
second_average_speed = second['speed'].mean(axis=0)
third = selling_speeds[(selling_speeds.price > quartiles[1]) & (selling_speeds.price <= quartiles[2])]
third_average_speed = third['speed'].mean(axis=0)
fourth = selling_speeds[selling_speeds.price > quartiles[2]]
fourth_average_speed = fourth['speed'].mean(axis=0)
return [first_average_speed, second_average_speed, third_average_speed, fourth_average_speed]
def get_sweet_spots(itemid):
item = get_item(itemid)
df = create_dataset(item)
big = df[df.available_quantity > 5]
selling_speeds = get_selling_speeds(list(big.id))
quartiles = price_quartiles(df)
quartile_speeds = get_quartile_speeds(quartiles, selling_speeds)
quartiles = [str(int(quartil)) for quartil in quartiles]
result = [["< $" + quartiles[0], round(quartile_speeds[0],2)],
["$" + quartiles[0] + " - $" +quartiles[1], round(quartile_speeds[1],2)],
["$" + quartiles[1] + " - $" + quartiles[2], round(quartile_speeds[2],2)],
["> $" + quartiles[2], round(quartile_speeds[3],2)]]
return result
if __name__ == '__main__':
# df = pd.read_csv('iphone5_16gb.csv',encoding='utf-8')
df = create_dataset("MLA119876")
# nuevos = df[df.condition == 'new']
# usados = df[df.condition == 'used']
# nuevos.price.describe()
# usados.price.describe()
big = df[df.available_quantity > 5]
selling_speeds = get_selling_speeds(list(big.id))
print selling_speeds.columns
quartiles = price_quartiles(df)
quartile_speeds = get_quartile_speeds(quartiles, selling_speeds)
# Examples:
# "id": "MLA119876",
# "name": "Galaxy S4",
|
jairot/meliscore
|
meliscore/front/sweet_price_spot.py
|
Python
|
bsd-3-clause
| 2,353
|
[
"Galaxy"
] |
cbc830e2451773839c1b48a9faf075543d4e0f3f6c2c898a0957d738eead14e4
|
from common import Modules, data_strings, load_yara_rules, PEParseModule, ModuleMetadata, is_ip_or_domain
import pefile
class diamondfox(PEParseModule):
def __init__(self):
md = ModuleMetadata(
module_name="diamondfox",
bot_name="diamondfox",
description="Bot that steals passwords, DDoSes, etc, written in VB6",
authors=["Brian Wallace (@botnet_hunter)"],
version="1.1.0",
date="August 22, 2015",
references=[]
)
PEParseModule.__init__(self, md)
self.yara_rules = None
pass
def _generate_yara_rules(self):
if self.yara_rules is None:
self.yara_rules = load_yara_rules("diamondfox.yara")
return self.yara_rules
def get_bot_information(self, file_data):
results = {}
pe = pefile.PE(data=file_data)
custom_resource = [i for i in pe.DIRECTORY_ENTRY_RESOURCE.entries if str(i.name) == 'CUSTOM']
if len(custom_resource) == 0:
# We need to also check the end of the file
if "<--------->" in file_data:
spl = file_data.split("<--------->")
if len(spl) > 1:
config_raw = spl[1]
resource_key = ord(config_raw[0]) ^ ord("<")
config = "".join([chr(ord(i) ^ resource_key) for i in config_raw])
config = config.replace("<Configs>", "").replace("</Configs>", "").strip()
keys_to_decrypt = ["Panel", "FBP", "UsA"]
config_dict = {}
while len(config) > 0:
key = config[:config.find(">") + 1]
config = config[config.find(">") + 1:]
data = config[:config.find(key.replace("<", "</"))]
config = config[config.find(key.replace("<", "</")) + len(key) + 1:].strip()
config_dict[key.replace("<", "").replace(">", "")] = data
xor = ord(config_dict["Xor"][0])
for k in keys_to_decrypt:
if k not in config_dict:
continue
data = config_dict[k]
config_dict[k] = "".join([chr(xor ^ ord(data[i])) for i in xrange(len(data))])
config_dict[k] = "".join([chr(xor ^ ord(i)) for i in data])
results["raw_config"] = config_dict
c2_keys = ["Panel", "FBP"]
if len([i for i in c2_keys if i in config_dict]) > 0:
if "c2s" not in results:
results["c2s"] = []
for i in [config_dict[i] for i in c2_keys if i in config_dict and len(config_dict[i]) > 0]:
results["c2s"].append({"c2_uri": i})
return results
custom_resource = custom_resource[0]
for entry in custom_resource.directory.entries:
if entry.name.string == "101":
data_rva = entry.directory.entries[0].data.struct.OffsetToData
size = entry.directory.entries[0].data.struct.Size
config_raw = pe.get_data(data_rva, size)
resource_key = ord(config_raw[0]) ^ ord("<")
config = "".join([chr(ord(i) ^ resource_key) for i in config_raw])
config = config.replace("<Configs>", "").replace("</Configs>", "").strip()
keys_to_decrypt = ["Panel", "FBP", "UsA"]
config_dict = {}
while len(config) > 0:
key = config[:config.find(">") + 1]
config = config[config.find(">") + 1:]
data = config[:config.find(key.replace("<", "</"))]
config = config[config.find(key.replace("<", "</")) + len(key) + 1:].strip()
config_dict[key.replace("<", "").replace(">", "")] = data
xor = ord(config_dict["Xor"][0])
for k in keys_to_decrypt:
if k not in config_dict:
continue
data = config_dict[k]
config_dict[k] = "".join([chr(xor ^ ord(data[i])) for i in xrange(len(data))])
config_dict[k] = "".join([chr(xor ^ ord(i)) for i in data])
results["raw_config"] = config_dict
c2_keys = ["Panel", "FBP"]
if len([i for i in c2_keys if i in config_dict]) > 0:
if "c2s" not in results:
results["c2s"] = []
for i in [config_dict[i] for i in c2_keys if i in config_dict and len(config_dict[i]) > 0]:
results["c2s"].append({"c2_uri": i})
return results
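# Editor's sketch (standalone, not used by the class above): the config decoding boils
# down to a single-byte XOR whose key is recovered by assuming the plaintext starts
# with "<Configs>":
def _decode_configs_blob(config_raw):
    key = ord(config_raw[0]) ^ ord("<")
    return "".join([chr(ord(c) ^ key) for c in config_raw])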
Modules.list.append(diamondfox())
|
bwall/bamfdetect
|
BAMF_Detect/modules/diamondfox.py
|
Python
|
mit
| 4,881
|
[
"Brian"
] |
c21e4bac3c5bbe3245cb81e9f115feada7dd6c0f2c5a5a608d6def6b68ef415c
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import time as tm
import unittest as ut
import math
import numpy as np
from numpy import linalg as la
from numpy.random import random, seed
import espressomd
import espressomd.magnetostatics
@ut.skipIf(not espressomd.has_features(["DIPOLAR_BARNES_HUT"]),
"Features not available, skipping test!")
class BHGPUPerfTest(ut.TestCase):
# Handle for espresso system
system = espressomd.System(box_l=[10.0, 10.0, 10.0])
def vectorsTheSame(self, a, b):
tol = 15E-2
vec_len = la.norm(a - b)
rel = 2 * vec_len / (la.norm(a) + la.norm(b))
return rel <= tol
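# Editor's note: the check above is the symmetric relative difference
# 2*|a - b| / (|a| + |b|) with a 15% tolerance; e.g. a = [1, 0, 0] and
# b = [1.1, 0, 0] give 2*0.1/2.1 ~= 0.095, which passes.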
def stopAll(self):
for i in range(len(self.system.part)):
self.system.part[i].v = np.array([0.0, 0.0, 0.0])
self.system.part[i].omega_body = np.array([0.0, 0.0, 0.0])
def run_test_case(self):
seed(1)
pf_bh_gpu = 2.34
pf_dds_gpu = 3.524
ratio_dds_gpu_bh_gpu = pf_dds_gpu / pf_bh_gpu
l = 15
self.system.periodicity = [0, 0, 0]
self.system.time_step = 1E-4
self.system.cell_system.skin = 0.1
part_dip = np.zeros((3))
for n in [26487, 147543]:
force_mag_average = 0.0
torque_mag_average = 0.0
dipole_modulus = 1.3
# scale the box for a large number of particles:
if n > 1000:
l *= 1.5 * (n / 541) ** (1 / 3.0)
self.system.box_l = [l, l, l]
for i in range(n):
part_pos = np.array(random(3)) * l
costheta = 2 * random() - 1
sintheta = np.sin(np.arcsin(costheta))
phi = 2 * np.pi * random()
part_dip[0] = sintheta * np.cos(phi) * dipole_modulus
part_dip[1] = sintheta * np.sin(phi) * dipole_modulus
part_dip[2] = costheta * dipole_modulus
self.system.part.add(id=i, type=0, pos=part_pos, dip=part_dip, v=np.array(
[0, 0, 0]), omega_body=np.array([0, 0, 0]))
self.system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=10.0, sigma=0.5,
cutoff=0.55, shift="auto")
self.system.thermostat.set_langevin(kT=0.0, gamma=10.0)
self.system.integrator.set_steepest_descent(
f_max=0.0, gamma=0.1, max_displacement=0.1)
self.system.integrator.run(500)
self.stopAll()
self.system.integrator.set_vv()
self.system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=0.0, sigma=0.0,
cutoff=-1, shift=0.0)
self.system.cell_system.skin = 0.0
self.system.time_step = 0.01
self.system.thermostat.turn_off()
# gamma should be zero in order to avoid the noise term in force
# and torque
self.system.thermostat.set_langevin(kT=1.297, gamma=0.0)
dds_gpu = espressomd.magnetostatics.DipolarDirectSumGpu(
prefactor=pf_dds_gpu)
self.system.actors.add(dds_gpu)
t1 = tm.time()
self.system.integrator.run(steps=0, recalc_forces=True)
t2 = tm.time()
dt_dds_gpu = t2 - t1
dds_gpu_f = []
dds_gpu_t = []
for i in range(n):
dds_gpu_f.append(self.system.part[i].f)
dds_gpu_t.append(self.system.part[i].torque_lab)
dds_gpu_e = self.system.analysis.energy()["total"]
del dds_gpu
self.system.actors.clear()
self.system.integrator.run(steps=0, recalc_forces=True)
bh_gpu = espressomd.magnetostatics.DipolarBarnesHutGpu(
prefactor=pf_bh_gpu, epssq=400.0, itolsq=36.0)
self.system.actors.add(bh_gpu)
t1 = tm.time()
self.system.integrator.run(steps=0, recalc_forces=True)
t2 = tm.time()
dt_bh_gpu = t2 - t1
bhgpu_f = []
bhgpu_t = []
for i in range(n):
bhgpu_f.append(self.system.part[i].f)
bhgpu_t.append(self.system.part[i].torque_lab)
bhgpu_e = self.system.analysis.energy()["total"]
for i in range(n):
force_mag_average += la.norm(dds_gpu_f[i])
torque_mag_average += la.norm(dds_gpu_t[i])
force_mag_average /= n
torque_mag_average /= n
cutoff = 1E-2
# compare
for i in range(n):
if la.norm(dds_gpu_t[i]) > cutoff * torque_mag_average:
self.assertTrue(
self.vectorsTheSame(
np.array(
dds_gpu_t[i]), ratio_dds_gpu_bh_gpu * np.array(
bhgpu_t[i])),
msg='Torques on particle do not match. i={0} dds_gpu_t={1} ratio_dds_gpu_bh_gpu*bhgpu_t={2}'.format(i, np.array(dds_gpu_t[i]), ratio_dds_gpu_bh_gpu * np.array(bhgpu_t[i])))
if la.norm(dds_gpu_f[i]) > cutoff * force_mag_average:
self.assertTrue(
self.vectorsTheSame(
np.array(
dds_gpu_f[i]), ratio_dds_gpu_bh_gpu * np.array(
bhgpu_f[i])),
msg='Forces on particle do not match: i={0} dds_gpu_f={1} ratio_dds_gpu_bh_gpu*bhgpu_f={2}'.format(i, np.array(dds_gpu_f[i]), ratio_dds_gpu_bh_gpu * np.array(bhgpu_f[i])))
self.assertTrue(
abs(dds_gpu_e - bhgpu_e * ratio_dds_gpu_bh_gpu) <= abs(
1E-3 * dds_gpu_e),
msg='Energies for dds_gpu {0} and bh_gpu {1} do not match.'.format(dds_gpu_e, ratio_dds_gpu_bh_gpu * bhgpu_e))
print("=== Performance comparison ===")
print("dt_dds_gpu = {0}".format(dt_dds_gpu))
print("dt_bh_gpu = {0}".format(dt_bh_gpu))
self.system.integrator.run(steps=0, recalc_forces=True)
del bh_gpu
for i in range(len(self.system.actors.active_actors)):
self.system.actors.remove(self.system.actors.active_actors[i])
self.system.part.clear()
def test(self):
if (self.system.cell_system.get_state()["n_nodes"] > 1):
print("NOTE: Ignoring testcase for n_nodes > 1")
else:
self.run_test_case()
if __name__ == '__main__':
ut.main()
|
hmenke/espresso
|
testsuite/python/dds-and-bh-gpu-perf.py
|
Python
|
gpl-3.0
| 7,321
|
[
"ESPResSo"
] |
d06b7b99feab128171bff8f0b703af05f8684a133612b17f144660d4e1efc20e
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************************
**espressopp.soap.Descriptor**
**********************************
.. function:: espressopp.soap.Descriptor.compute()
:rtype:
"""
from espressopp import pmi
from _espressopp import soap_Descriptor
class result_types:
none, real_scalar, int_scalar, real_vector, int_vector = range(5)
class DescriptorLocal(object):
def compute(self):
res_type = self.cxxclass.getResultType(self)
if res_type == result_types.none:
return
elif res_type == result_types.real_scalar:
return self.cxxclass.compute_real(self)
elif res_type == result_types.int_scalar:
return self.cxxclass.compute_int(self)
elif res_type == result_types.real_vector:
return self.cxxclass.compute_real_vector_python(self)
elif res_type == result_types.int_vector:
return self.cxxclass.compute_int_vector_python(self)
else:
return self.cxxclass.compute(self)
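# Added note (illustration, not from the original source): a concrete Descriptor subclass whose
# C++ side reports result_types.real_vector will have compute() dispatch to
# compute_real_vector_python, so callers on the controller side simply invoke descriptor.compute().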
if pmi.isController :
class Descriptor(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
pmicall = [ "compute" ]
)
|
capoe/espressopp.soap
|
src/soap/Descriptor.py
|
Python
|
gpl-3.0
| 2,040
|
[
"ESPResSo"
] |
2fd549fad1b2799a9a7e3a100a51971193ac730b43f9c40031d6f527418ca513
|
import numpy as np
from . import _get_default_resolution
def conv(a,b,padding_things_equal=[1,3,4],padding_things_tail=[1],*args,**kwargs):
raise Exception("Needs reimplementation!")
a_ = a.copy()
b_ = b.copy()
a_ = np.pad(a_,[(s-1,s-1) for si,s in enumerate(b_.shape)],mode='constant')
return _conv_func(a_,b_)
"""
Filters for convolutions
------------------------
"""
def exponential_filter_5d(tau = 0.01, n=0, normalize=True, resolution=None,amplification=1.0,max_length=1000,min_steps=10):
kernel = exponential_filter_1d(tau=tau,n=n,normalize=normalize,resolution=resolution,amplification=amplification,max_length=max_length,min_steps=min_steps)
return kernel.reshape((1,len(kernel),1,1,1))
def exponential_filter_1d(tau = 0.01, n=0, normalize=True, resolution=None,amplification=1.0, max_length=1000,min_steps=10,even=None):
if resolution is None:
resolution = _get_default_resolution()
tau_in_steps = resolution.seconds_to_steps(tau)
if n == 0:
a = amplification/tau_in_steps
length = min(max(int(-tau_in_steps*np.log(resolution.filter_epsilon/a))+1.0,min_steps),max_length)
if length <= 1:
return np.ones(1)
if even is False and length%2 == 0:
length += 1
if even is True and length%2 == 1:
length += 1
t = np.linspace(1.0,length,int(length))
kernel = np.exp(-np.linspace(0.4,length-0.6,int(length))/float(tau_in_steps))
if normalize:
kernel *= a
else:
a = amplification
length = (int(-tau_in_steps*np.log(resolution.filter_epsilon/a))-n)
if length > max_length:
length = max_length
if length <= 1:
return np.ones(1)
if even is False and length%2 == 0:
length += 1
if even is True and length%2 == 1:
length += 1
t = np.linspace(1.0,n*length,n*length)
kernel = amplification * (n*t)**n * np.exp(-n*t/tau_in_steps) / (np.math.factorial(n-1) * tau_in_steps**(n+1))
if np.any(np.array(kernel.shape) == 0):
return np.ones(1)
return kernel
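# Hedged usage sketch (added illustration, not part of the original module). It assumes a
# minimal stand-in resolution object exposing `seconds_to_steps` and `filter_epsilon`, which
# the real `_get_default_resolution()` object is expected to provide; the helper below is
# never called by the module itself.
def _demo_exponential_filter():
    class _DemoResolution(object):
        filter_epsilon = 1e-3
        def seconds_to_steps(self, t):
            # hypothetical 1 ms time step
            return t / 0.001
    kernel = exponential_filter_1d(tau=0.05, resolution=_DemoResolution())
    signal = np.zeros(200)
    signal[50] = 1.0
    # convolving an impulse with the kernel yields an exponential decay with time constant tau
    return np.convolve(signal, kernel)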
def exponential_highpass_filter_1d(tau = 0.01, relative_weight=0.1, normalize=True, resolution=None,max_length=1000,min_steps=10):
if resolution is None:
return np.ones(1)
#tau_in_steps = resolution.seconds_to_steps(tau)
# we amplify the kernel to enforce greater precision
kernel = -exponential_filter_1d(tau=tau,normalize=normalize,resolution=resolution,amplification=1.0*relative_weight,max_length=max_length,min_steps=min_steps)/1.0
return np.concatenate([[1], kernel],axis=0)
def exponential_highpass_filter_3d(tau = 0.01, relative_weight=0.1, normalize=True, resolution=None,max_length=1000,min_steps=10):
kernel = exponential_highpass_filter_1d(tau=tau, relative_weight=relative_weight, normalize=normalize,resolution=resolution,max_length=max_length,min_steps=min_steps)
return kernel.reshape((len(kernel),1,1))
def exponential_highpass_filter_5d(tau = 0.01, relative_weight=0.1, normalize=True, resolution=None,max_length=1000,min_steps=10):
kernel = exponential_highpass_filter_1d(tau=tau, relative_weight=relative_weight, normalize=normalize,resolution=resolution,max_length=max_length,min_steps=min_steps)
return kernel.reshape((1,len(kernel),1,1,1))
def gauss_filter_2d(x_sig,y_sig,normalize=False,resolution=None,minimize=False, even=False, border_factor=1.0):
"""
A 2d gaussian.
x_sig and y_sig are the standard deviations in x and y direction.
if :py:obj:`even` is not None, the kernel will be either made to have even or uneven side lengths, depending on the truth value of :py:obj:`even`.
"""
if resolution is None:
resolution = _get_default_resolution()
if x_sig == 0 or y_sig == 0:
return np.ones((1,1))
x_sig = resolution.degree_to_pixel(x_sig)
y_sig = resolution.degree_to_pixel(y_sig)
a_x = 1.0/(x_sig * np.sqrt(2.0*np.pi))
x_min = border_factor*np.ceil(np.sqrt(-2.0*(x_sig**2)*np.log(resolution.filter_epsilon/a_x)))
a_y = 1.0/(y_sig * np.sqrt(2.0*np.pi))
y_min = border_factor*np.ceil(np.sqrt(-2.0*(y_sig**2)*np.log(resolution.filter_epsilon/a_y)))
if x_min < 1.0:
x_gauss = np.ones(2) if even else np.ones(1)
else:
X = np.arange(1.0-x_min-(0.5 if even else 0.0),x_min+(0.5 if even else 0.0))
x_gauss = (a_x *np.exp(-0.5*(X)**2/float(x_sig)**2)).clip(0,1)
if y_min < 1.0:
y_gauss = np.ones(2) if even else np.ones(1)
else:
Y = np.arange(1.0-y_min-(0.5 if even else 0.0),y_min+(0.5 if even else 0.0))
y_gauss = (a_y *np.exp(-0.5*(Y)**2/float(y_sig)**2)).clip(0,1)
kernel = np.prod(np.meshgrid(x_gauss,y_gauss),0)
if np.any(np.array(kernel.shape) == 0):
return np.ones((1,1))
return kernel
def gauss_filter_3d(x_sig,y_sig,normalize=False,resolution=None, even=None,border_factor=1.0):
"""
A 2d gaussian in a 3d data structure (1 x X x Y)
x_sig and y_sig are the standard deviations in x and y direction.
if :py:obj:`even` is not None, the kernel will be either made to have even or uneven side lengths, depending on the truth value of :py:obj:`even`.
"""
kernel = gauss_filter_2d(x_sig,y_sig,normalize=normalize,resolution=resolution, even=even,border_factor=border_factor)
return kernel.reshape((1,kernel.shape[0],kernel.shape[1]))
def gauss_filter_5d(x_sig,y_sig,normalize=False,resolution=None, even=None,border_factor=1.0):
"""
A 2d gaussian in a 5d data structure (1 x time x 1 x X x Y)
x_sig and y_sig are the standard deviations in x and y direction.
if :py:obj:`even` is not None, the kernel will be either made to have even or uneven side lengths, depending on the truth value of :py:obj:`even`.
"""
kernel = gauss_filter_2d(x_sig,y_sig,normalize=normalize,resolution=resolution, even=even,border_factor=border_factor)
return kernel.reshape((1,1,1,kernel.shape[0],kernel.shape[1]))
def fake_filter(*actual_filters,**kwargs):
"""
creates a fake filter that contains a single 1. The shape is the shape of all argument filters combined.
This is needed if the shape of a term has to be adjusted to another term that had originally the same shape, but then got filtered by multiple filters.
"""
shapes = np.array(np.sum([[aa-1 for aa in a.shape] for a in actual_filters],0) + np.ones(5),dtype=int)
new_filter = np.zeros(shapes)
if kwargs.get("centered",True):
new_filter[:,0,:,int(new_filter.shape[-2]/2),int(new_filter.shape[-1]/2)] = 1
else:
new_filter[:,0,:,0,0] = 1
return new_filter.reshape(shapes)
def fake_filter_shape(*actual_filters):
"""
returns the shape of all argument filters combined.
"""
shapes = np.array(np.sum([[aa-1 for aa in a.shape] for a in actual_filters],0) + np.ones(5),dtype=int)
return shapes
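# Added illustration: for two filters with shapes (1, 5, 1, 3, 3) and (1, 7, 1, 1, 1),
# fake_filter_shape gives (1, 11, 1, 3, 3), and fake_filter places a single 1 at the first
# time step and the spatial centre of that shape.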
"""
Filters for recursive algorithms
--------------------------------
"""
def ab_filter_exp(tau,step = 0.001):
""" create an Exp filter and return arrays for the coefficients
TODO: describe how to use a and b
"""
if tau < 0:
raise Exception("Negative time constants are not implemented")
if tau == 0:
a = np.array([1.0])
b = np.array([])
return [a,b]
a = np.array([1.0,-np.exp(-step/tau)])
b = np.array([1.0-np.exp(-step/tau)])
return [a,b]
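# Hedged usage sketch (added illustration): the returned [a, b] pair follows the usual
# IIR convention, so it is assumed to be used as scipy.signal.lfilter(b, a, x); the helper
# below is not called anywhere in the module.
def _demo_ab_filter_exp():
    from scipy.signal import lfilter
    a, b = ab_filter_exp(tau=0.05, step=0.001)
    x = np.zeros(200)
    x[10] = 1.0
    # the impulse response decays exponentially with time constant tau
    return lfilter(b, a, x)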
def ab_filter_exp_cascade(self,tau, n, step = 0.001):
""" create an ExpCascade filter and return arrays for the coefficients """
from scipy.misc import comb
tau = float(tau)
n = int(n)
if tau < 0:
raise Exception("Negative time constants are not implemented")
if n < 0:
raise Exception("Negative cascade number is not allowed")
if tau == 0:
a = np.array([1.0])
b = np.array([])
return [a,b]
tauC = tau/float(n) if n > 0 else tau
c = np.exp(-step/tauC)
N = n + 1
a = np.array([(-c)**(i)*comb(N,i) for i in range(N+1)])
b = np.array([(1.0-c)**N])
return [a,b]
def concatenate_time(a,b):
"""
concatenates ndarrays on the 'typical' time axis used in this implementation:
If the objects are 1d, they are concatenated along this axis.
(2d are static images only)
If the data is 3d, then the first axis is time.
If the data is 4d, then it is assumed that it is a collection of 3d objects, thus time is at the second index.
5d Objects are usually used as filters, while the first and third index are unused. Time is the second dimension.
6d objects are assumed to be collections of 5d objects such that time is at the third index.
If the data is a list or a tuple, elements from both a and b are zipped and concatenated along time.
"""
if type(a) in [list,tuple] and type(b) in [list,tuple]:
return [concatenate_time(a_,b_) for (a_,b_) in zip(a,b)]
if len(a.shape) == 1 and len(b.shape) == 1:
return np.concatenate([a,b],0)
if len(a.shape) == 3 and len(b.shape) == 3:
return np.concatenate([a,b],0)
if len(a.shape) == 4 and len(b.shape) == 4:
return np.concatenate([a,b],1)
if len(a.shape) == 5 and len(b.shape) == 5:
return np.concatenate([a,b],1)
if len(a.shape) == 6 and len(b.shape) == 6:
return np.concatenate([a,b],2)
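# Added illustration: two 3d movies of shape (10, 5, 5) and (20, 5, 5) concatenate along
# axis 0 into (30, 5, 5), while two 5d filters of shape (1, 10, 1, 5, 5) and (1, 20, 1, 5, 5)
# concatenate along axis 1 into (1, 30, 1, 5, 5).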
def deriche_coefficients(density):
"""
Creates deriche coefficients for a given map of filter density
"""
alpha = 1.695 * density
ema = np.exp(-alpha)
ek = (1.0-ema)*(1.0-ema) / (1.0+2.0*alpha*ema - ema*ema)
A1 = ek
A2 = ek * ema * (alpha-1.0)
A3 = ek * ema * (alpha+1.0)
A4 = -ek*ema*ema
B1 = 2.0*ema
B2 = -ema*ema
return {'A1':A1, 'A2':A2, 'A3':A3, 'A4':A4, 'B1':B1, 'B2':B2 }
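# Hedged sketch (added illustration): one common way Deriche coefficients like these are used
# is a causal pass plus an anti-causal pass whose outputs are summed. The pairing of A1..A4
# with the two passes below is an assumption for a scalar density, not taken from this module,
# and the helper is never called here.
def _demo_deriche_smooth_1d(x, c):
    n_samples = len(x)
    y_fwd = np.zeros(n_samples)
    for n in range(n_samples):
        y_fwd[n] = (c['A1'] * x[n]
                    + (c['A2'] * x[n - 1] if n >= 1 else 0.0)
                    + (c['B1'] * y_fwd[n - 1] if n >= 1 else 0.0)
                    + (c['B2'] * y_fwd[n - 2] if n >= 2 else 0.0))
    y_bwd = np.zeros(n_samples)
    for n in range(n_samples - 1, -1, -1):
        y_bwd[n] = ((c['A3'] * x[n + 1] if n + 1 < n_samples else 0.0)
                    + (c['A4'] * x[n + 2] if n + 2 < n_samples else 0.0)
                    + (c['B1'] * y_bwd[n + 1] if n + 1 < n_samples else 0.0)
                    + (c['B2'] * y_bwd[n + 2] if n + 2 < n_samples else 0.0))
    return y_fwd + y_bwd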
def sum_kernels(kernels):
"""
Sums numeric kernels and extends their size
"""
max_shape = np.max([k.shape for k in kernels],axis=0)
new_k = np.zeros(max_shape)
for k in kernels:
x1 = int(np.floor((max_shape[0] - k.shape[0])/2.0))
x2 = x1 + k.shape[0]
y1 = int(np.floor((max_shape[1] - k.shape[1])/2.0))
y2 = y1 + k.shape[1]
new_k[x1:x2,y1:y2] += k
return new_k
|
jahuth/convis
|
convis/numerical_filters.py
|
Python
|
gpl-3.0
| 10,343
|
[
"Gaussian"
] |
b9d9727fee7626960b65beeab1d5d1708b7e606b0413f310fa49b7b4f7b010b8
|
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import numpy
import preprocess
import hashfeatures
def get_naive_bayes_models():
gnb = GaussianNB()
mnb = MultinomialNB()
bnb = BernoulliNB()
classifier_list = [gnb,mnb,bnb]
classifier_name_list = ['Gaussian NB','Multinomial NB','Bernoulli NB']
return classifier_list,classifier_name_list
def get_neural_network(hidden_layer_size=50):
mlp = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
return [mlp], ['MultiLayer Perceptron']
def get_ensemble_models():
rf = RandomForestClassifier(n_estimators=51,min_samples_leaf=5,min_samples_split=3)
bagg = BaggingClassifier(n_estimators=71,random_state=42)
extra = ExtraTreesClassifier(n_estimators=57,random_state=42)
ada = AdaBoostClassifier(n_estimators=51,random_state=42)
grad = GradientBoostingClassifier(n_estimators=101,random_state=42)
classifier_list = [rf,bagg,extra,ada,grad]
classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
return classifier_list,classifier_name_list
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
print '--------- For Model : ------------', trained_model_name
predicted_values = trained_model.predict(X_test)
print metrics.classification_report(y_test,predicted_values)
print "Accuracy Score : ",metrics.accuracy_score(y_test,predicted_values)
print "---------------------------------------\n"
filename = 'train.csv'
author_frame = pd.read_csv(filename)
class_labels = list(author_frame['author'].values)
del author_frame['id']
del author_frame['author']
text_list = list(author_frame['text'].values)
cleaned_text_list = preprocess.text_clean_pipeline_list(text_list)
feat_hash = hashfeatures.FeatureHash(max_feature_num=1000)
text_features = feat_hash.get_feature_set(cleaned_text_list)
X_train,X_test,y_train,y_test = train_test_split(text_features,class_labels,test_size=0.2,random_state=42)
classifier_list,classifier_name_list = get_ensemble_models()
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
classifier.fit(X_train,y_train)
print_evaluation_metrics(classifier,classifier_name,X_test,y_test)
|
rupakc/Kaggle-Compendium
|
Spooky Author Identification/spooky-baseline.py
|
Python
|
mit
| 2,770
|
[
"Gaussian"
] |
d7307428c004830c11d32c9a8f67a3b8292f86cf23075c320e713b2b3f1eb912
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# lsvgridtriggers - simple list of vgrid triggers for a grid with access
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""List all IDs in the list of triggers for a given vgrid if user has
access to the vgrid.
"""
import shared.returnvalues as returnvalues
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.init import initialize_main_variables
from shared.vgrid import init_vgrid_script_list, vgrid_list
def signature():
"""Signature of the main function"""
defaults = {'vgrid_name': REJECT_UNSET}
return ['list', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id)
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
vgrid_name = accepted['vgrid_name'][-1]
# Validity of user and vgrid names is checked in this init function so
# no need to worry about illegal directory traversal through variables
(ret_val, msg, ret_variables) = init_vgrid_script_list(vgrid_name,
client_id, configuration)
if not ret_val:
output_objects.append({'object_type': 'error_text', 'text'
: msg})
return (output_objects, returnvalues.CLIENT_ERROR)
# list
(status, msg) = vgrid_list(vgrid_name, 'triggers', configuration)
if not status:
output_objects.append({'object_type': 'error_text', 'text': '%s'
% msg})
return (output_objects, returnvalues.SYSTEM_ERROR)
output_objects.append({'object_type': 'list', 'list': msg})
return (output_objects, returnvalues.OK)
|
heromod/migrid
|
mig/shared/functionality/lsvgridtriggers.py
|
Python
|
gpl-2.0
| 2,787
|
[
"Brian"
] |
c8175b2ac67e63e858df3bba9a663b2e6660ef2c2111d93b1276849c9a5532e1
|
import numpy
from rdkit.Chem import AllChem
from steps.preprocessing.shared.chemicalproperties import chemical_properties
from steps.preprocessing.shared.tensor2d import bond_positions, bond_symbols
with_empty_bits = False
padding = 2
def molecule_to_2d_tensor(molecule, index_lookup, rasterizer_, preprocessed_shape, atom_locations_shape=None,
transformer_=None, random_=None, flip=False, rotation=0, chemical_properties_=[],
data_type='float32'):
# We redo this if the transformation size does not fit
while True:
try:
preprocessed_row = numpy.zeros((preprocessed_shape[1], preprocessed_shape[2], preprocessed_shape[3]),
dtype=data_type)
if atom_locations_shape is None:
atom_locations_row = None
else:
atom_locations_row = numpy.full((atom_locations_shape[1], atom_locations_shape[2]), -1, dtype='int16')
atom_positions = dict()
AllChem.Compute2DCoords(molecule)
if transformer_ is not None and random_ is not None:
flip = bool(random_.getrandbits(1))
rotation = random_.randrange(0, 360)
for atom in molecule.GetAtoms():
position = molecule.GetConformer().GetAtomPosition(atom.GetIdx())
x = position.x
y = position.y
if transformer_ is not None:
x, y = transformer_.apply(x, y, flip, rotation)
x, y = rasterizer_.apply(x, y)
# Check if coordinates fit into the shape
if (not 0 <= x < preprocessed_row.shape[0]) or (not 0 <= y < preprocessed_row.shape[1]):
# Redo everything hoping for a better fitting transformation
raise ValueError()
if atom.GetSymbol() in index_lookup:
symbol_index = index_lookup[atom.GetSymbol()]
preprocessed_row[x, y, symbol_index] = 1
if len(chemical_properties_) > 0:
preprocessed_row[x, y, len(index_lookup):] = \
chemical_properties.get_chemical_properties(atom, chemical_properties_)[:]
if atom_locations_row is not None:
atom_locations_row[atom.GetIdx(), 0] = x
atom_locations_row[atom.GetIdx(), 1] = y
atom_positions[atom.GetIdx()] = [x, y]
bond_positions_ = bond_positions.calculate(molecule, atom_positions)
for bond in molecule.GetBonds():
bond_symbol = bond_symbols.get_bond_symbol(bond.GetBondType())
if bond_symbol is not None and bond_symbol in index_lookup:
bond_symbol_index = index_lookup[bond_symbol]
for position in bond_positions_[bond.GetIdx()]:
preprocessed_row[position[0], position[1], bond_symbol_index] = 1
if with_empty_bits and ' ' in index_lookup:
set_empty_bits(preprocessed_row, len(index_lookup), index_lookup[' '])
return preprocessed_row, atom_locations_row
except ValueError:
# Redo everything hoping for a better fitting transformation
pass
def set_empty_bits(preprocessed_row, number_symbols, empty_symbol_index):
for x in range(preprocessed_row.shape[0]):
for y in range(preprocessed_row.shape[1]):
value_sum = 0
for symbol in range(number_symbols):
value_sum += preprocessed_row[x, y, symbol]
if value_sum == 0:
preprocessed_row[x, y, empty_symbol_index] = 1
|
patrick-winter-knime/mol-struct-nets
|
molstructnets/steps/preprocessing/shared/tensor2d/molecule_2d_tensor.py
|
Python
|
gpl-3.0
| 3,723
|
[
"RDKit"
] |
087c302bdfd6219a6ee44122f4a6e154fab6554e06eaf772585eb33cc01cb5f6
|
'''
Pulse characterization
Created Fri May 12 2017
@author: cpkmanchee
'''
import numpy as np
import os.path
from inspect import getargspec
from warnings import warn
from beamtools.constants import h,c,pi
from beamtools.common import normalize, gaussian, sech2, alias_dict, FitResult
from beamtools.import_data_file import import_data_file as _import
from beamtools.import_data_file import DataObj
from scipy.optimize import curve_fit
__all__ = ['autocorr','spectrumFT', 'fit_ac', 'ac_x2t', 'sigma_fwhm']
def autocorr(x):
'''Calculate autocorrelation of function
'''
return np.correlate(x, x, mode='same')
def spectrumFT(data,from_file = False, file_type='oo_spec', units_wl='nm', n_interp=0):
'''Compute transform limited pulse from spectrum.
data = wavelength vs. PSD (intensity) if from_file=False
= filename of spectrum file to be imported if from_file=True
Units assumed to be nm for wavelength.
If from_file is set True, data should be filename
Optional file_format, default is oceanoptics_spectrometer. Currently
can not change this (filetype handling for x/y).
n_interp = bit depth of frequency interpolation, n = 2**n_interp. 0 = auto
Et/Ew are envelope functions of time/freq electric field
w = 2pi*nu
S(w) = |Ew|**2
I(t) = |Et|**2
'''
if from_file:
if type(data) is str:
if not os.path.exists(data):
print('File does not exist')
return -1
imported_data = _import(data,file_type)
#insert testing for wavelength/intensity location in dataobject
wavelength = imported_data.wavelength
intensity = imported_data.intensity
#get units from dataobject
else:
print('invalid filetype')
return -1
else:
wavelength = data[0]
intensity = data[1]
imported_data = data
if n_interp == 0:
#insert here later - round up to nearest power of two.
n = 2**12
else:
n = 2**n_interp
#use units to convert wavelength to SI
wl = wavelength*1E-9
psd = normalize(intensity)
nu = c/wl #nu is SI
#interpolate psd, linear freq spacing
nui = np.linspace(min(nu),max(nu),n)
df = (max(nu)-min(nu))/(n-1)
Ew = (normalize(np.interp(nui,np.flipud(nu),np.flipud(psd))))**(1/2)
#i = (np.abs(nui-nu0)).argmin() #centre freq index
#perform FT-1, remove centre spike
t = np.fft.fftshift(np.fft.fftfreq(n,df)[1:-1])
ti = np.fft.fftshift(np.fft.fftfreq(n,df))
Et = np.fft.fftshift((np.fft.ifft(np.fft.ifftshift(Ew)))[1:-1])
Eti = np.interp(ti,t,Et)
output_dict = {'time': ti, 'et': Eti, 'nu': nui, 'ew': Ew}
output = DataObj(output_dict)
return output, imported_data
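# Hedged usage sketch (added illustration, not part of the original module): feeds a synthetic
# Gaussian spectrum in directly as [wavelength_nm, intensity]; the attribute names on the
# returned DataObj are assumed to follow the keys used above ('time', 'et', 'nu', 'ew').
def _demo_spectrumFT():
    wl = np.linspace(1020.0, 1040.0, 400)            # wavelength in nm
    psd = np.exp(-0.5 * ((wl - 1030.0) / 2.0)**2)    # synthetic Gaussian spectrum
    tl, _ = spectrumFT([wl, psd], from_file=False)
    # transform-limited temporal intensity profile
    return np.abs(tl.et)**2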
def ac_x2t(position,aoi=15,config='sym'):
'''Convert autocorrelation position to time
Symmetric - stage moves perp to normal.
Asymmetric - stage moves along incoming optical axis
'''
if type(config) is not str:
print('Unrecognized configuration. Must be symmetric or asymmetric.')
return position
if config.lower() in alias_dict['symmetric']:
time = (1/c)*position*2*np.cos(aoi*pi/180)
elif config.lower() in alias_dict['asymmetric']:
time = (1/c)*2*position
#time = (1/c)*position*(1+np.cos(2*aoi*pi/180))
else:
print('Unrecognized configuration. Must be symmetric or asymmetric.')
#I think this is a general expression, given stage_angle wrt to mirror normal
#time = (1/c)*2*position*np.cos((aoi-stage_angle)*pi/180)
return position
return time
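# Added illustration: in the symmetric configuration with aoi = 15 degrees, 1 um of stage
# travel corresponds to a delay of (2*cos(15 deg)/c)*1e-6 m, roughly 6.4 fs.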
def fit_ac(data, from_file = False, file_type='bt_ac', form='all', bgform = 'constant'):
'''Fit autocorrelation peak.
data must be either:
1. 2 x n array - data[0] = time(delay), data[1] = intensity
2. datafile name --> from_file must be True
If there is no 'delay' parameter in data file (only position), the position is
auto converted to time delay.
'''
if from_file:
if type(data) is str:
if not os.path.exists(data):
warn('File does not exist')
return -1
imported_data = _import(data,file_type)
#insert testing for power location in dataobject
position = imported_data.position
intensity = imported_data.power
if 'delay' in imported_data.__dict__:
delay = imported_data.delay
else:
delay = ac_x2t(position,aoi=15,config='sym')
#get units from dataobject
else:
warn('invalid filetype')
return -1
else:
imported_data = data
delay = data[0]
intensity = data[1]
x = delay
y = intensity
bgpar, bgform = _background(x,y,form = bgform)
mean = np.average(x,weights = y)
stdv = np.sqrt(np.average((x-mean)**2 ,weights = y))
#set fitting function (including background)
if bgform is None:
def fitfuncGaus(x,sigma,a,x0):
return gaussian(x,sigma,a,x0)
def fitfuncSech2(x,sigma,a,x0):
return sech2(x,sigma,a,x0)
elif bgform.lower() in alias_dict['constant']:
def fitfuncGaus(x,sigma,a,x0,p0):
return gaussian(x,sigma,a,x0) + p0
def fitfuncSech2(x,sigma,a,x0,p0):
return sech2(x,sigma,a,x0) + p0
elif bgform.lower() in alias_dict['linear']:
def fitfuncGaus(x,sigma,a,x0,p1,p0):
return gaussian(x,sigma,a,x0) + p1*x + p0
def fitfuncSech2(x,sigma,a,x0,p1,p0):
return sech2(x,sigma,a,x0) + p1*x + p0
elif bgform.lower() in alias_dict['quadratic']:
def fitfuncGaus(x,sigma,a,x0,p2,p1,p0):
return gaussian(x,sigma,a,x0) + p2*x**2 + p1*x + p0
def fitfuncSech2(x,sigma,a,x0,p2,p1,p0):
return sech2(x,sigma,a,x0) + p2*x**2 + p1*x + p0
else:
def fitfuncGaus(x,sigma,a,x0):
return gaussian(x,sigma,a,x0)
def fitfuncSech2(x,sigma,a,x0):
return sech2(x,sigma,a,x0)
nFitArgs = len(getargspec(fitfuncGaus).args) - 1
#sets which functions are to be fit... this can be streamlined i think
if form.lower() in ['both', 'all']:
fitGaus = True
fitSech2 = True
elif form.lower() in alias_dict['gaus']:
fitGaus = True
fitSech2 = False
elif form.lower() in alias_dict['sech2']:
fitGaus = False
fitSech2 = True
else:
print('Unknown fit form: ' + form)
fitGaus = False
fitSech2 = False
#start fitting
popt=[]
pcov=[]
fit_results=[]
if type(bgpar) is np.float64:
p0=[stdv,max(y)-min(y),mean,bgpar]
elif type(bgpar) is np.ndarray:
p0=[stdv,max(y)-min(y),mean]+bgpar.tolist()
else:
p0=None
if fitGaus:
try:
valid = ~(np.isnan(x) | np.isnan(y))
poptGaus,pcovGaus = curve_fit(fitfuncGaus,x[valid],y[valid],p0)
except RuntimeError:
poptGaus = np.zeros(nFitArgs)
pcovGaus = np.zeros((nFitArgs,nFitArgs))
popt.append(poptGaus)
pcov.append(pcovGaus)
fit_results.append(FitResult(ffunc=fitfuncGaus, ftype='gaussian',
popt=poptGaus, pcov=pcovGaus, indep_var='time', bgform=bgform))
if fitSech2:
try:
valid = ~(np.isnan(x) | np.isnan(y))
poptSech2,pcovSech2 = curve_fit(fitfuncSech2,x[valid],y[valid],p0)
except RuntimeError:
poptSech2 = np.zeros(nFitArgs)
pcovSech2 = np.zeros((nFitArgs,nFitArgs))
popt.append(poptSech2)
pcov.append(pcovSech2)
fit_results.append(FitResult(ffunc=fitfuncSech2, ftype='sech2',
popt=poptSech2, pcov=pcovSech2, indep_var='time', bgform=bgform))
if len(fit_results) == 1:
fit_results = fit_results[0]
return fit_results, imported_data
def sigma_fwhm(sigma, shape='gaus'):
'''Convert sigma to full-width half-max
'''
if shape.lower() in alias_dict['gaus']:
A = 2*np.sqrt(2*np.log(2))
elif shape.lower() in alias_dict['sech2']:
A = 2*np.arccosh(np.sqrt(2))
elif shape.lower() in alias_dict['lorentz']:
A = 1
else:
A = 1
warn('Pulse shape not recognized.')
return A*sigma
def deconv(sigma, shape='gaus'):
'''Deconvolution factors
Go from sigma_ac --> sigma
'''
if shape.lower() in alias_dict['gaus']:
A = 1/np.sqrt(2)
elif shape.lower() in alias_dict['sech2']:
A = 0.6482
elif shape.lower() in alias_dict['lorentz']:
A = 0.5
else:
A = 1
warn('Pulse shape not recognized.')
return A*sigma
def _background(x,y,form = 'constant'):
'''Provides starting values for background parameters.
Takes x,y data and the desired background form (default to constant)
returns p, the polynomial coefficients. p is variable in length.
'''
if form is None:
p = np.zeros((3))
elif form.lower() in alias_dict['constant']:
p = min(y)
#p = np.hstack((p,[0,0]))
elif form.lower() in alias_dict['linear']:
p = np.linalg.solve([[1,x[0]],[1,x[-1]]], [y[0],y[-1]])
p = np.flipud(p)
elif form.lower() in alias_dict['quadratic']:
index = np.argmin(y)
if index == 0:
x3 = 2*x[0]-x[-1]
y3 = y[-1]
elif index == len(y)-1:
x3 = 2*x[-1]-x[0]
y3 = y[0]
else:
x3 = x[index]
y3 = y[index]
a = [[1,x[0],x[0]**2],[1,x[-1],x[-1]**2],[1,x3,x3**2]]
b = [y[0],y[-1],y3]
p = np.linalg.solve(a,b)
p = np.flipud(p)
else:
warn('Unknown background form')
p = np.zeros((3))
return p, form
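# Hedged usage sketch (added illustration, not part of the original module): fits a synthetic
# Gaussian autocorrelation trace, then converts the fitted sigma into a deconvolved pulse FWHM.
# The gaussian(x, sigma, a, x0) signature and the FitResult.popt attribute are assumed from the
# way they are used above; the helper is never called by the module.
def _demo_fit_ac():
    t = np.linspace(-1e-12, 1e-12, 500)              # delay axis in seconds
    ac = gaussian(t, 100e-15, 1.0, 0.0) + 0.02       # synthetic AC trace with constant background
    fit, _ = fit_ac([t, ac], form='gaus', bgform='constant')
    sigma_pulse = deconv(fit.popt[0], shape='gaus')  # AC sigma -> pulse sigma
    return sigma_fwhm(sigma_pulse, shape='gaus')     # pulse FWHM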
|
kikimaroca/beamtools
|
beamtools/pulse.py
|
Python
|
mit
| 10,038
|
[
"Gaussian"
] |
bfc98a5e9f835b360527b43db92e6e668a6b00b863aa71a3005ebaef7a41630c
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RHdf5array(RPackage):
"""HDF5 backend for DelayedArray objects
Implements the HDF5Array and TENxMatrix classes, 2 convenient and
memory-efficient array-like containers for on-disk representation of
HDF5 datasets. HDF5Array is for datasets that use the conventional (i.e.
dense) HDF5 representation. TENxMatrix is for datasets that use the
HDF5-based sparse matrix representation from 10x Genomics (e.g. the 1.3
Million Brain Cell Dataset). Both containers being DelayedArray
extensions, they support all operations supported by DelayedArray
objects. These operations can be either delayed or block-processed."""
homepage = "https://bioconductor.org/packages/HDF5Array"
git = "https://git.bioconductor.org/packages/HDF5Array.git"
version('1.18.0', commit='d5bd55d170cb384fdebdf60751e1e28483782caa')
version('1.12.3', commit='21c6077f3f789748a18f2e579110576c5522e975')
version('1.10.1', commit='0b8ae1dfb56e4203dd8e14781850370df46a5e2c')
version('1.8.1', commit='3c9aa23d117bf489b6341708dc80c943bd1af11a')
version('1.6.0', commit='95f2f8d3648143abe9dc77c76340c5edf4114c82')
version('1.4.8', commit='79ab96d123c8da8f8ead81f678fe714c0958ff45')
depends_on('r@3.4:', type=('build', 'run'))
depends_on('r-delayedarray@0.2.4:', type=('build', 'run'))
depends_on('r-delayedarray@0.3.18:', when='@1.6.0:', type=('build', 'run'))
depends_on('r-delayedarray@0.5.32:', when='@1.8.1:', type=('build', 'run'))
depends_on('r-delayedarray@0.7.41:', when='@1.10.1:', type=('build', 'run'))
depends_on('r-delayedarray@0.9.3:', when='@1.12.3:', type=('build', 'run'))
depends_on('r-delayedarray@0.15.16:', when='@1.18.0:', type=('build', 'run'))
depends_on('r-rhdf5', type=('build', 'run'))
depends_on('r-rhdf5@2.25.6:', when='@1.10.1:', type=('build', 'run'))
depends_on('r-rhdf5@2.31.6:', when='@1.18.0:', type=('build', 'run'))
depends_on('r-matrix', when='@1.18.0:', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-biocgenerics@0.25.1:', when='@1.8.1:', type=('build', 'run'))
depends_on('r-biocgenerics@0.31.5:', when='@1.18.0:', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-s4vectors@0.21.6:', when='@1.12.3:', type=('build', 'run'))
depends_on('r-s4vectors@0.27.13:', when='@1.18.0:', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-rhdf5lib', when='@1.12.3:', type=('build', 'run'))
depends_on('gmake', type='build')
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-hdf5array/package.py
|
Python
|
lgpl-2.1
| 2,830
|
[
"Bioconductor"
] |
906b96c74f0184c131cc70e91e08e3caa90d54d0e09cfde55aa975a5bda71d9c
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator as glm
# test scoring_history for Gaussian family with validation dataset and cv
def test_gaussian_alpha():
col_list_compare = ["iterations", "objective", "negative_log_likelihood", "training_rmse", "validation_rmse",
"training_mae", "validation_mae", "training_deviance", "validation_deviance"]
h2o_data = h2o.import_file(
path=pyunit_utils.locate("smalldata/glm_test/gaussian_20cols_10000Rows.csv"))
enum_columns = ["C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "C10"]
for cname in enum_columns:
h2o_data[cname] = h2o_data[cname].asfactor()
myY = "C21"
myX = h2o_data.names.remove(myY)
data_frames = h2o_data.split_frame(ratios=[0.8])
training_data = data_frames[0]
test_data = data_frames[1]
# test with lambda search on, generate_scoring_history on and off
model1 = glm(family="gaussian", lambda_search=True, alpha=[0,0.2,1], generate_scoring_history=True, nlambdas=5)
model1.train(x=myX, y=myY, training_frame = training_data, validation_frame = test_data)
model2 = glm(family="gaussian", lambda_search=True, alpha=[0,0.2,1], generate_scoring_history=False, nlambdas=5)
model2.train(x=myX, y=myY, training_frame = training_data, validation_frame = test_data)
pyunit_utils.assertCoefDictEqual(model1.coef(), model2.coef())
# test with lambda search off, generate_scoring_history on and off
model1 = glm(family="gaussian", lambda_search=False, alpha=[0,0.8,1], generate_scoring_history=True,
Lambda=[0,0.004])
model1.train(x=myX, y=myY, training_frame = training_data, validation_frame = test_data)
model2 = glm(family="gaussian", lambda_search=False, alpha=[0,0.8,1], generate_scoring_history=False,
Lambda=[0,0.004])
model2.train(x=myX, y=myY, training_frame = training_data, validation_frame = test_data)
pyunit_utils.assertCoefDictEqual(model1.coef(), model2.coef())
# test with lambda search on, generate_scoring_history on and off, cv on
model1 = glm(family="gaussian", lambda_search=True, alpha=[0,0.8,1], generate_scoring_history=True,
nfolds=2, seed=12345, nlambdas=5)
model1.train(x=myX, y=myY, training_frame = training_data, validation_frame = test_data)
model2 = glm(family="gaussian", lambda_search=True, alpha=[0,0.8,1], generate_scoring_history=False,
nfolds=2, seed=12345, nlambdas=5)
model2.train(x=myX, y=myY, training_frame = training_data, validation_frame = test_data)
pyunit_utils.assertCoefDictEqual(model1.coef(), model2.coef())
# test with lambda search off, generate_scoring_history on and off, cv on
model1 = glm(family="gaussian", lambda_search=False, alpha=[0,0.2,1], generate_scoring_history=True,
Lambda=[0,0.1], nfolds=2, seed=12345)
model1.train(x=myX, y=myY, training_frame = training_data, validation_frame = test_data)
model2 = glm(family="gaussian", lambda_search=False, alpha=[0,0.2], generate_scoring_history=False,
Lambda=[0,0.1], nfolds=2, seed=12345)
model2.train(x=myX, y=myY, training_frame = training_data, validation_frame = test_data)
pyunit_utils.assertCoefDictEqual(model1.coef(), model2.coef())
if __name__ == "__main__":
pyunit_utils.standalone_test(test_gaussian_alpha)
else:
test_gaussian_alpha()
|
h2oai/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_8077_gaussian_alpha.py
|
Python
|
apache-2.0
| 3,487
|
[
"Gaussian"
] |
bfb78afeed74a3ef0a7b4b9a90b4ceb9a52aaf7c901614a060175a30cee4a7d5
|
"""A client for the device based server."""
#
# Copyright (c) 2010 Brian E. Granger and Eugene Chernyshov
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import zmq
import os
from time import time
print 'Client', os.getpid()
context = zmq.Context(1)
socket = context.socket(zmq.REQ)
socket.connect('tcp://127.0.0.1:5555')
while True:
data = zmq.Message(str(os.getpid()))
start = time()
socket.send(data)
data = socket.recv()
print time()-start, data
|
mgadi/naemonbox
|
sources/psdash/pyzmq-13.1.0/examples/device/client.py
|
Python
|
gpl-2.0
| 1,140
|
[
"Brian"
] |
ee2063100d4cf17d2582deed7e14a739e520215706e5acef39de172473362820
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible import context
from ansible.cli.galaxy import GalaxyCLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import collection
from ansible.galaxy.dependency_resolution.dataclasses import Requirement
from ansible.module_utils._text import to_native
def path_exists(path):
if to_native(path) == '/root/.ansible/collections/ansible_collections/sandwiches/ham':
return False
elif to_native(path) == '/usr/share/ansible/collections/ansible_collections/sandwiches/reuben':
return False
elif to_native(path) == 'nope':
return False
else:
return True
def isdir(path):
if to_native(path) == 'nope':
return False
else:
return True
def cliargs(collections_paths=None, collection_name=None):
if collections_paths is None:
collections_paths = ['~/root/.ansible/collections', '/usr/share/ansible/collections']
context.CLIARGS._store = {
'collections_path': collections_paths,
'collection': collection_name,
'type': 'collection',
}
@pytest.fixture
def mock_collection_objects(mocker):
mocker.patch('ansible.cli.galaxy.GalaxyCLI._resolve_path', side_effect=['/root/.ansible/collections', '/usr/share/ansible/collections'])
mocker.patch('ansible.cli.galaxy.validate_collection_path',
side_effect=['/root/.ansible/collections/ansible_collections', '/usr/share/ansible/collections/ansible_collections'])
collection_args_1 = (
(
'sandwiches.pbj',
'1.5.0',
None,
'dir',
),
(
'sandwiches.reuben',
'2.5.0',
None,
'dir',
),
)
collection_args_2 = (
(
'sandwiches.pbj',
'1.0.0',
None,
'dir',
),
(
'sandwiches.ham',
'1.0.0',
None,
'dir',
),
)
collections_path_1 = [Requirement(*cargs) for cargs in collection_args_1]
collections_path_2 = [Requirement(*cargs) for cargs in collection_args_2]
mocker.patch('ansible.cli.galaxy.find_existing_collections', side_effect=[collections_path_1, collections_path_2])
@pytest.fixture
def mock_from_path(mocker):
def _from_path(collection_name='pbj'):
collection_args = {
'sandwiches.pbj': (
(
'sandwiches.pbj',
'1.5.0',
None,
'dir',
),
(
'sandwiches.pbj',
'1.0.0',
None,
'dir',
),
),
'sandwiches.ham': (
(
'sandwiches.ham',
'1.0.0',
None,
'dir',
),
),
}
from_path_objects = [Requirement(*args) for args in collection_args[collection_name]]
mocker.patch('ansible.cli.galaxy.Requirement.from_dir_path_as_unknown', side_effect=from_path_objects)
return _from_path
def test_execute_list_collection_all(mocker, capsys, mock_collection_objects, tmp_path_factory):
"""Test listing all collections from multiple paths"""
cliargs()
mocker.patch('os.path.exists', return_value=True)
mocker.patch('os.path.isdir', return_value=True)
gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list'])
tmp_path = tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
gc.execute_list_collection(artifacts_manager=concrete_artifact_cm)
out, err = capsys.readouterr()
out_lines = out.splitlines()
assert len(out_lines) == 12
assert out_lines[0] == ''
assert out_lines[1] == '# /root/.ansible/collections/ansible_collections'
assert out_lines[2] == 'Collection Version'
assert out_lines[3] == '----------------- -------'
assert out_lines[4] == 'sandwiches.pbj 1.5.0 '
assert out_lines[5] == 'sandwiches.reuben 2.5.0 '
assert out_lines[6] == ''
assert out_lines[7] == '# /usr/share/ansible/collections/ansible_collections'
assert out_lines[8] == 'Collection Version'
assert out_lines[9] == '-------------- -------'
assert out_lines[10] == 'sandwiches.ham 1.0.0 '
assert out_lines[11] == 'sandwiches.pbj 1.0.0 '
def test_execute_list_collection_specific(mocker, capsys, mock_collection_objects, mock_from_path, tmp_path_factory):
"""Test listing a specific collection"""
collection_name = 'sandwiches.ham'
mock_from_path(collection_name)
cliargs(collection_name=collection_name)
mocker.patch('os.path.exists', path_exists)
mocker.patch('os.path.isdir', return_value=True)
mocker.patch('ansible.galaxy.collection.validate_collection_name', collection_name)
mocker.patch('ansible.cli.galaxy._get_collection_widths', return_value=(14, 5))
gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list', collection_name])
tmp_path = tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
gc.execute_list_collection(artifacts_manager=concrete_artifact_cm)
out, err = capsys.readouterr()
out_lines = out.splitlines()
assert len(out_lines) == 5
assert out_lines[0] == ''
assert out_lines[1] == '# /usr/share/ansible/collections/ansible_collections'
assert out_lines[2] == 'Collection Version'
assert out_lines[3] == '-------------- -------'
assert out_lines[4] == 'sandwiches.ham 1.0.0 '
def test_execute_list_collection_specific_duplicate(mocker, capsys, mock_collection_objects, mock_from_path, tmp_path_factory):
"""Test listing a specific collection that exists at multiple paths"""
collection_name = 'sandwiches.pbj'
mock_from_path(collection_name)
cliargs(collection_name=collection_name)
mocker.patch('os.path.exists', path_exists)
mocker.patch('os.path.isdir', return_value=True)
mocker.patch('ansible.galaxy.collection.validate_collection_name', collection_name)
gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list', collection_name])
tmp_path = tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
gc.execute_list_collection(artifacts_manager=concrete_artifact_cm)
out, err = capsys.readouterr()
out_lines = out.splitlines()
assert len(out_lines) == 10
assert out_lines[0] == ''
assert out_lines[1] == '# /root/.ansible/collections/ansible_collections'
assert out_lines[2] == 'Collection Version'
assert out_lines[3] == '-------------- -------'
assert out_lines[4] == 'sandwiches.pbj 1.5.0 '
assert out_lines[5] == ''
assert out_lines[6] == '# /usr/share/ansible/collections/ansible_collections'
assert out_lines[7] == 'Collection Version'
assert out_lines[8] == '-------------- -------'
assert out_lines[9] == 'sandwiches.pbj 1.0.0 '
def test_execute_list_collection_specific_invalid_fqcn(mocker, tmp_path_factory):
"""Test an invalid fully qualified collection name (FQCN)"""
collection_name = 'no.good.name'
cliargs(collection_name=collection_name)
mocker.patch('os.path.exists', return_value=True)
mocker.patch('os.path.isdir', return_value=True)
gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list', collection_name])
tmp_path = tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
with pytest.raises(AnsibleError, match='Invalid collection name'):
gc.execute_list_collection(artifacts_manager=concrete_artifact_cm)
def test_execute_list_collection_no_valid_paths(mocker, capsys, tmp_path_factory):
"""Test listing collections when no valid paths are given"""
cliargs()
mocker.patch('os.path.exists', return_value=True)
mocker.patch('os.path.isdir', return_value=False)
mocker.patch('ansible.utils.color.ANSIBLE_COLOR', False)
mocker.patch('ansible.cli.galaxy.display.columns', 79)
gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list'])
tmp_path = tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
with pytest.raises(AnsibleOptionsError, match=r'None of the provided paths were usable.'):
gc.execute_list_collection(artifacts_manager=concrete_artifact_cm)
out, err = capsys.readouterr()
assert '[WARNING]: - the configured path' in err
assert 'exists, but it\nis not a directory.' in err
def test_execute_list_collection_one_invalid_path(mocker, capsys, mock_collection_objects, tmp_path_factory):
"""Test listing all collections when one invalid path is given"""
cliargs()
mocker.patch('os.path.exists', return_value=True)
mocker.patch('os.path.isdir', isdir)
mocker.patch('ansible.cli.galaxy.GalaxyCLI._resolve_path', side_effect=['/root/.ansible/collections', 'nope'])
mocker.patch('ansible.utils.color.ANSIBLE_COLOR', False)
gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list', '-p', 'nope'])
tmp_path = tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
gc.execute_list_collection(artifacts_manager=concrete_artifact_cm)
out, err = capsys.readouterr()
out_lines = out.splitlines()
assert out_lines[0] == ''
assert out_lines[1] == '# /root/.ansible/collections/ansible_collections'
assert out_lines[2] == 'Collection Version'
assert out_lines[3] == '----------------- -------'
assert out_lines[4] == 'sandwiches.pbj 1.5.0 '
# Only a partial test of the output
assert err == '[WARNING]: - the configured path nope, exists, but it is not a directory.\n'
|
atosorigin/ansible
|
test/units/cli/galaxy/test_execute_list_collection.py
|
Python
|
gpl-3.0
| 10,584
|
[
"Galaxy"
] |
2ffb016e5454e47ffb51aa5a3af58c60d9250f4435903326d962c5f99b622e00
|
"""
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from student.models import CourseEnrollment
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import (
CertificateGenerationConfiguration,
CertificateWhitelist,
GeneratedCertificate,
CertificateStatuses,
CertificateGenerationHistory,
)
from certificates import api as certs_api
from util.date_utils import get_default_time_display
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url, bulk_email_is_enabled_for_course
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if the specified user has staff access.
"""
return bool(user and has_access(user, 'staff', course, course.id))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
}
if not access['staff']:
raise Http404()
is_white_label = CourseMode.is_white_label(course_key)
sections = [
_section_course_info(course, access),
_section_membership(course, access, is_white_label),
_section_cohort_management(course, access),
_section_student_admin(course, access),
_section_data_download(course, access),
]
analytics_dashboard_message = None
if settings.ANALYTICS_DASHBOARD_URL:
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = analytics_dashboard_message.format(
link_start=link_start, link_end="</a>", analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
# Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
course_mode_has_price = False
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
u"Course %s has %s course modes with payment options. Course must only have "
u"one paid course mode to enable eCommerce options.",
unicode(course_key), len(paid_modes)
)
if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if bulk_email_is_enabled_for_course(course_key):
sections.append(_section_send_email(course, access))
# Gate access to Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course, access))
# Gate access to Ecommerce tab
if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, is_white_label))
# Gate access to Special Exam tab depending if either timed exams or proctored exams
# are enabled in the course
# NOTE: For now, if we only have proctored exams enabled, then only platform Staff
# (user.is_staff) will be able to view the special exams tab. This may
# change in the future
can_see_special_exams = (
((course.enable_proctored_exams and request.user.is_staff) or course.enable_timed_exams) and
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False)
)
if can_see_special_exams:
sections.append(_section_special_exams(course, access))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
certs_enabled = CertificateGenerationConfiguration.current().enabled
if certs_enabled and access['admin']:
sections.append(_section_certificates(course))
disable_buttons = not _is_small_course(course_key)
certificate_white_list = CertificateWhitelist.get_certificate_white_list(course_key)
generate_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_certificate_exceptions',
kwargs={'course_id': unicode(course_key), 'generate_for': ''}
)
generate_bulk_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_bulk_certificate_exceptions',
kwargs={'course_id': unicode(course_key)}
)
certificate_exception_view_url = reverse(
'certificate_exception_view',
kwargs={'course_id': unicode(course_key)}
)
context = {
'course': course,
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message,
'certificate_white_list': certificate_white_list,
'generate_certificate_exceptions_url': generate_certificate_exceptions_url,
'generate_bulk_certificate_exceptions_url': generate_bulk_certificate_exceptions_url,
'certificate_exception_view_url': certificate_exception_view_url
}
if settings.FEATURES['ENABLE_INSTRUCTOR_LEGACY_DASHBOARD']:
context['old_dashboard_url'] = reverse('instructor_dashboard_legacy', kwargs={'course_id': unicode(course_key)})
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
""" Provide data for the corresponding dashboard section """
course_key = course.id
coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
course_price = paid_mode.min_price
total_amount = None
if access['finance_admin']:
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
total_amount = single_purchase_total + bulk_purchase_total
section_data = {
'section_key': 'e-commerce',
'section_display_name': _('E-Commerce'),
'access': access,
'course_id': unicode(course_key),
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
'coupons': coupons,
'sales_admin': access['sales_admin'],
'coupons_enabled': coupons_enabled,
'reports_enabled': reports_enabled,
'course_price': course_price,
'total_amount': total_amount
}
return section_data
def _section_special_exams(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'special_exams',
'section_display_name': _('Special Exams'),
'access': access,
'course_id': unicode(course_key)
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = None
html_cert_enabled = certs_api.has_html_certificates_enabled(course.id, course)
if html_cert_enabled:
can_enable_for_course = True
else:
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
certificate_statuses_with_count = {
certificate['status']: certificate['count']
for certificate in GeneratedCertificate.get_unique_statuses(course_key=course.id)
}
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'instructor_generation_enabled': instructor_generation_enabled,
'html_cert_enabled': html_cert_enabled,
'active_certificate': certs_api.get_active_web_certificate(course),
'certificate_statuses_with_count': certificate_statuses_with_count,
'status': CertificateStatuses,
'certificate_generation_history': CertificateGenerationHistory.objects.filter(course_id=course.id),
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_regeneration': reverse(
'start_certificate_regeneration',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
    set the new course price and add a new entry to the CourseModesArchive table
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=course_honor_mode[0].min_price, currency=course_honor_mode[0].currency,
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'start_date': get_default_time_display(course.start),
'end_date': get_default_time_display(course.end) or _('No end date set'),
'num_sections': len(course.children),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if settings.ANALYTICS_DASHBOARD_URL:
dashboard_link = _get_dashboard_link(course_key)
message = _("Enrollment data is now available in {dashboard_link}.").format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
try:
sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
# section_data['offline_grades'] = offline_grades_available(course_key)
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access, is_white_label):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'is_white_label': is_white_label,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': unicode(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
'discussion_topics_url': reverse('cohort_discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
}
return section_data
def _is_small_course(course_key):
""" Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
is_small_course = False
enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
is_small_course = enrollment_count <= max_enrollment_for_buttons
return is_small_course
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = _is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(course_key)}),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
show_proctored_report_button = (
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
course.enable_proctored_exams
)
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': show_proctored_report_button,
'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': unicode(course_key)}),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
'get_issued_certificates_url': reverse(
'get_issued_certificates', kwargs={'course_id': unicode(course_key)}
),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
'list_proctored_results_url': reverse('get_proctored_exam_results', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
'course_has_survey': True if course.course_survey_name else False,
'course_survey_results_url': reverse('get_course_survey_results', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlDescriptor for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlDescriptor is only being used to generate a nice text editor.
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
'editor': email_editor,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': unicode(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link = u"<a href=\"{0}\" target=\"_blank\">{1}</a>".format(analytics_dashboard_url,
settings.ANALYTICS_DASHBOARD_NAME)
return link
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
insights_message = _("For analytics about your course, go to {analytics_dashboard_name}.")
insights_message = insights_message.format(
analytics_dashboard_name=u'{0}{1}</a>'.format(link_start, settings.ANALYTICS_DASHBOARD_NAME)
)
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'insights_message': insights_message,
}
return section_data
def _section_metrics(course, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'metrics',
'section_display_name': _('Metrics'),
'access': access,
'course_id': unicode(course_key),
'sub_section_display_name': get_section_display_name(course_key),
'section_has_problem': get_array_section_has_problem(course_key),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
|
kursitet/edx-platform
|
lms/djangoapps/instructor/views/instructor_dashboard.py
|
Python
|
agpl-3.0
| 30,777
|
[
"VisIt"
] |
942adb0a05994363fa30932d39c0cfd921ad343eeb8018b58e17336dcbfed1b8
|
import sys
sys.path.insert(1, "../../../")
import h2o, tests
import random
def cv_carsGLM():
# read in the dataset and construct training set (and validation set)
cars = h2o.import_file(path=h2o.locate("smalldata/junit/cars_20mpg.csv"))
    # choose the type of model-building exercise (gaussian regression, binomial
    # classification, or poisson regression). 0: gaussian, 1: binomial, 2: poisson
problem = random.sample(range(3),1)[0]
# pick the predictors and response column, along with the correct family
predictors = ["displacement","power","weight","acceleration","year"]
if problem == 1 :
response_col = "economy_20mpg"
family = "binomial"
cars[response_col] = cars[response_col].asfactor()
elif problem == 2 :
family = "poisson"
response_col = "cylinders"
else :
family = "gaussian"
response_col = "economy"
print "Distribution: {0}".format(family)
print "Response column: {0}".format(response_col)
## cross-validation
# 1. check that cv metrics are the same over repeated "Modulo" runs
nfolds = random.randint(3,10)
glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Modulo")
glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Modulo")
tests.check_models(glm1, glm2, True)
# 2. check that cv metrics are different over repeated "Random" runs
nfolds = random.randint(3,10)
glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Random")
glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Random")
    try:
        tests.check_models(glm1, glm2, True)
        models_match = True
    except AssertionError:
        models_match = False
    assert not models_match, "Expected models to be different over repeated Random runs"
# 3. folds_column
num_folds = random.randint(2,5)
fold_assignments = h2o.H2OFrame(python_obj=[[random.randint(0,num_folds-1)] for f in range(cars.nrow)])
fold_assignments.setNames(["fold_assignments"])
cars = cars.cbind(fold_assignments)
glm = h2o.glm(y=cars[response_col], x=cars[predictors], training_frame=cars, family=family,
fold_column="fold_assignments", keep_cross_validation_predictions=True)
num_cv_models = len(glm._model_json['output']['cross_validation_models'])
assert num_cv_models==num_folds, "Expected {0} cross-validation models, but got " \
"{1}".format(num_folds, num_cv_models)
cv_model1 = h2o.get_model(glm._model_json['output']['cross_validation_models'][0]['name'])
cv_model2 = h2o.get_model(glm._model_json['output']['cross_validation_models'][1]['name'])
assert isinstance(cv_model1, type(glm)), "Expected cross-validation model to be the same model type as the " \
"constructed model, but got {0} and {1}".format(type(cv_model1),type(glm))
assert isinstance(cv_model2, type(glm)), "Expected cross-validation model to be the same model type as the " \
"constructed model, but got {0} and {1}".format(type(cv_model2),type(glm))
# 4. keep_cross_validation_predictions
cv_predictions = glm1._model_json['output']['cross_validation_predictions']
assert cv_predictions is None, "Expected cross-validation predictions to be None, but got {0}".format(cv_predictions)
cv_predictions = glm._model_json['output']['cross_validation_predictions']
assert len(cv_predictions)==num_folds, "Expected the same number of cross-validation predictions " \
"as folds, but got {0}".format(len(cv_predictions))
# # 5. manually construct models
# fold1 = cars[cars["fold_assignments"]==0]
# fold2 = cars[cars["fold_assignments"]==1]
# manual_model1 = h2o.glm(y=fold2[response_col],
# x=fold2[predictors],
# validation_y=fold1[response_col],
# validation_x=fold1[predictors],
# family=family)
# manual_model2 = h2o.glm(y=fold1[response_col],
# x=fold1[predictors],
# validation_y=fold2[response_col],
# validation_x=fold2[predictors],
# family=family)
## boundary cases
# 1. nfolds = number of observations (leave-one-out cross-validation)
# TODO: PUBDEV-1776
#glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow, family=family,
# fold_assignment="Modulo")
# 2. nfolds = 0
glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=0, family=family)
# check that this is equivalent to no nfolds
glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], family=family)
tests.check_models(glm1, glm2)
# 3. cross-validation and regular validation attempted
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=random.randint(3,10), validation_y=cars[response_col],
validation_x=cars[predictors], family=family)
## error cases
# 1. nfolds == 1 or < 0
try:
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=random.sample([-1,1], 1)[0],
family=family)
assert False, "Expected model-build to fail when nfolds is 1 or < 0"
except EnvironmentError:
assert True
# 2. more folds than observations
try:
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow+1, family=family,
fold_assignment="Modulo")
assert False, "Expected model-build to fail when nfolds > nobs"
except EnvironmentError:
assert True
# 3. fold_column and nfolds both specified
try:
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=3, fold_column="fold_assignments",
family=family, training_frame=cars)
assert False, "Expected model-build to fail when fold_column and nfolds both specified"
except EnvironmentError:
assert True
# # 4. fold_column and fold_assignment both specified
# try:
# glm = h2o.glm(y=cars[response_col], x=cars[predictors], fold_assignment="Random", fold_column="fold_assignments",
# family=family, training_frame=cars)
# assert False, "Expected model-build to fail when fold_column and fold_assignment both specified"
# except EnvironmentError:
# assert True
if __name__ == "__main__":
tests.run_test(sys.argv, cv_carsGLM)
|
tarasane/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_cv_carsGLM.py
|
Python
|
apache-2.0
| 6,743
|
[
"Gaussian"
] |
d946f308171169573b44ebb0281ca5ccc5290dbfb4f99d91c5c1d1ab58164ee0
|
#!/usr/bin/env python2.7
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
#
r"""
mx is a command line tool for managing the development of Java code organized as suites of projects.
Version 1.x supports a single suite of projects.
Full documentation can be found at https://wiki.openjdk.java.net/display/Graal/The+mx+Tool
"""
import sys, os, errno, time, subprocess, shlex, types, StringIO, zipfile, signal, xml.sax.saxutils, tempfile, fnmatch, platform
import multiprocessing
import textwrap
import socket
import tarfile
import hashlib
import xml.parsers.expat
import shutil, re, xml.dom.minidom
import pipes
import difflib
from collections import Callable, OrderedDict
from threading import Thread
from argparse import ArgumentParser, REMAINDER
from os.path import join, basename, dirname, exists, getmtime, isabs, expandvars, isdir, isfile
_projects = dict()
_libs = dict()
_jreLibs = dict()
_dists = dict()
_suites = dict()
_annotationProcessors = None
_primary_suite_path = None
_primary_suite = None
_opts = None
_java_homes = None
_warn = False
"""
A distribution is a jar or zip file containing the output from one or more Java projects.
"""
class Distribution:
def __init__(self, suite, name, path, sourcesPath, deps, mainClass, excludedDependencies, distDependencies, javaCompliance):
self.suite = suite
self.name = name
self.path = path.replace('/', os.sep)
self.path = _make_absolute(self.path, suite.dir)
self.sourcesPath = _make_absolute(sourcesPath.replace('/', os.sep), suite.dir) if sourcesPath else None
self.deps = deps
self.update_listeners = set()
self.mainClass = mainClass
self.excludedDependencies = excludedDependencies
self.distDependencies = distDependencies
self.javaCompliance = JavaCompliance(javaCompliance) if javaCompliance else None
def sorted_deps(self, includeLibs=False, transitive=False):
deps = []
if transitive:
for depDist in [distribution(name) for name in self.distDependencies]:
for d in depDist.sorted_deps(includeLibs=includeLibs, transitive=True):
if d not in deps:
deps.append(d)
try:
excl = [dependency(d) for d in self.excludedDependencies]
except SystemExit as e:
abort('invalid excluded dependency for {} distribution: {}'.format(self.name, e))
return deps + [d for d in sorted_deps(self.deps, includeLibs=includeLibs) if d not in excl]
def __str__(self):
return self.name
def add_update_listener(self, listener):
self.update_listeners.add(listener)
"""
Gets the directory in which the IDE project configuration
for this distribution is generated. If this is a distribution
derived from a project defining an annotation processor, then
    None is returned to indicate no IDE configuration should be
created for this distribution.
"""
def get_ide_project_dir(self):
if hasattr(self, 'definingProject') and self.definingProject.definedAnnotationProcessorsDist == self:
return None
if hasattr(self, 'subDir'):
return join(self.suite.dir, self.subDir, self.name + '.dist')
else:
return join(self.suite.dir, self.name + '.dist')
def make_archive(self):
# are sources combined into main archive?
unified = self.path == self.sourcesPath
with Archiver(self.path) as arc, Archiver(None if unified else self.sourcesPath) as srcArcRaw:
srcArc = arc if unified else srcArcRaw
services = {}
def overwriteCheck(zf, arcname, source):
if not hasattr(zf, '_provenance'):
zf._provenance = {}
existingSource = zf._provenance.get(arcname, None)
isOverwrite = False
if existingSource and existingSource != source:
if arcname[-1] != os.path.sep:
logv('warning: ' + self.path + ': avoid overwrite of ' + arcname + '\n new: ' + source + '\n old: ' + existingSource)
isOverwrite = True
zf._provenance[arcname] = source
return isOverwrite
if self.mainClass:
manifest = "Manifest-Version: 1.0\nMain-Class: %s\n\n" % (self.mainClass)
if not overwriteCheck(arc.zf, "META-INF/MANIFEST.MF", "project files"):
arc.zf.writestr("META-INF/MANIFEST.MF", manifest)
for dep in self.sorted_deps(includeLibs=True):
                isCoveredByDependency = False
                for d in self.distDependencies:
                    if dep in _dists[d].sorted_deps(includeLibs=True, transitive=True):
                        logv("Excluding {0} from {1} because it's provided by the dependency {2}".format(dep.name, self.path, d))
                        isCoveredByDependency = True
                        break
                if isCoveredByDependency:
continue
if dep.isLibrary():
l = dep
# merge library jar into distribution jar
logv('[' + self.path + ': adding library ' + l.name + ']')
lpath = l.get_path(resolve=True)
libSourcePath = l.get_source_path(resolve=True)
if lpath:
with zipfile.ZipFile(lpath, 'r') as lp:
for arcname in lp.namelist():
if arcname.startswith('META-INF/services/') and not arcname == 'META-INF/services/':
service = arcname[len('META-INF/services/'):]
assert '/' not in service
services.setdefault(service, []).extend(lp.read(arcname).splitlines())
else:
if not overwriteCheck(arc.zf, arcname, lpath + '!' + arcname):
arc.zf.writestr(arcname, lp.read(arcname))
if srcArc.zf and libSourcePath:
with zipfile.ZipFile(libSourcePath, 'r') as lp:
for arcname in lp.namelist():
if not overwriteCheck(srcArc.zf, arcname, lpath + '!' + arcname):
srcArc.zf.writestr(arcname, lp.read(arcname))
elif dep.isProject():
p = dep
if self.javaCompliance:
if p.javaCompliance > self.javaCompliance:
abort("Compliance level doesn't match: Distribution {0} requires {1}, but {2} is {3}.".format(self.name, self.javaCompliance, p.name, p.javaCompliance))
# skip a Java project if its Java compliance level is "higher" than the configured JDK
jdk = java(p.javaCompliance)
assert jdk
logv('[' + self.path + ': adding project ' + p.name + ']')
outputDir = p.output_dir()
for root, _, files in os.walk(outputDir):
relpath = root[len(outputDir) + 1:]
if relpath == join('META-INF', 'services'):
for service in files:
with open(join(root, service), 'r') as fp:
services.setdefault(service, []).extend([provider.strip() for provider in fp.readlines()])
elif relpath == join('META-INF', 'providers'):
for provider in files:
with open(join(root, provider), 'r') as fp:
for service in fp:
services.setdefault(service.strip(), []).append(provider)
else:
for f in files:
arcname = join(relpath, f).replace(os.sep, '/')
if not overwriteCheck(arc.zf, arcname, join(root, f)):
arc.zf.write(join(root, f), arcname)
if srcArc.zf:
sourceDirs = p.source_dirs()
if p.source_gen_dir():
sourceDirs.append(p.source_gen_dir())
for srcDir in sourceDirs:
for root, _, files in os.walk(srcDir):
relpath = root[len(srcDir) + 1:]
for f in files:
if f.endswith('.java'):
arcname = join(relpath, f).replace(os.sep, '/')
if not overwriteCheck(srcArc.zf, arcname, join(root, f)):
srcArc.zf.write(join(root, f), arcname)
for service, providers in services.iteritems():
arcname = 'META-INF/services/' + service
arc.zf.writestr(arcname, '\n'.join(providers))
self.notify_updated()
def notify_updated(self):
for l in self.update_listeners:
l(self)
"""
A dependency is a library or project specified in a suite.
"""
class Dependency:
def __init__(self, suite, name):
self.name = name
self.suite = suite
def __str__(self):
return self.name
def __eq__(self, other):
return self.name == other.name
def __ne__(self, other):
return self.name != other.name
def __hash__(self):
return hash(self.name)
def isLibrary(self):
return isinstance(self, Library)
def isJreLibrary(self):
return isinstance(self, JreLibrary)
def isProject(self):
return isinstance(self, Project)
class Project(Dependency):
def __init__(self, suite, name, srcDirs, deps, javaCompliance, workingSets, d):
Dependency.__init__(self, suite, name)
self.srcDirs = srcDirs
self.deps = deps
self.checkstyleProj = name
self.javaCompliance = JavaCompliance(javaCompliance) if javaCompliance is not None else None
self.native = False
self.workingSets = workingSets
self.dir = d
# The annotation processors defined by this project
self.definedAnnotationProcessors = None
self.definedAnnotationProcessorsDist = None
# Verify that a JDK exists for this project if its compliance level is
# less than the compliance level of the default JDK
jdk = java(self.javaCompliance)
if jdk is None and self.javaCompliance < java().javaCompliance:
abort('Cannot find ' + str(self.javaCompliance) + ' JDK required by ' + name + '. ' +
'Specify it with --extra-java-homes option or EXTRA_JAVA_HOMES environment variable.')
# Create directories for projects that don't yet exist
if not exists(d):
os.mkdir(d)
for s in self.source_dirs():
if not exists(s):
os.mkdir(s)
def all_deps(self, deps, includeLibs, includeSelf=True, includeJreLibs=False, includeAnnotationProcessors=False):
"""
Add the transitive set of dependencies for this project, including
libraries if 'includeLibs' is true, to the 'deps' list.
"""
return self._all_deps_helper(deps, [], includeLibs, includeSelf, includeJreLibs, includeAnnotationProcessors)
def _all_deps_helper(self, deps, dependants, includeLibs, includeSelf=True, includeJreLibs=False, includeAnnotationProcessors=False):
if self in dependants:
abort(str(self) + 'Project dependency cycle found:\n ' +
'\n |\n V\n '.join(map(str, dependants[dependants.index(self):])) +
'\n |\n V\n ' + self.name)
childDeps = list(self.deps)
if includeAnnotationProcessors and len(self.annotation_processors()) > 0:
childDeps = self.annotation_processors() + childDeps
if self in deps:
return deps
for name in childDeps:
assert name != self.name
dep = dependency(name)
if not dep in deps:
if dep.isProject():
dep._all_deps_helper(deps, dependants + [self], includeLibs=includeLibs, includeJreLibs=includeJreLibs, includeAnnotationProcessors=includeAnnotationProcessors)
                elif dep.isProject() or (dep.isLibrary() and includeLibs) or (dep.isJreLibrary() and includeJreLibs):
dep.all_deps(deps, includeLibs=includeLibs, includeJreLibs=includeJreLibs, includeAnnotationProcessors=includeAnnotationProcessors)
if not self in deps and includeSelf:
deps.append(self)
return deps
def _compute_max_dep_distances(self, name, distances, dist):
currentDist = distances.get(name)
if currentDist is None or currentDist < dist:
distances[name] = dist
p = project(name, False)
if p is not None:
for dep in p.deps:
self._compute_max_dep_distances(dep, distances, dist + 1)
def canonical_deps(self):
"""
Get the dependencies of this project that are not recursive (i.e. cannot be reached
via other dependencies).
"""
distances = dict()
result = set()
self._compute_max_dep_distances(self.name, distances, 0)
for n, d in distances.iteritems():
assert d > 0 or n == self.name
if d == 1:
result.add(n)
if len(result) == len(self.deps) and frozenset(self.deps) == result:
return self.deps
return result
def max_depth(self):
"""
Get the maximum canonical distance between this project and its most distant dependency.
"""
distances = dict()
self._compute_max_dep_distances(self.name, distances, 0)
return max(distances.values())
def source_dirs(self):
"""
Get the directories in which the sources of this project are found.
"""
return [join(self.dir, s) for s in self.srcDirs]
def source_gen_dir(self):
"""
Get the directory in which source files generated by the annotation processor are found/placed.
"""
if self.native:
return None
return join(self.dir, 'src_gen')
def output_dir(self):
"""
Get the directory in which the class files of this project are found/placed.
"""
if self.native:
return None
return join(self.dir, 'bin')
def jasmin_output_dir(self):
"""
Get the directory in which the Jasmin assembled class files of this project are found/placed.
"""
if self.native:
return None
return join(self.dir, 'jasmin_classes')
def append_to_classpath(self, cp, resolve):
if not self.native:
cp.append(self.output_dir())
def find_classes_with_matching_source_line(self, pkgRoot, function, includeInnerClasses=False):
"""
Scan the sources of this project for Java source files containing a line for which
'function' returns true. A map from class name to source file path for each existing class
corresponding to a matched source file is returned.
"""
result = dict()
pkgDecl = re.compile(r"^package\s+([a-zA-Z_][\w\.]*)\s*;$")
for srcDir in self.source_dirs():
outputDir = self.output_dir()
for root, _, files in os.walk(srcDir):
for name in files:
if name.endswith('.java') and name != 'package-info.java':
matchFound = False
source = join(root, name)
with open(source) as f:
pkg = None
for line in f:
if line.startswith("package "):
match = pkgDecl.match(line)
if match:
pkg = match.group(1)
if function(line.strip()):
matchFound = True
if pkg and matchFound:
break
if matchFound:
simpleClassName = name[:-len('.java')]
assert pkg is not None
if pkgRoot is None or pkg.startswith(pkgRoot):
pkgOutputDir = join(outputDir, pkg.replace('.', os.path.sep))
if exists(pkgOutputDir):
for e in os.listdir(pkgOutputDir):
if includeInnerClasses:
if e.endswith('.class') and (e.startswith(simpleClassName) or e.startswith(simpleClassName + '$')):
className = pkg + '.' + e[:-len('.class')]
result[className] = source
elif e == simpleClassName + '.class':
className = pkg + '.' + simpleClassName
result[className] = source
return result
def _init_packages_and_imports(self):
if not hasattr(self, '_defined_java_packages'):
packages = set()
extendedPackages = set()
depPackages = set()
for d in self.all_deps([], includeLibs=False, includeSelf=False):
depPackages.update(d.defined_java_packages())
imports = set()
importRe = re.compile(r'import\s+(?:static\s+)?([^;]+);')
for sourceDir in self.source_dirs():
for root, _, files in os.walk(sourceDir):
javaSources = [name for name in files if name.endswith('.java')]
if len(javaSources) != 0:
pkg = root[len(sourceDir) + 1:].replace(os.sep, '.')
if not pkg in depPackages:
packages.add(pkg)
else:
                            # A project extends a package already defined by one of its dependencies
extendedPackages.add(pkg)
imports.add(pkg)
for n in javaSources:
with open(join(root, n)) as fp:
content = fp.read()
imports.update(importRe.findall(content))
self._defined_java_packages = frozenset(packages)
self._extended_java_packages = frozenset(extendedPackages)
importedPackages = set()
for imp in imports:
name = imp
while not name in depPackages and len(name) > 0:
lastDot = name.rfind('.')
if lastDot == -1:
name = None
break
name = name[0:lastDot]
if name is not None:
importedPackages.add(name)
self._imported_java_packages = frozenset(importedPackages)
def defined_java_packages(self):
"""Get the immutable set of Java packages defined by the Java sources of this project"""
self._init_packages_and_imports()
return self._defined_java_packages
def extended_java_packages(self):
"""Get the immutable set of Java packages extended by the Java sources of this project"""
self._init_packages_and_imports()
return self._extended_java_packages
def imported_java_packages(self):
"""Get the immutable set of Java packages defined by other Java projects that are
imported by the Java sources of this project."""
self._init_packages_and_imports()
return self._imported_java_packages
"""
Gets the list of projects defining the annotation processors that will be applied
when compiling this project. This includes the projects declared by the annotationProcessors property
of this project and any of its project dependencies. It also includes
    any project dependencies that define annotation processors.
"""
def annotation_processors(self):
if not hasattr(self, '_annotationProcessors'):
aps = set()
if hasattr(self, '_declaredAnnotationProcessors'):
aps = set(self._declaredAnnotationProcessors)
for ap in aps:
if project(ap).definedAnnotationProcessorsDist is None:
config = join(project(ap).source_dirs()[0], 'META-INF', 'services', 'javax.annotation.processing.Processor')
if not exists(config):
TimeStampFile(config).touch()
abort('Project ' + ap + ' declared in annotationProcessors property of ' + self.name + ' does not define any annotation processors.\n' +
'Please specify the annotation processors in ' + config)
allDeps = self.all_deps([], includeLibs=False, includeSelf=False, includeAnnotationProcessors=False)
for p in allDeps:
# Add an annotation processor dependency
if p.definedAnnotationProcessorsDist is not None:
aps.add(p.name)
# Inherit annotation processors from dependencies
aps.update(p.annotation_processors())
self._annotationProcessors = list(aps)
return self._annotationProcessors
"""
Gets the class path composed of the distribution jars containing the
annotation processors that will be applied when compiling this project.
"""
def annotation_processors_path(self):
aps = [project(ap) for ap in self.annotation_processors()]
if len(aps):
return os.pathsep.join([ap.definedAnnotationProcessorsDist.path for ap in aps if ap.definedAnnotationProcessorsDist])
return None
def update_current_annotation_processors_file(self):
aps = self.annotation_processors()
outOfDate = False
currentApsFile = join(self.suite.mxDir, 'currentAnnotationProcessors', self.name)
currentApsFileExists = exists(currentApsFile)
if currentApsFileExists:
with open(currentApsFile) as fp:
currentAps = [l.strip() for l in fp.readlines()]
if currentAps != aps:
outOfDate = True
if outOfDate or not currentApsFileExists:
if not exists(dirname(currentApsFile)):
os.mkdir(dirname(currentApsFile))
with open(currentApsFile, 'w') as fp:
for ap in aps:
print >> fp, ap
return outOfDate
def make_archive(self, path=None):
outputDir = self.output_dir()
if not path:
path = join(self.dir, self.name + '.jar')
with Archiver(path) as arc:
for root, _, files in os.walk(outputDir):
for f in files:
relpath = root[len(outputDir) + 1:]
arcname = join(relpath, f).replace(os.sep, '/')
arc.zf.write(join(root, f), arcname)
return path
def _make_absolute(path, prefix):
"""
Makes 'path' absolute if it isn't already by prefixing 'prefix'
"""
if not isabs(path):
return join(prefix, path)
return path
def sha1OfFile(path):
with open(path, 'rb') as f:
d = hashlib.sha1()
while True:
buf = f.read(4096)
if not buf:
break
d.update(buf)
return d.hexdigest()
def download_file_with_sha1(name, path, urls, sha1, sha1path, resolve, mustExist, sources=False, canSymlink=True):
def _download_lib():
cacheDir = get_env('MX_CACHE_DIR', join(_opts.user_home, '.mx', 'cache'))
if not exists(cacheDir):
os.makedirs(cacheDir)
base = basename(path)
cachePath = join(cacheDir, base + '_' + sha1)
if not exists(cachePath) or sha1OfFile(cachePath) != sha1:
if exists(cachePath):
log('SHA1 of ' + cachePath + ' does not match expected value (' + sha1 + ') - re-downloading')
print 'Downloading ' + ("sources " if sources else "") + name + ' from ' + str(urls)
download(cachePath, urls)
d = dirname(path)
if d != '' and not exists(d):
os.makedirs(d)
if canSymlink and 'symlink' in dir(os):
if exists(path):
os.unlink(path)
os.symlink(cachePath, path)
else:
shutil.copy(cachePath, path)
def _sha1Cached():
with open(sha1path, 'r') as f:
return f.read()[0:40]
def _writeSha1Cached():
with open(sha1path, 'w') as f:
f.write(sha1OfFile(path))
if resolve and mustExist and not exists(path):
assert not len(urls) == 0, 'cannot find required library ' + name + ' ' + path
_download_lib()
if exists(path):
if sha1 and not exists(sha1path):
_writeSha1Cached()
if sha1 and sha1 != _sha1Cached():
_download_lib()
if sha1 != sha1OfFile(path):
abort("SHA1 does not match for " + name + ". Broken download? SHA1 not updated in projects file?")
_writeSha1Cached()
return path
class BaseLibrary(Dependency):
def __init__(self, suite, name, optional):
Dependency.__init__(self, suite, name)
self.optional = optional
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
"""
A library that will be provided by the JRE but may be absent.
Any project or normal library that depends on a missing library
will be removed from the global project and library dictionaries
(i.e., _projects and _libs).
This mechanism exists primarily to be able to support code
that may use functionality in one JRE (e.g., Oracle JRE)
that is not present in another JRE (e.g., OpenJDK). A
motivating example is the Java Flight Recorder library
found in the Oracle JRE.
"""
class JreLibrary(BaseLibrary):
def __init__(self, suite, name, jar, optional):
BaseLibrary.__init__(self, suite, name, optional)
self.jar = jar
def __eq__(self, other):
if isinstance(other, JreLibrary):
return self.jar == other.jar
else:
return NotImplemented
def is_present_in_jdk(self, jdk):
return jdk.containsJar(self.jar)
def all_deps(self, deps, includeLibs, includeSelf=True, includeJreLibs=False, includeAnnotationProcessors=False):
"""
Add the transitive set of dependencies for this JRE library to the 'deps' list.
"""
if includeJreLibs and includeSelf and not self in deps:
deps.append(self)
return deps
class Library(BaseLibrary):
def __init__(self, suite, name, path, optional, urls, sha1, sourcePath, sourceUrls, sourceSha1, deps):
BaseLibrary.__init__(self, suite, name, optional)
self.path = path.replace('/', os.sep)
self.urls = urls
self.sha1 = sha1
self.sourcePath = sourcePath
self.sourceUrls = sourceUrls
if sourcePath == path:
assert sourceSha1 is None or sourceSha1 == sha1
sourceSha1 = sha1
self.sourceSha1 = sourceSha1
self.deps = deps
abspath = _make_absolute(path, self.suite.dir)
if not optional and not exists(abspath):
if not len(urls):
abort('Non-optional library {} must either exist at {} or specify one or more URLs from which it can be retrieved'.format(name, abspath))
def _checkSha1PropertyCondition(propName, cond, inputPath):
if not cond:
absInputPath = _make_absolute(inputPath, self.suite.dir)
if exists(absInputPath):
abort('Missing "{}" property for library {}. Add the following line to projects file:\nlibrary@{}@{}={}'.format(propName, name, name, propName, sha1OfFile(absInputPath)))
abort('Missing "{}" property for library {}'.format(propName, name))
_checkSha1PropertyCondition('sha1', sha1, path)
_checkSha1PropertyCondition('sourceSha1', not sourcePath or sourceSha1, sourcePath)
for url in urls:
if url.endswith('/') != self.path.endswith(os.sep):
abort('Path for dependency directory must have a URL ending with "/": path=' + self.path + ' url=' + url)
def __eq__(self, other):
if isinstance(other, Library):
if len(self.urls) == 0:
return self.path == other.path
else:
return self.urls == other.urls
else:
return NotImplemented
def get_path(self, resolve):
path = _make_absolute(self.path, self.suite.dir)
sha1path = path + '.sha1'
includedInJDK = getattr(self, 'includedInJDK', None)
if includedInJDK and java().javaCompliance >= JavaCompliance(includedInJDK):
return None
bootClassPathAgent = getattr(self, 'bootClassPathAgent').lower() == 'true' if hasattr(self, 'bootClassPathAgent') else False
return download_file_with_sha1(self.name, path, self.urls, self.sha1, sha1path, resolve, not self.optional, canSymlink=not bootClassPathAgent)
def get_source_path(self, resolve):
if self.sourcePath is None:
return None
path = _make_absolute(self.sourcePath, self.suite.dir)
sha1path = path + '.sha1'
return download_file_with_sha1(self.name, path, self.sourceUrls, self.sourceSha1, sha1path, resolve, len(self.sourceUrls) != 0, sources=True)
def append_to_classpath(self, cp, resolve):
path = self.get_path(resolve)
if path and (exists(path) or not resolve):
cp.append(path)
def all_deps(self, deps, includeLibs, includeSelf=True, includeJreLibs=False, includeAnnotationProcessors=False):
"""
Add the transitive set of dependencies for this library to the 'deps' list.
"""
if not includeLibs:
return deps
childDeps = list(self.deps)
if self in deps:
return deps
for name in childDeps:
assert name != self.name
dep = library(name)
if not dep in deps:
dep.all_deps(deps, includeLibs=includeLibs, includeJreLibs=includeJreLibs, includeAnnotationProcessors=includeAnnotationProcessors)
if not self in deps and includeSelf:
deps.append(self)
return deps
class HgConfig:
"""
Encapsulates access to Mercurial (hg)
"""
def __init__(self):
self.missing = 'no hg executable found'
self.has_hg = None
def check(self, abortOnFail=True):
if self.has_hg is None:
try:
subprocess.check_output(['hg'])
self.has_hg = True
except OSError:
self.has_hg = False
warn(self.missing)
if not self.has_hg:
if abortOnFail:
abort(self.missing)
else:
warn(self.missing)
def tip(self, sDir, abortOnError=True):
try:
return subprocess.check_output(['hg', 'tip', '-R', sDir, '--template', '{node}'])
except OSError:
warn(self.missing)
except subprocess.CalledProcessError:
if abortOnError:
abort('failed to get tip revision id')
else:
return None
def isDirty(self, sDir, abortOnError=True):
try:
return len(subprocess.check_output(['hg', 'status', '-R', sDir])) > 0
except OSError:
warn(self.missing)
except subprocess.CalledProcessError:
if abortOnError:
abort('failed to get status')
else:
return None
# TODO: remove this function once all repos have transitioned
# to the new project format
def _read_projects_file(projectsFile):
suite = OrderedDict()
suite['projects'] = OrderedDict()
suite['libraries'] = OrderedDict()
suite['jrelibraries'] = OrderedDict()
suite['distributions'] = OrderedDict()
with open(projectsFile) as f:
prefix = ''
lineNum = 0
def error(message):
abort(projectsFile + ':' + str(lineNum) + ': ' + message)
for line in f:
lineNum = lineNum + 1
line = line.strip()
if line.endswith('\\'):
prefix = prefix + line[:-1]
continue
if len(prefix) != 0:
line = prefix + line
prefix = ''
if len(line) != 0 and line[0] != '#':
if '=' not in line:
error('non-comment line does not contain an "=" character')
key, value = line.split('=', 1)
parts = key.split('@')
if len(parts) == 1:
if parts[0] == 'suite':
suite['name'] = value
elif parts[0] == 'mxversion':
suite['mxversion'] = value
else:
error('Single part property must be "suite": ' + key)
continue
if len(parts) != 3:
error('Property name does not have 3 parts separated by "@": ' + key)
kind, name, attr = parts
if kind == 'project':
m = suite['projects']
elif kind == 'library':
m = suite['libraries']
elif kind == 'jrelibrary':
m = suite['jrelibraries']
elif kind == 'distribution':
m = suite['distributions']
else:
                    error('Property name does not start with "project@", "library@", "jrelibrary@" or "distribution@": ' + key)
attrs = m.get(name)
if attrs is None:
attrs = OrderedDict()
m[name] = attrs
attrs[attr] = value
return suite
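# A short, hypothetical sketch of the legacy format parsed above (all names are
# illustrative only):
#   suite=mysuite
#   mxversion=1.0
#   project@com.example.core@sourceDirs=src
#   project@com.example.core@dependencies=com.example.base
#   library@JUNIT@path=lib/junit-4.11.jar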
def _load_suite_dict(mxDir):
suffix = 1
suite = None
dictName = 'suite'
def expand(value, context):
if isinstance(value, types.DictionaryType):
for n, v in value.iteritems():
value[n] = expand(v, context + [n])
elif isinstance(value, types.ListType):
for i in range(len(value)):
value[i] = expand(value[i], context + [str(i)])
else:
if not isinstance(value, types.StringTypes):
abort('value of ' + '.'.join(context) + ' is of unexpected type ' + str(type(value)))
value = expandvars(value)
if '$' in value or '%' in value:
abort('value of ' + '.'.join(context) + ' contains an undefined environment variable: ' + value)
return value
moduleName = 'suite'
modulePath = join(mxDir, moduleName + '.py')
while exists(modulePath):
savedModule = sys.modules.get(moduleName)
if savedModule:
warn(modulePath + ' conflicts with ' + savedModule.__file__)
# temporarily extend the Python path
sys.path.insert(0, mxDir)
snapshot = frozenset(sys.modules.viewkeys())
module = __import__(moduleName)
if savedModule:
# restore the old module into the module name space
sys.modules[moduleName] = savedModule
else:
# remove moduleName from the module name space
sys.modules.pop(moduleName)
# For now fail fast if extra modules were loaded.
# This can later be relaxed to simply remove the extra modules
# from the sys.modules name space if necessary.
extraModules = sys.modules.viewkeys() - snapshot
assert len(extraModules) == 0, 'loading ' + modulePath + ' caused extra modules to be loaded: ' + ', '.join([m for m in extraModules])
# revert the Python path
del sys.path[0]
if not hasattr(module, dictName):
abort(modulePath + ' must define a variable named "' + dictName + '"')
d = expand(getattr(module, dictName), [dictName])
sections = ['projects', 'libraries', 'jrelibraries', 'distributions'] + (['distribution_extensions'] if suite else ['name', 'mxversion'])
unknown = d.viewkeys() - sections
if unknown:
abort(modulePath + ' defines unsupported suite sections: ' + ', '.join(unknown))
if suite is None:
suite = d
else:
for s in sections:
existing = suite.get(s)
additional = d.get(s)
if additional:
if not existing:
suite[s] = additional
else:
conflicting = additional.viewkeys() & existing.viewkeys()
if conflicting:
abort(modulePath + ' redefines: ' + ', '.join(conflicting))
existing.update(additional)
distExtensions = d.get('distribution_extensions')
if distExtensions:
existing = suite['distributions']
for n, attrs in distExtensions.iteritems():
original = existing.get(n)
if not original:
abort('cannot extend non-existing distribution ' + n)
for k, v in attrs.iteritems():
if k != 'dependencies':
abort('Only the dependencies of distribution ' + n + ' can be extended')
if not isinstance(v, types.ListType):
abort('distribution_extensions.' + n + '.dependencies must be a list')
original['dependencies'] += v
dictName = 'extra'
moduleName = 'suite' + str(suffix)
modulePath = join(mxDir, moduleName + '.py')
deprecatedModulePath = join(mxDir, 'projects' + str(suffix) + '.py')
if exists(deprecatedModulePath):
abort('Please rename ' + deprecatedModulePath + ' to ' + modulePath)
suffix = suffix + 1
return suite, modulePath
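# A minimal, hypothetical suite.py of the kind loaded above; the section and attribute
# names follow what Suite._load_projects consumes below, the values are illustrative:
#   suite = {
#       "name" : "mysuite",
#       "mxversion" : "1.0",
#       "projects" : {
#           "com.example.core" : {
#               "sourceDirs" : ["src"],
#               "dependencies" : [],
#               "javaCompliance" : "1.7",
#           },
#       },
#       "libraries" : {},
#       "jrelibraries" : {},
#       "distributions" : {},
#   }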
class Suite:
def __init__(self, mxDir, primary, load=True):
self.dir = dirname(mxDir)
self.mxDir = mxDir
self.projects = []
self.libs = []
self.jreLibs = []
self.dists = []
self.commands = None
self.primary = primary
self.requiredMxVersion = None
self.name = _suitename(mxDir) # validated in _load_projects
if load:
# just check that there are no imports
self._load_imports()
self._load_env()
self._load_commands()
_suites[self.name] = self
def __str__(self):
return self.name
def _load_projects(self):
suitePyFile = join(self.mxDir, 'suite.py')
if not exists(suitePyFile):
return
suiteDict, _ = _load_suite_dict(self.mxDir)
if suiteDict.get('name') is not None and suiteDict.get('name') != self.name:
abort('suite name in project file does not match ' + _suitename(self.mxDir))
if suiteDict.has_key('mxversion'):
try:
self.requiredMxVersion = VersionSpec(suiteDict['mxversion'])
except AssertionError as ae:
abort('Exception while parsing "mxversion" in project file: ' + str(ae))
libsMap = suiteDict['libraries']
jreLibsMap = suiteDict['jrelibraries']
projsMap = suiteDict['projects']
distsMap = suiteDict['distributions']
def pop_list(attrs, name, context):
v = attrs.pop(name, None)
if not v:
return []
if not isinstance(v, list):
abort('Attribute "' + name + '" for ' + context + ' must be a list')
return v
for name, attrs in projsMap.iteritems():
context = 'project ' + name
srcDirs = pop_list(attrs, 'sourceDirs', context)
deps = pop_list(attrs, 'dependencies', context)
ap = pop_list(attrs, 'annotationProcessors', context)
javaCompliance = attrs.pop('javaCompliance', None)
subDir = attrs.pop('subDir', None)
if subDir is None:
d = join(self.dir, name)
else:
d = join(self.dir, subDir, name)
workingSets = attrs.pop('workingSets', None)
p = Project(self, name, srcDirs, deps, javaCompliance, workingSets, d)
p.checkstyleProj = attrs.pop('checkstyle', name)
p.native = attrs.pop('native', '') == 'true'
if not p.native and p.javaCompliance is None:
abort('javaCompliance property required for non-native project ' + name)
if len(ap) > 0:
p._declaredAnnotationProcessors = ap
p.__dict__.update(attrs)
self.projects.append(p)
for name, attrs in jreLibsMap.iteritems():
jar = attrs.pop('jar')
# JRE libraries are optional by default
optional = attrs.pop('optional', 'true') != 'false'
l = JreLibrary(self, name, jar, optional)
self.jreLibs.append(l)
for name, attrs in libsMap.iteritems():
context = 'library ' + name
if "|" in name:
if name.count('|') != 2:
abort("Format error in library name: " + name + "\nsyntax: libname|os-platform|architecture")
name, platform, architecture = name.split("|")
if platform != get_os() or architecture != get_arch():
continue
path = attrs.pop('path')
urls = pop_list(attrs, 'urls', context)
sha1 = attrs.pop('sha1', None)
sourcePath = attrs.pop('sourcePath', None)
sourceUrls = pop_list(attrs, 'sourceUrls', context)
sourceSha1 = attrs.pop('sourceSha1', None)
deps = pop_list(attrs, 'dependencies', context)
# Add support optional libraries once we have a good use case
optional = False
l = Library(self, name, path, optional, urls, sha1, sourcePath, sourceUrls, sourceSha1, deps)
l.__dict__.update(attrs)
self.libs.append(l)
for name, attrs in distsMap.iteritems():
context = 'distribution ' + name
path = attrs.pop('path')
sourcesPath = attrs.pop('sourcesPath', None)
deps = pop_list(attrs, 'dependencies', context)
mainClass = attrs.pop('mainClass', None)
exclDeps = pop_list(attrs, 'exclude', context)
distDeps = pop_list(attrs, 'distDependencies', context)
javaCompliance = attrs.pop('javaCompliance', None)
d = Distribution(self, name, path, sourcesPath, deps, mainClass, exclDeps, distDeps, javaCompliance)
d.__dict__.update(attrs)
self.dists.append(d)
# Create a distribution for each project that defines annotation processors
for p in self.projects:
annotationProcessors = None
for srcDir in p.source_dirs():
configFile = join(srcDir, 'META-INF', 'services', 'javax.annotation.processing.Processor')
if exists(configFile):
with open(configFile) as fp:
annotationProcessors = [ap.strip() for ap in fp]
if len(annotationProcessors) != 0:
for ap in annotationProcessors:
if not ap.startswith(p.name):
abort(ap + ' in ' + configFile + ' does not start with ' + p.name)
if annotationProcessors:
dname = p.name.replace('.', '_').upper()
apDir = join(p.dir, 'ap')
path = join(apDir, p.name + '.jar')
sourcesPath = None
deps = [p.name]
mainClass = None
exclDeps = []
distDeps = []
javaCompliance = None
d = Distribution(self, dname, path, sourcesPath, deps, mainClass, exclDeps, distDeps, javaCompliance)
d.subDir = os.path.relpath(os.path.dirname(p.dir), self.dir)
self.dists.append(d)
p.definedAnnotationProcessors = annotationProcessors
p.definedAnnotationProcessorsDist = d
d.definingProject = p
# Restrict exported annotation processors to those explicitly defined by the project
def _refineAnnotationProcessorServiceConfig(dist):
aps = dist.definingProject.definedAnnotationProcessors
apsJar = dist.path
config = 'META-INF/services/javax.annotation.processing.Processor'
with zipfile.ZipFile(apsJar, 'r') as zf:
currentAps = zf.read(config).split()
if currentAps != aps:
logv('[updating ' + config + ' in ' + apsJar + ']')
with Archiver(apsJar) as arc, zipfile.ZipFile(apsJar, 'r') as lp:
for arcname in lp.namelist():
if arcname == config:
arc.zf.writestr(arcname, '\n'.join(aps))
else:
arc.zf.writestr(arcname, lp.read(arcname))
d.add_update_listener(_refineAnnotationProcessorServiceConfig)
if self.name is None:
abort('Missing "suite=<name>" in ' + suitePyFile)
def _commands_name(self):
return 'mx_' + self.name.replace('-', '_')
def _find_commands(self, name):
commandsPath = join(self.mxDir, name + '.py')
if exists(commandsPath):
return name
else:
return None
def _load_commands(self):
commandsName = self._find_commands(self._commands_name())
if commandsName is None:
# backwards compatibility
commandsName = self._find_commands('commands')
if commandsName is not None:
if commandsName in sys.modules:
abort(commandsName + '.py in suite ' + self.name + ' duplicates ' + sys.modules[commandsName].__file__)
# temporarily extend the Python path
sys.path.insert(0, self.mxDir)
mod = __import__(commandsName)
self.commands = sys.modules.pop(commandsName)
sys.modules[commandsName] = self.commands
# revert the Python path
del sys.path[0]
if not hasattr(mod, 'mx_init'):
abort(commandsName + '.py in suite ' + self.name + ' must define an mx_init(suite) function')
if hasattr(mod, 'mx_post_parse_cmd_line'):
self.mx_post_parse_cmd_line = mod.mx_post_parse_cmd_line
mod.mx_init(self)
self.commands = mod
def _load_imports(self):
if exists(join(self.mxDir, 'imports')):
abort('multiple suites are not supported in this version of mx')
def _load_env(self):
e = join(self.mxDir, 'env')
if exists(e):
with open(e) as f:
lineNum = 0
for line in f:
lineNum = lineNum + 1
line = line.strip()
if len(line) != 0 and line[0] != '#':
if not '=' in line:
abort(e + ':' + str(lineNum) + ': line does not match pattern "key=value"')
key, value = line.split('=', 1)
os.environ[key.strip()] = expandvars_in_property(value.strip())
def _post_init(self, opts):
self._load_projects()
if self.requiredMxVersion is None:
warn("This suite does not express any required mx version. Consider adding 'mxversion=<version>' to your projects file.")
elif self.requiredMxVersion > version:
abort("This suite requires mx version " + str(self.requiredMxVersion) + " while your current mx version is " + str(version) + ". Please update mx.")
# set the global data structures, checking for conflicts unless _check_global_structures is False
for p in self.projects:
existing = _projects.get(p.name)
if existing is not None:
abort('cannot override project ' + p.name + ' in ' + p.dir + " with project of the same name in " + existing.dir)
if not p.name in _opts.ignored_projects:
_projects[p.name] = p
for l in self.libs:
existing = _libs.get(l.name)
# Check that suites that define same library are consistent
if existing is not None and existing != l:
abort('inconsistent library redefinition of ' + l.name + ' in ' + existing.suite.dir + ' and ' + l.suite.dir)
_libs[l.name] = l
for l in self.jreLibs:
existing = _jreLibs.get(l.name)
# Check that suites that define same library are consistent
if existing is not None and existing != l:
abort('inconsistent JRE library redefinition of ' + l.name + ' in ' + existing.suite.dir + ' and ' + l.suite.dir)
_jreLibs[l.name] = l
for d in self.dists:
existing = _dists.get(d.name)
if existing is not None:
# allow redefinition, so use path from existing
# abort('cannot redefine distribution ' + d.name)
warn('distribution ' + d.name + ' redefined')
d.path = existing.path
_dists[d.name] = d
# Remove projects and libraries that (recursively) depend on an optional library
# whose artifact does not exist or on a JRE library that is not present in the
# JDK for a project. Also remove projects whose Java compliance requirement
# cannot be satisfied by the configured JDKs.
#
# Removed projects and libraries are also removed from
        # distributions if they are listed as dependencies.
for d in sorted_deps(includeLibs=True):
if d.isLibrary():
if d.optional:
try:
d.optional = False
path = d.get_path(resolve=True)
except SystemExit:
path = None
finally:
d.optional = True
if not path:
logv('[omitting optional library {} as {} does not exist]'.format(d, d.path))
del _libs[d.name]
self.libs.remove(d)
elif d.isProject():
if java(d.javaCompliance) is None:
logv('[omitting project {} as Java compliance {} cannot be satisfied by configured JDKs]'.format(d, d.javaCompliance))
del _projects[d.name]
self.projects.remove(d)
else:
for name in list(d.deps):
jreLib = _jreLibs.get(name)
if jreLib:
if not jreLib.is_present_in_jdk(java(d.javaCompliance)):
if jreLib.optional:
logv('[omitting project {} as dependency {} is missing]'.format(d, name))
del _projects[d.name]
self.projects.remove(d)
else:
abort('JRE library {} required by {} not found'.format(jreLib, d))
elif not dependency(name, fatalIfMissing=False):
logv('[omitting project {} as dependency {} is missing]'.format(d, name))
del _projects[d.name]
self.projects.remove(d)
for dist in _dists.itervalues():
for name in list(dist.deps):
if not dependency(name, fatalIfMissing=False):
logv('[omitting {} from distribution {}]'.format(name, dist))
dist.deps.remove(name)
if hasattr(self, 'mx_post_parse_cmd_line'):
self.mx_post_parse_cmd_line(opts)
class XMLElement(xml.dom.minidom.Element):
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write(indent + "<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
xml.dom.minidom._write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
if not self.ownerDocument.padTextNodeWithoutSiblings and len(self.childNodes) == 1 and isinstance(self.childNodes[0], xml.dom.minidom.Text):
# if the only child of an Element node is a Text node, then the
# text is printed without any indentation or new line padding
writer.write(">")
self.childNodes[0].writexml(writer)
writer.write("</%s>%s" % (self.tagName, newl))
else:
writer.write(">%s" % (newl))
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % (newl))
class XMLDoc(xml.dom.minidom.Document):
def __init__(self):
xml.dom.minidom.Document.__init__(self)
self.current = self
self.padTextNodeWithoutSiblings = False
def createElement(self, tagName):
        # overridden to create XMLElement instances
e = XMLElement(tagName)
e.ownerDocument = self
return e
def comment(self, txt):
self.current.appendChild(self.createComment(txt))
def open(self, tag, attributes=None, data=None):
if attributes is None:
attributes = {}
element = self.createElement(tag)
for key, value in attributes.items():
element.setAttribute(key, value)
self.current.appendChild(element)
self.current = element
if data is not None:
element.appendChild(self.createTextNode(data))
return self
def close(self, tag):
assert self.current != self
assert tag == self.current.tagName, str(tag) + ' != ' + self.current.tagName
self.current = self.current.parentNode
return self
def element(self, tag, attributes=None, data=None):
if attributes is None:
attributes = {}
return self.open(tag, attributes, data).close(tag)
def xml(self, indent='', newl='', escape=False, standalone=None):
assert self.current == self
result = self.toprettyxml(indent, newl, encoding="UTF-8")
if escape:
            entities = {'"': "&quot;", "'": "&apos;", '\n': '&#10;'}
result = xml.sax.saxutils.escape(result, entities)
if standalone is not None:
result = result.replace('encoding="UTF-8"?>', 'encoding="UTF-8" standalone="' + str(standalone) + '"?>')
return result
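# Illustrative sketch (kept as comments, not executed): typical use of the
# XMLDoc helper above. The tag and attribute names are hypothetical.
#
#   doc = XMLDoc()
#   doc.open('classpath')
#   doc.element('classpathentry', {'kind': 'src', 'path': 'src'})
#   doc.close('classpath')
#   print doc.xml(indent='  ', newl='\n')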
def get_os():
"""
Get a canonical form of sys.platform.
"""
if sys.platform.startswith('darwin'):
return 'darwin'
elif sys.platform.startswith('linux'):
return 'linux'
elif sys.platform.startswith('sunos'):
return 'solaris'
elif sys.platform.startswith('win32'):
return 'windows'
elif sys.platform.startswith('cygwin'):
return 'cygwin'
else:
abort('Unknown operating system ' + sys.platform)
def _cygpathU2W(p):
"""
Translate a path from unix-style to windows-style.
    This method has no effect on platforms other than Cygwin.
"""
if p is None or get_os() != "cygwin":
return p
return subprocess.check_output(['cygpath', '-w', p]).strip()
def _cygpathW2U(p):
"""
Translate a path from windows-style to unix-style.
    This method has no effect on platforms other than Cygwin.
"""
if p is None or get_os() != "cygwin":
return p
return subprocess.check_output(['cygpath', '-u', p]).strip()
def _separatedCygpathU2W(p):
"""
    Translate a group of paths, separated by a path separator, from
    unix-style to windows-style.
    This method has no effect on platforms other than Cygwin.
"""
if p is None or p == "" or get_os() != "cygwin":
return p
return ';'.join(map(_cygpathU2W, p.split(os.pathsep)))
def _separatedCygpathW2U(p):
"""
    Translate a group of paths, separated by a path separator, from
    windows-style to unix-style.
    This method has no effect on platforms other than Cygwin.
"""
if p is None or p == "" or get_os() != "cygwin":
return p
return os.pathsep.join(map(_cygpathW2U, p.split(';')))
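# Illustrative sketch (kept as comments, not executed): on Cygwin the helpers
# above translate paths between the two conventions; on every other platform
# they return their argument unchanged. The paths shown are hypothetical.
#
#   _cygpathU2W('/cygdrive/c/graal')                          # roughly 'C:\graal' on Cygwin
#   _separatedCygpathU2W('/tmp/a' + os.pathsep + '/tmp/b')    # windows-style, ';' separated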
def get_arch():
machine = platform.uname()[4]
if machine in ['amd64', 'AMD64', 'x86_64', 'i86pc']:
return 'amd64'
if machine in ['sun4v', 'sun4u']:
return 'sparcv9'
if machine == 'i386' and get_os() == 'darwin':
try:
            # Support for Snow Leopard and earlier versions of Mac OS X
if subprocess.check_output(['sysctl', '-n', 'hw.cpu64bit_capable']).strip() == '1':
return 'amd64'
except OSError:
# sysctl is not available
pass
abort('unknown or unsupported architecture: os=' + get_os() + ', machine=' + machine)
def _loadSuite(mxDir, primary=False):
"""
Load a suite from 'mxDir'.
"""
for s in _suites.itervalues():
if s.mxDir == mxDir:
return s
# create the new suite
s = Suite(mxDir, primary)
return s
def suites(opt_limit_to_suite=False):
"""
Get the list of all loaded suites.
"""
return _suites.values()
def suite(name, fatalIfMissing=True):
"""
Get the suite for a given name.
"""
s = _suites.get(name)
if s is None and fatalIfMissing:
abort('suite named ' + name + ' not found')
return s
def projects_from_names(projectNames):
"""
Get the list of projects corresponding to projectNames; all projects if None
"""
if projectNames is None:
return projects()
else:
return [project(name) for name in projectNames]
def projects(opt_limit_to_suite=False):
"""
Get the list of all loaded projects limited by --suite option if opt_limit_to_suite == True
"""
sortedProjects = sorted(_projects.values(), key=lambda p: p.name)
if opt_limit_to_suite:
return _projects_opt_limit_to_suites(sortedProjects)
else:
return sortedProjects
def projects_opt_limit_to_suites():
"""
Get the list of all loaded projects optionally limited by --suite option
"""
return projects(True)
def _projects_opt_limit_to_suites(projects):
return projects
def annotation_processors():
"""
Get the list of all loaded projects that define an annotation processor.
"""
global _annotationProcessors
if _annotationProcessors is None:
aps = set()
for p in projects():
for ap in p.annotation_processors():
if project(ap, False):
aps.add(ap)
_annotationProcessors = list(aps)
return _annotationProcessors
def distribution(name, fatalIfMissing=True):
"""
Get the distribution for a given name. This will abort if the named distribution does
not exist and 'fatalIfMissing' is true.
"""
d = _dists.get(name)
if d is None and fatalIfMissing:
abort('distribution named ' + name + ' not found')
return d
def dependency(name, fatalIfMissing=True):
"""
Get the project or library for a given name. This will abort if a project or library does
not exist for 'name' and 'fatalIfMissing' is true.
"""
d = _projects.get(name)
if d is None:
d = _libs.get(name)
if d is None:
d = _jreLibs.get(name)
if d is None and fatalIfMissing:
if name in _opts.ignored_projects:
abort('project named ' + name + ' is ignored')
abort('project or library named ' + name + ' not found')
return d
def project(name, fatalIfMissing=True):
"""
Get the project for a given name. This will abort if the named project does
not exist and 'fatalIfMissing' is true.
"""
p = _projects.get(name)
if p is None and fatalIfMissing:
if name in _opts.ignored_projects:
abort('project named ' + name + ' is ignored')
abort('project named ' + name + ' not found')
return p
def library(name, fatalIfMissing=True):
"""
Gets the library for a given name. This will abort if the named library does
not exist and 'fatalIfMissing' is true.
"""
l = _libs.get(name)
if l is None and fatalIfMissing:
if _projects.get(name):
abort(name + ' is a project, not a library')
abort('library named ' + name + ' not found')
return l
def _as_classpath(deps, resolve):
cp = []
if _opts.cp_prefix is not None:
cp = [_opts.cp_prefix]
for d in deps:
d.append_to_classpath(cp, resolve)
if _opts.cp_suffix is not None:
cp += [_opts.cp_suffix]
return os.pathsep.join(cp)
def classpath(names=None, resolve=True, includeSelf=True, includeBootClasspath=False):
"""
Get the class path for a list of given dependencies and distributions, resolving each entry in the
path (e.g. downloading a missing library) if 'resolve' is true.
"""
if names is None:
deps = sorted_deps(includeLibs=True)
dists = list(_dists.values())
else:
deps = []
dists = []
if isinstance(names, types.StringTypes):
names = [names]
for n in names:
dep = dependency(n, fatalIfMissing=False)
if dep:
dep.all_deps(deps, True, includeSelf)
else:
dist = distribution(n)
if not dist:
abort('project, library or distribution named ' + n + ' not found')
dists.append(dist)
if len(dists):
distsDeps = set()
for d in dists:
distsDeps.update(d.sorted_deps())
# remove deps covered by a dist that will be on the class path
deps = [d for d in deps if d not in distsDeps]
result = _as_classpath(deps, resolve)
# prepend distributions
if len(dists):
distsCp = os.pathsep.join(dist.path for dist in dists)
if len(result):
result = distsCp + os.pathsep + result
else:
result = distsCp
if includeBootClasspath:
result = os.pathsep.join([java().bootclasspath(), result])
return result
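# Illustrative sketch (kept as comments, not executed): resolving a class path.
# The project and distribution names are hypothetical; any name known to
# dependency() or distribution() can be used.
#
#   cp = classpath('com.example.project')                      # project plus its dependencies
#   cp = classpath(['com.example.project', 'EXAMPLE_DIST'], includeBootClasspath=True)
#   run_java(['-cp', cp, 'com.example.Main'])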
def classpath_walk(names=None, resolve=True, includeSelf=True, includeBootClasspath=False):
"""
Walks the resources available in a given classpath, yielding a tuple for each resource
where the first member of the tuple is a directory path or ZipFile object for a
classpath entry and the second member is the qualified path of the resource relative
to the classpath entry.
"""
cp = classpath(names, resolve, includeSelf, includeBootClasspath)
for entry in cp.split(os.pathsep):
if not exists(entry):
continue
if isdir(entry):
for root, dirs, files in os.walk(entry):
for d in dirs:
entryPath = join(root[len(entry) + 1:], d)
yield entry, entryPath
for f in files:
entryPath = join(root[len(entry) + 1:], f)
yield entry, entryPath
elif entry.endswith('.jar') or entry.endswith('.zip'):
with zipfile.ZipFile(entry, 'r') as zf:
for zi in zf.infolist():
entryPath = zi.filename
yield zf, entryPath
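# Illustrative sketch (kept as comments, not executed): listing the .class
# resources visible on the class path of a hypothetical project.
#
#   for entry, resource in classpath_walk('com.example.project'):
#       if resource.endswith('.class'):
#           log(resource)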
def sorted_deps(projectNames=None, includeLibs=False, includeJreLibs=False, includeAnnotationProcessors=False):
"""
Gets projects and libraries sorted such that dependencies
are before the projects that depend on them. Unless 'includeLibs' is
true, libraries are omitted from the result.
"""
projects = projects_from_names(projectNames)
return sorted_project_deps(projects, includeLibs=includeLibs, includeJreLibs=includeJreLibs, includeAnnotationProcessors=includeAnnotationProcessors)
def sorted_dists():
"""
Gets distributions sorted such that each distribution comes after
any distributions it depends upon.
"""
dists = []
def add_dist(dist):
if not dist in dists:
for depDist in [distribution(name) for name in dist.distDependencies]:
add_dist(depDist)
if not dist in dists:
dists.append(dist)
for d in _dists.itervalues():
add_dist(d)
return dists
def sorted_project_deps(projects, includeLibs=False, includeJreLibs=False, includeAnnotationProcessors=False):
deps = []
for p in projects:
p.all_deps(deps, includeLibs=includeLibs, includeJreLibs=includeJreLibs, includeAnnotationProcessors=includeAnnotationProcessors)
return deps
def _handle_missing_java_home():
if not sys.stdout.isatty():
abort('Could not find bootstrap JDK. Use --java-home option or ensure JAVA_HOME environment variable is set.')
candidateJdks = []
if get_os() == 'darwin':
base = '/Library/Java/JavaVirtualMachines'
candidateJdks = [join(base, n, 'Contents/Home') for n in os.listdir(base) if exists(join(base, n, 'Contents/Home'))]
elif get_os() == 'linux':
base = '/usr/lib/jvm'
candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, 'jre/lib/rt.jar'))]
elif get_os() == 'solaris':
base = '/usr/jdk/instances'
candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, 'jre/lib/rt.jar'))]
elif get_os() == 'windows':
base = r'C:\Program Files\Java'
candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, r'jre\lib\rt.jar'))]
javaHome = None
if len(candidateJdks) != 0:
javaHome = select_items(candidateJdks + ['<other>'], allowMultiple=False)
if javaHome == '<other>':
javaHome = None
while javaHome is None:
javaHome = raw_input('Enter path of bootstrap JDK: ')
rtJarPath = join(javaHome, 'jre', 'lib', 'rt.jar')
if not exists(rtJarPath):
log('Does not appear to be a valid JDK as ' + rtJarPath + ' does not exist')
javaHome = None
else:
break
envPath = join(_primary_suite.mxDir, 'env')
if ask_yes_no('Persist this setting by adding "JAVA_HOME=' + javaHome + '" to ' + envPath, 'y'):
with open(envPath, 'a') as fp:
print >> fp, 'JAVA_HOME=' + javaHome
return javaHome
class ArgParser(ArgumentParser):
# Override parent to append the list of available commands
def format_help(self):
return ArgumentParser.format_help(self) + _format_commands()
def __init__(self):
self.java_initialized = False
# this doesn't resolve the right way, but can't figure out how to override _handle_conflict_resolve in _ActionsContainer
ArgumentParser.__init__(self, prog='mx', conflict_handler='resolve')
self.add_argument('-v', action='store_true', dest='verbose', help='enable verbose output')
self.add_argument('-V', action='store_true', dest='very_verbose', help='enable very verbose output')
self.add_argument('-w', action='store_true', dest='warn', help='enable warning messages')
self.add_argument('-p', '--primary-suite-path', help='set the primary suite directory', metavar='<path>')
self.add_argument('--dbg', type=int, dest='java_dbg_port', help='make Java processes wait on <port> for a debugger', metavar='<port>')
        self.add_argument('-d', action='store_const', const=8000, dest='java_dbg_port', help='alias for "--dbg 8000"')
self.add_argument('--cp-pfx', dest='cp_prefix', help='class path prefix', metavar='<arg>')
self.add_argument('--cp-sfx', dest='cp_suffix', help='class path suffix', metavar='<arg>')
self.add_argument('--J', dest='java_args', help='Java VM arguments (e.g. --J @-dsa)', metavar='@<args>')
self.add_argument('--Jp', action='append', dest='java_args_pfx', help='prefix Java VM arguments (e.g. --Jp @-dsa)', metavar='@<args>', default=[])
self.add_argument('--Ja', action='append', dest='java_args_sfx', help='suffix Java VM arguments (e.g. --Ja @-dsa)', metavar='@<args>', default=[])
        self.add_argument('--user-home', help="user's home directory", metavar='<path>', default=os.path.expanduser('~'))
self.add_argument('--java-home', help='primary JDK directory (must be JDK 7 or later)', metavar='<path>')
self.add_argument('--extra-java-homes', help='secondary JDK directories separated by "' + os.pathsep + '"', metavar='<path>')
self.add_argument('--ignore-project', action='append', dest='ignored_projects', help='name of project to ignore', metavar='<name>', default=[])
self.add_argument('--kill-with-sigquit', action='store_true', dest='killwithsigquit', help='send sigquit first before killing child processes')
if get_os() != 'windows':
            # Timeouts are (currently) implemented with Unix-specific functionality
self.add_argument('--timeout', help='timeout (in seconds) for command', type=int, default=0, metavar='<secs>')
self.add_argument('--ptimeout', help='timeout (in seconds) for subprocesses', type=int, default=0, metavar='<secs>')
def _parse_cmd_line(self, args=None):
if args is None:
args = sys.argv[1:]
self.add_argument('commandAndArgs', nargs=REMAINDER, metavar='command args...')
opts = self.parse_args()
# Give the timeout options a default value to avoid the need for hasattr() tests
opts.__dict__.setdefault('timeout', 0)
opts.__dict__.setdefault('ptimeout', 0)
if opts.very_verbose:
opts.verbose = True
if opts.java_home is None:
opts.java_home = os.environ.get('JAVA_HOME')
if opts.extra_java_homes is None:
opts.extra_java_homes = os.environ.get('EXTRA_JAVA_HOMES')
if opts.java_home is None or opts.java_home == '':
opts.java_home = _handle_missing_java_home()
if opts.user_home is None or opts.user_home == '':
abort('Could not find user home. Use --user-home option or ensure HOME environment variable is set.')
os.environ['JAVA_HOME'] = opts.java_home
os.environ['HOME'] = opts.user_home
opts.ignored_projects = opts.ignored_projects + os.environ.get('IGNORED_PROJECTS', '').split(',')
commandAndArgs = opts.__dict__.pop('commandAndArgs')
return opts, commandAndArgs
def _handle_conflict_resolve(self, action, conflicting_actions):
self._handle_conflict_error(action, conflicting_actions)
def _format_commands():
msg = '\navailable commands:\n\n'
for cmd in sorted(_commands.iterkeys()):
c, _ = _commands[cmd][:2]
doc = c.__doc__
if doc is None:
doc = ''
msg += ' {0:<20} {1}\n'.format(cmd, doc.split('\n', 1)[0])
return msg + '\n'
def java(requiredCompliance=None):
"""
    Get a JavaConfig object containing Java command launch details.
If requiredCompliance is None, the compliance level specified by --java-home/JAVA_HOME
is returned. Otherwise, the JavaConfig exactly matching requiredCompliance is returned
or None if there is no exact match.
"""
assert _java_homes
if not requiredCompliance:
return _java_homes[0]
for java in _java_homes:
if java.javaCompliance == requiredCompliance:
return java
return None
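# Illustrative sketch (kept as comments, not executed): selecting a JDK by
# compliance level.
#
#   jdk = java()          # the JDK denoted by --java-home/JAVA_HOME
#   jdk7 = java('1.7')    # exact match among the configured JDKs, or None
#   if jdk7 is None:
#       abort('this command requires a JDK 7')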
def run_java(args, nonZeroIsFatal=True, out=None, err=None, cwd=None, addDefaultArgs=True, javaConfig=None):
if not javaConfig:
javaConfig = java()
return run(javaConfig.format_cmd(args, addDefaultArgs), nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd)
def _kill_process_group(pid, sig):
if not sig:
sig = signal.SIGKILL
pgid = os.getpgid(pid)
try:
os.killpg(pgid, sig)
return True
except:
log('Error killing subprocess ' + str(pgid) + ': ' + str(sys.exc_info()[1]))
return False
def _waitWithTimeout(process, args, timeout):
def _waitpid(pid):
while True:
try:
return os.waitpid(pid, os.WNOHANG)
except OSError, e:
if e.errno == errno.EINTR:
continue
raise
def _returncode(status):
if os.WIFSIGNALED(status):
return -os.WTERMSIG(status)
elif os.WIFEXITED(status):
return os.WEXITSTATUS(status)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
end = time.time() + timeout
delay = 0.0005
while True:
(pid, status) = _waitpid(process.pid)
if pid == process.pid:
return _returncode(status)
remaining = end - time.time()
if remaining <= 0:
abort('Process timed out after {0} seconds: {1}'.format(timeout, ' '.join(args)))
delay = min(delay * 2, remaining, .05)
time.sleep(delay)
# Makes the current subprocess accessible to the abort() function
# This is a list of tuples of the subprocess.Popen or
# multiprocessing.Process object and args.
_currentSubprocesses = []
def _addSubprocess(p, args):
entry = (p, args)
_currentSubprocesses.append(entry)
return entry
def _removeSubprocess(entry):
if entry and entry in _currentSubprocesses:
try:
_currentSubprocesses.remove(entry)
except:
pass
def waitOn(p):
if get_os() == 'windows':
        # On Windows, use a poll loop, otherwise signals do not get handled
        retcode = None
        while retcode is None:
retcode = p.poll()
time.sleep(0.05)
else:
retcode = p.wait()
return retcode
def run(args, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, env=None):
"""
Run a command in a subprocess, wait for it to complete and return the exit status of the process.
If the exit status is non-zero and `nonZeroIsFatal` is true, then mx is exited with
the same exit status.
    Each line of the standard output and error streams of the subprocess is redirected to
    'out' and 'err' respectively if they are callable objects.
"""
assert isinstance(args, types.ListType), "'args' must be a list: " + str(args)
for arg in args:
assert isinstance(arg, types.StringTypes), 'argument is not a string: ' + str(arg)
if env is None:
env = os.environ
if _opts.verbose:
if _opts.very_verbose:
log('Environment variables:')
for key in sorted(env.keys()):
log(' ' + key + '=' + env[key])
log(' '.join(map(pipes.quote, args)))
if timeout is None and _opts.ptimeout != 0:
timeout = _opts.ptimeout
sub = None
try:
# On Unix, the new subprocess should be in a separate group so that a timeout alarm
# can use os.killpg() to kill the whole subprocess group
preexec_fn = None
creationflags = 0
if get_os() == 'windows':
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
else:
preexec_fn = os.setsid
def redirect(stream, f):
for line in iter(stream.readline, ''):
f(line)
stream.close()
stdout = out if not callable(out) else subprocess.PIPE
stderr = err if not callable(err) else subprocess.PIPE
p = subprocess.Popen(args, cwd=cwd, stdout=stdout, stderr=stderr, preexec_fn=preexec_fn, creationflags=creationflags, env=env)
sub = _addSubprocess(p, args)
joiners = []
if callable(out):
t = Thread(target=redirect, args=(p.stdout, out))
            # Don't make the reader thread a daemon, otherwise output can be dropped
t.start()
joiners.append(t)
if callable(err):
t = Thread(target=redirect, args=(p.stderr, err))
            # Don't make the reader thread a daemon, otherwise output can be dropped
t.start()
joiners.append(t)
while any([t.is_alive() for t in joiners]):
# Need to use timeout otherwise all signals (including CTRL-C) are blocked
# see: http://bugs.python.org/issue1167930
for t in joiners:
t.join(10)
if timeout is None or timeout == 0:
retcode = waitOn(p)
else:
if get_os() == 'windows':
abort('Use of timeout not (yet) supported on Windows')
retcode = _waitWithTimeout(p, args, timeout)
except OSError as e:
log('Error executing \'' + ' '.join(args) + '\': ' + str(e))
if _opts.verbose:
raise e
abort(e.errno)
except KeyboardInterrupt:
abort(1)
finally:
_removeSubprocess(sub)
if retcode and nonZeroIsFatal:
if _opts.verbose:
if _opts.very_verbose:
raise subprocess.CalledProcessError(retcode, ' '.join(args))
else:
log('[exit code: ' + str(retcode) + ']')
abort(retcode)
return retcode
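# Illustrative sketch (kept as comments, not executed): capturing subprocess
# output by passing callables for 'out' and 'err'. The command is arbitrary.
#
#   lines = []
#   rc = run([java().java, '-version'], out=lines.append, err=lines.append, nonZeroIsFatal=False)
#   if rc != 0:
#       log('java -version failed:\n' + ''.join(lines))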
def exe_suffix(name):
"""
Gets the platform specific suffix for an executable
"""
if get_os() == 'windows':
return name + '.exe'
return name
def add_lib_prefix(name):
"""
Adds the platform specific library prefix to a name
"""
os = get_os()
if os == 'linux' or os == 'solaris' or os == 'darwin':
return 'lib' + name
return name
def add_lib_suffix(name):
"""
Adds the platform specific library suffix to a name
"""
os = get_os()
if os == 'windows':
return name + '.dll'
if os == 'linux' or os == 'solaris':
return name + '.so'
if os == 'darwin':
return name + '.dylib'
return name
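# Illustrative sketch (kept as comments, not executed): composing a platform
# specific shared library file name from an arbitrary base name.
#
#   libName = add_lib_suffix(add_lib_prefix('example'))
#   # linux/solaris: 'libexample.so', darwin: 'libexample.dylib', windows: 'example.dll'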
"""
Utility for filtering duplicate lines.
"""
class DuplicateSuppressingStream:
"""
Creates an object that will suppress duplicate lines sent to 'out'.
The lines considered for suppression are those that contain one of the
strings in 'restrictTo' if it is not None.
"""
def __init__(self, restrictTo=None, out=sys.stdout):
self.restrictTo = restrictTo
self.seen = set()
self.out = out
self.currentFilteredLineCount = 0
self.currentFilteredTime = None
def isSuppressionCandidate(self, line):
if self.restrictTo:
for p in self.restrictTo:
if p in line:
return True
return False
else:
return True
def write(self, line):
if self.isSuppressionCandidate(line):
if line in self.seen:
self.currentFilteredLineCount += 1
if self.currentFilteredTime:
if time.time() - self.currentFilteredTime > 1 * 60:
self.out.write(" Filtered " + str(self.currentFilteredLineCount) + " repeated lines...\n")
self.currentFilteredTime = time.time()
else:
self.currentFilteredTime = time.time()
return
self.seen.add(line)
self.currentFilteredLineCount = 0
self.out.write(line)
self.currentFilteredTime = None
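# Illustrative sketch (kept as comments, not executed): suppressing repeated
# lines emitted by a subprocess. 'someCommand' and the filter string are hypothetical.
#
#   dss = DuplicateSuppressingStream(restrictTo=['warning:'])
#   run(someCommand, out=dss.write, err=dss.write)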
"""
A JavaCompliance simplifies comparing Java compliance values extracted from a JDK version string.
"""
class JavaCompliance:
def __init__(self, ver):
m = re.match(r'1\.(\d+).*', ver)
assert m is not None, 'not a recognized version string: ' + ver
self.value = int(m.group(1))
def __str__(self):
return '1.' + str(self.value)
def __cmp__(self, other):
if isinstance(other, types.StringType):
other = JavaCompliance(other)
return cmp(self.value, other.value)
def __hash__(self):
return self.value.__hash__()
"""
A version specification as defined in JSR-56
"""
class VersionSpec:
def __init__(self, versionString):
validChar = r'[\x21-\x25\x27-\x29\x2c\x2f-\x5e\x60-\x7f]'
separator = r'[.\-_]'
m = re.match("^" + validChar + '+(' + separator + validChar + '+)*$', versionString)
assert m is not None, 'not a recognized version string: ' + versionString
self.versionString = versionString
self.parts = [int(f) if f.isdigit() else f for f in re.split(separator, versionString)]
def __str__(self):
return self.versionString
def __cmp__(self, other):
return cmp(self.parts, other.parts)
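# Illustrative sketch (kept as comments, not executed): VersionSpec splits on
# '.', '-' and '_' and compares numeric parts as integers.
#
#   VersionSpec('1.0.1') > VersionSpec('1.0')    # True (longer matching prefix wins)
#   VersionSpec('1.10') > VersionSpec('1.9')     # True (10 and 9 compare numerically)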
def _filter_non_existant_paths(paths):
return os.pathsep.join([path for path in _separatedCygpathW2U(paths).split(os.pathsep) if exists(path)])
"""
A JavaConfig object encapsulates info on how Java commands are run.
"""
class JavaConfig:
def __init__(self, java_home, java_dbg_port):
self.jdk = java_home
self.debug_port = java_dbg_port
self.jar = exe_suffix(join(self.jdk, 'bin', 'jar'))
self.java = exe_suffix(join(self.jdk, 'bin', 'java'))
self.javac = exe_suffix(join(self.jdk, 'bin', 'javac'))
self.javap = exe_suffix(join(self.jdk, 'bin', 'javap'))
self.javadoc = exe_suffix(join(self.jdk, 'bin', 'javadoc'))
self.pack200 = exe_suffix(join(self.jdk, 'bin', 'pack200'))
self.toolsjar = join(self.jdk, 'lib', 'tools.jar')
self._bootclasspath = None
self._extdirs = None
self._endorseddirs = None
if not exists(self.java):
abort('Java launcher does not exist: ' + self.java)
def delAtAndSplit(s):
return shlex.split(s.lstrip('@'))
self.java_args = delAtAndSplit(_opts.java_args) if _opts.java_args else []
self.java_args_pfx = sum(map(delAtAndSplit, _opts.java_args_pfx), [])
self.java_args_sfx = sum(map(delAtAndSplit, _opts.java_args_sfx), [])
# Prepend the -d64 VM option only if the java command supports it
try:
output = subprocess.check_output([self.java, '-d64', '-version'], stderr=subprocess.STDOUT)
self.java_args = ['-d64'] + self.java_args
except subprocess.CalledProcessError as e:
try:
output = subprocess.check_output([self.java, '-version'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print e.output
abort(e.returncode)
def _checkOutput(out):
return 'version' in out
# hotspot can print a warning, e.g. if there's a .hotspot_compiler file in the cwd
output = output.split('\n')
version = None
for o in output:
if _checkOutput(o):
assert version is None
version = o
self.version = VersionSpec(version.split()[2].strip('"'))
self.javaCompliance = JavaCompliance(self.version.versionString)
if self.debug_port is not None:
self.java_args += ['-Xdebug', '-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=' + str(self.debug_port)]
def _init_classpaths(self):
myDir = dirname(__file__)
outDir = join(dirname(__file__), '.jdk' + str(self.version))
if not exists(outDir):
os.makedirs(outDir)
javaSource = join(myDir, 'ClasspathDump.java')
if not exists(join(outDir, 'ClasspathDump.class')):
subprocess.check_call([self.javac, '-d', _cygpathU2W(outDir), _cygpathU2W(javaSource)], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self._bootclasspath, self._extdirs, self._endorseddirs = [x if x != 'null' else None for x in subprocess.check_output([self.java, '-cp', _separatedCygpathU2W(outDir), 'ClasspathDump'], stderr=subprocess.PIPE).split('|')]
if not self._bootclasspath or not self._extdirs or not self._endorseddirs:
warn("Could not find all classpaths: boot='" + str(self._bootclasspath) + "' extdirs='" + str(self._extdirs) + "' endorseddirs='" + str(self._endorseddirs) + "'")
self._bootclasspath = _filter_non_existant_paths(self._bootclasspath)
self._extdirs = _filter_non_existant_paths(self._extdirs)
self._endorseddirs = _filter_non_existant_paths(self._endorseddirs)
def __hash__(self):
return hash(self.jdk)
def __cmp__(self, other):
if isinstance(other, JavaConfig):
return cmp(self.javaCompliance, other.javaCompliance)
raise TypeError()
def format_cmd(self, args, addDefaultArgs):
if addDefaultArgs:
return [self.java] + self.processArgs(args)
else:
return [self.java] + args
def processArgs(self, args):
return self.java_args_pfx + self.java_args + self.java_args_sfx + args
def bootclasspath(self):
if self._bootclasspath is None:
self._init_classpaths()
return _separatedCygpathU2W(self._bootclasspath)
def extdirs(self):
if self._extdirs is None:
self._init_classpaths()
return _separatedCygpathU2W(self._extdirs)
def endorseddirs(self):
if self._endorseddirs is None:
self._init_classpaths()
return _separatedCygpathU2W(self._endorseddirs)
def containsJar(self, jar):
if self._bootclasspath is None:
self._init_classpaths()
for e in self._bootclasspath.split(os.pathsep):
if basename(e) == jar:
return True
for d in self._extdirs.split(os.pathsep):
if len(d) and jar in os.listdir(d):
return True
for d in self._endorseddirs.split(os.pathsep):
if len(d) and jar in os.listdir(d):
return True
return False
def check_get_env(key):
"""
Gets an environment variable, aborting with a useful message if it is not set.
"""
value = get_env(key)
if value is None:
abort('Required environment variable ' + key + ' must be set')
return value
def get_env(key, default=None):
"""
Gets an environment variable.
"""
value = os.environ.get(key, default)
return value
def logv(msg=None):
if _opts.verbose:
log(msg)
def log(msg=None):
"""
Write a message to the console.
All script output goes through this method thus allowing a subclass
to redirect it.
"""
if msg is None:
print
else:
print msg
def expand_project_in_class_path_arg(cpArg):
cp = []
for part in cpArg.split(os.pathsep):
if part.startswith('@'):
cp += classpath(part[1:]).split(os.pathsep)
else:
cp.append(part)
return os.pathsep.join(cp)
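# Illustrative sketch (kept as comments, not executed): a '@'-prefixed entry in
# a -cp/-classpath argument is replaced by the class path of the named
# dependency. The project name is hypothetical.
#
#   expand_project_in_class_path_arg('@com.example.project' + os.pathsep + '/extra/lib.jar')
#   # -> classpath('com.example.project') + os.pathsep + '/extra/lib.jar'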
def expand_project_in_args(args):
for i in range(len(args)):
if args[i] == '-cp' or args[i] == '-classpath':
if i + 1 < len(args):
args[i + 1] = expand_project_in_class_path_arg(args[i + 1])
return
def gmake_cmd():
for a in ['make', 'gmake', 'gnumake']:
try:
output = subprocess.check_output([a, '--version'])
if 'GNU' in output:
return a
except:
pass
abort('Could not find a GNU make executable on the current path.')
def expandvars_in_property(value):
result = expandvars(value)
if '$' in result or '%' in result:
abort('Property contains an undefined environment variable: ' + value)
return result
def _send_sigquit():
for p, args in _currentSubprocesses:
def _isJava():
if args:
name = args[0].split(os.sep)[-1]
return name == "java"
return False
if p is not None and _isJava():
if get_os() == 'windows':
log("mx: implement me! want to send SIGQUIT to my child process")
else:
_kill_process_group(p.pid, sig=signal.SIGQUIT)
time.sleep(0.1)
def abort(codeOrMessage):
"""
Aborts the program with a SystemExit exception.
If 'codeOrMessage' is a plain integer, it specifies the system exit status;
if it is None, the exit status is zero; if it has another type (such as a string),
the object's value is printed and the exit status is one.
"""
if _opts.killwithsigquit:
_send_sigquit()
def is_alive(p):
if isinstance(p, subprocess.Popen):
return p.poll() is None
assert isinstance(p, multiprocessing.Process), p
return p.is_alive()
for p, args in _currentSubprocesses:
if is_alive(p):
try:
if get_os() == 'windows':
p.terminate()
else:
_kill_process_group(p.pid, signal.SIGKILL)
except BaseException as e:
if is_alive(p):
log('error while killing subprocess {} "{}": {}'.format(p.pid, ' '.join(args), e))
if _opts and _opts.verbose:
import traceback
traceback.print_stack()
raise SystemExit(codeOrMessage)
def download(path, urls, verbose=False):
"""
    Attempts to download content from each URL in a list, stopping after the first successful download.
If the content cannot be retrieved from any URL, the program is aborted. The downloaded content
is written to the file indicated by 'path'.
"""
d = dirname(path)
if d != '' and not exists(d):
os.makedirs(d)
assert not path.endswith(os.sep)
myDir = dirname(__file__)
javaSource = join(myDir, 'URLConnectionDownload.java')
javaClass = join(myDir, 'URLConnectionDownload.class')
if not exists(javaClass) or getmtime(javaClass) < getmtime(javaSource):
subprocess.check_call([java().javac, '-d', _cygpathU2W(myDir), _cygpathU2W(javaSource)])
verbose = []
if sys.stderr.isatty():
verbose.append("-v")
if run([java().java, '-cp', _cygpathU2W(myDir), 'URLConnectionDownload', _cygpathU2W(path)] + verbose + urls, nonZeroIsFatal=False) == 0:
return
abort('Could not download to ' + path + ' from any of the following URLs:\n\n ' +
'\n '.join(urls) + '\n\nPlease use a web browser to do the download manually')
def update_file(path, content):
"""
Updates a file with some given content if the content differs from what's in
the file already. The return value indicates if the file was updated.
"""
existed = exists(path)
try:
old = None
if existed:
with open(path, 'rb') as f:
old = f.read()
if old == content:
return False
with open(path, 'wb') as f:
f.write(content)
log(('modified ' if existed else 'created ') + path)
return True
except IOError as e:
abort('Error while writing to ' + path + ': ' + str(e))
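# Illustrative sketch (kept as comments, not executed): update_file() writes only
# when the content actually changes, keeping timestamps stable for incremental
# builds. 'p' and 'doc' are hypothetical (a project and an XMLDoc).
#
#   if update_file(join(p.dir, '.classpath'), doc.xml(indent='\t', newl='\n')):
#       log('regenerated Eclipse configuration for ' + p.name)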
# Builtin commands
def _defaultEcjPath():
return get_env('JDT', join(_primary_suite.mxDir, 'ecj.jar'))
class JavaCompileTask:
def __init__(self, args, proj, reason, javafilelist, jdk, outputDir, jdtJar, deps):
self.proj = proj
self.reason = reason
self.javafilelist = javafilelist
self.deps = deps
self.jdk = jdk
self.outputDir = outputDir
self.done = False
self.jdtJar = jdtJar
self.args = args
def __str__(self):
return self.proj.name
def logCompilation(self, compiler):
log('Compiling Java sources for {} with {}... [{}]'.format(self.proj.name, compiler, self.reason))
def execute(self):
argfileName = join(self.proj.dir, 'javafilelist.txt')
argfile = open(argfileName, 'wb')
argfile.write('\n'.join(map(_cygpathU2W, self.javafilelist)))
argfile.close()
processorArgs = []
processorPath = self.proj.annotation_processors_path()
if processorPath:
genDir = self.proj.source_gen_dir()
if exists(genDir):
shutil.rmtree(genDir)
os.mkdir(genDir)
processorArgs += ['-processorpath', _separatedCygpathU2W(join(processorPath)), '-s', _cygpathU2W(genDir)]
else:
processorArgs += ['-proc:none']
args = self.args
jdk = self.jdk
outputDir = _cygpathU2W(self.outputDir)
compliance = str(jdk.javaCompliance)
cp = _separatedCygpathU2W(classpath(self.proj.name, includeSelf=True))
toBeDeleted = [argfileName]
try:
if not self.jdtJar:
mainJava = java()
if not args.error_prone:
javac = args.alt_javac if args.alt_javac else mainJava.javac
self.logCompilation('javac' if not args.alt_javac else args.alt_javac)
javacCmd = [javac, '-g', '-J-Xmx1g', '-J-Xms1g', '-source', compliance, '-target', compliance, '-classpath', cp, '-d', outputDir, '-bootclasspath', jdk.bootclasspath(), '-endorseddirs', jdk.endorseddirs(), '-extdirs', jdk.extdirs()]
if jdk.debug_port is not None:
javacCmd += ['-J-Xdebug', '-J-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=' + str(jdk.debug_port)]
javacCmd += processorArgs
javacCmd += ['@' + _cygpathU2W(argfile.name)]
if not args.warnAPI:
javacCmd.append('-XDignore.symbol.file')
run(javacCmd)
else:
self.logCompilation('javac (with error-prone)')
javaArgs = ['-Xmx1g', '-Xms1g']
javacArgs = ['-g', '-source', compliance, '-target', compliance, '-classpath', cp, '-d', outputDir, '-bootclasspath', jdk.bootclasspath(), '-endorseddirs', jdk.endorseddirs(), '-extdirs', jdk.extdirs()]
javacArgs += processorArgs
javacArgs += ['@' + argfile.name]
if not args.warnAPI:
javacArgs.append('-XDignore.symbol.file')
run_java(javaArgs + ['-cp', os.pathsep.join([mainJava.toolsjar, args.error_prone]), 'com.google.errorprone.ErrorProneCompiler'] + javacArgs)
else:
self.logCompilation('JDT')
jdtVmArgs = ['-Xmx1g', '-Xms1g', '-jar', _cygpathU2W(self.jdtJar)]
jdtArgs = ['-' + compliance,
'-cp', cp, '-g', '-enableJavadoc',
'-d', outputDir,
'-bootclasspath', jdk.bootclasspath(),
'-endorseddirs', jdk.endorseddirs(),
'-extdirs', jdk.extdirs()]
jdtArgs += processorArgs
jdtProperties = join(self.proj.dir, '.settings', 'org.eclipse.jdt.core.prefs')
rootJdtProperties = join(self.proj.suite.mxDir, 'eclipse-settings', 'org.eclipse.jdt.core.prefs')
if not exists(jdtProperties) or os.path.getmtime(jdtProperties) < os.path.getmtime(rootJdtProperties):
# Try to fix a missing properties file by running eclipseinit
_eclipseinit_project(self.proj)
if not exists(jdtProperties):
log('JDT properties file {0} not found'.format(jdtProperties))
else:
with open(jdtProperties) as fp:
origContent = fp.read()
content = origContent
if args.jdt_warning_as_error:
content = content.replace('=warning', '=error')
if not args.jdt_show_task_tags:
content = content + '\norg.eclipse.jdt.core.compiler.problem.tasks=ignore'
if origContent != content:
jdtPropertiesTmp = jdtProperties + '.tmp'
with open(jdtPropertiesTmp, 'w') as fp:
fp.write(content)
toBeDeleted.append(jdtPropertiesTmp)
jdtArgs += ['-properties', _cygpathU2W(jdtPropertiesTmp)]
else:
jdtArgs += ['-properties', _cygpathU2W(jdtProperties)]
jdtArgs.append('@' + _cygpathU2W(argfile.name))
run_java(jdtVmArgs + jdtArgs)
# Create annotation processor jar for a project that defines annotation processors
if self.proj.definedAnnotationProcessorsDist:
self.proj.definedAnnotationProcessorsDist.make_archive()
finally:
# Do not clean up temp files if verbose as there's
# a good chance the user wants to copy and paste the
# Java compiler command directly
if not _opts.verbose:
for n in toBeDeleted:
os.remove(n)
self.done = True
def build(args, parser=None):
"""compile the Java and C sources, linking the latter
Compile all the Java source code using the appropriate compilers
and linkers for the various source code types."""
suppliedParser = parser is not None
if not suppliedParser:
parser = ArgumentParser(prog='mx build')
parser.add_argument('-f', action='store_true', dest='force', help='force build (disables timestamp checking)')
parser.add_argument('-c', action='store_true', dest='clean', help='removes existing build output')
parser.add_argument('-p', action='store_true', dest='parallelize', help='parallelizes Java compilation')
parser.add_argument('--source', dest='compliance', help='Java compliance level for projects without an explicit one')
parser.add_argument('--Wapi', action='store_true', dest='warnAPI', help='show warnings about using internal APIs')
parser.add_argument('--projects', action='store', help='comma separated projects to build (omit to build all projects)')
parser.add_argument('--only', action='store', help='comma separated projects to build, without checking their dependencies (omit to build all projects)')
parser.add_argument('--no-java', action='store_false', dest='java', help='do not build Java projects')
parser.add_argument('--no-native', action='store_false', dest='native', help='do not build native projects')
parser.add_argument('--jdt-warning-as-error', action='store_true', help='convert all Eclipse batch compiler warnings to errors')
parser.add_argument('--jdt-show-task-tags', action='store_true', help='show task tags as Eclipse batch compiler warnings')
parser.add_argument('--alt-javac', dest='alt_javac', help='path to alternative javac executable', metavar='<path>')
compilerSelect = parser.add_mutually_exclusive_group()
compilerSelect.add_argument('--error-prone', dest='error_prone', help='path to error-prone.jar', metavar='<path>')
compilerSelect.add_argument('--jdt', help='path to ecj.jar, the Eclipse batch compiler', default=_defaultEcjPath(), metavar='<path>')
compilerSelect.add_argument('--force-javac', action='store_true', dest='javac', help='use javac whether ecj.jar is found or not')
if suppliedParser:
parser.add_argument('remainder', nargs=REMAINDER, metavar='...')
args = parser.parse_args(args)
jdtJar = None
if not args.javac and args.jdt is not None:
if not args.jdt.endswith('.jar'):
abort('Path for Eclipse batch compiler does not look like a jar file: ' + args.jdt)
jdtJar = args.jdt
if not exists(jdtJar):
if os.path.abspath(jdtJar) == os.path.abspath(_defaultEcjPath()) and get_env('JDT', None) is None:
# Silently ignore JDT if default location is used but does not exist
jdtJar = None
else:
abort('Eclipse batch compiler jar does not exist: ' + args.jdt)
if args.only is not None:
        # N.B. This build will not include dependencies of the named projects, including annotation processor dependencies
sortedProjects = [project(name) for name in args.only.split(',')]
else:
if args.projects is not None:
projectNames = args.projects.split(',')
else:
projectNames = None
projects = _projects_opt_limit_to_suites(projects_from_names(projectNames))
# N.B. Limiting to a suite only affects the starting set of projects. Dependencies in other suites will still be compiled
sortedProjects = sorted_project_deps(projects, includeAnnotationProcessors=True)
if args.java:
ideinit([], refreshOnly=True, buildProcessorJars=False)
def prepareOutputDirs(p, clean):
outputDir = p.output_dir()
if exists(outputDir):
if clean:
log('Cleaning {0}...'.format(outputDir))
shutil.rmtree(outputDir)
os.mkdir(outputDir)
else:
os.mkdir(outputDir)
genDir = p.source_gen_dir()
if genDir != '' and exists(genDir) and clean:
log('Cleaning {0}...'.format(genDir))
for f in os.listdir(genDir):
shutil.rmtree(join(genDir, f))
return outputDir
tasks = {}
updatedAnnotationProcessorDists = set()
for p in sortedProjects:
if p.native:
if args.native:
log('Calling GNU make {0}...'.format(p.dir))
if args.clean:
run([gmake_cmd(), 'clean'], cwd=p.dir)
run([gmake_cmd()], cwd=p.dir)
continue
else:
if not args.java:
continue
if exists(join(p.dir, 'plugin.xml')): # eclipse plugin project
continue
# skip building this Java project if its Java compliance level is "higher" than the configured JDK
requiredCompliance = p.javaCompliance if p.javaCompliance else JavaCompliance(args.compliance) if args.compliance else None
jdk = java(requiredCompliance)
assert jdk
outputDir = prepareOutputDirs(p, args.clean)
sourceDirs = p.source_dirs()
buildReason = 'forced build' if args.force else None
taskDeps = []
for dep in p.all_deps([], includeLibs=False, includeAnnotationProcessors=True):
taskDep = tasks.get(dep.name)
if taskDep:
if not buildReason:
buildReason = dep.name + ' rebuilt'
taskDeps.append(taskDep)
jasminAvailable = None
javafilelist = []
for sourceDir in sourceDirs:
for root, _, files in os.walk(sourceDir):
javafiles = [join(root, name) for name in files if name.endswith('.java') and name != 'package-info.java']
javafilelist += javafiles
# Copy all non Java resources or assemble Jasmin files
nonjavafilelist = [join(root, name) for name in files if not name.endswith('.java')]
for src in nonjavafilelist:
if src.endswith('.jasm'):
className = None
with open(src) as f:
for line in f:
if line.startswith('.class '):
className = line.split()[-1]
break
if className is not None:
jasminOutputDir = p.jasmin_output_dir()
classFile = join(jasminOutputDir, className.replace('/', os.sep) + '.class')
if exists(dirname(classFile)) and (not exists(classFile) or os.path.getmtime(classFile) < os.path.getmtime(src)):
if jasminAvailable is None:
try:
with open(os.devnull) as devnull:
subprocess.call('jasmin', stdout=devnull, stderr=subprocess.STDOUT)
jasminAvailable = True
except OSError:
jasminAvailable = False
if jasminAvailable:
log('Assembling Jasmin file ' + src)
run(['jasmin', '-d', jasminOutputDir, src])
else:
log('The jasmin executable could not be found - skipping ' + src)
with file(classFile, 'a'):
os.utime(classFile, None)
else:
                            log('could not find .class directive in Jasmin source: ' + src)
else:
dst = join(outputDir, src[len(sourceDir) + 1:])
if not exists(dirname(dst)):
os.makedirs(dirname(dst))
if exists(dirname(dst)) and (not exists(dst) or os.path.getmtime(dst) < os.path.getmtime(src)):
shutil.copyfile(src, dst)
if not buildReason:
for javafile in javafiles:
classfile = TimeStampFile(outputDir + javafile[len(sourceDir):-len('java')] + 'class')
if not classfile.exists() or classfile.isOlderThan(javafile):
buildReason = 'class file(s) out of date'
break
apsOutOfDate = p.update_current_annotation_processors_file()
if apsOutOfDate:
buildReason = 'annotation processor(s) changed'
if not buildReason:
logv('[all class files for {0} are up to date - skipping]'.format(p.name))
continue
if len(javafilelist) == 0:
logv('[no Java sources for {0} - skipping]'.format(p.name))
continue
javafilelist = sorted(javafilelist)
task = JavaCompileTask(args, p, buildReason, javafilelist, jdk, outputDir, jdtJar, taskDeps)
if p.definedAnnotationProcessorsDist:
updatedAnnotationProcessorDists.add(p.definedAnnotationProcessorsDist)
tasks[p.name] = task
if args.parallelize:
# Best to initialize class paths on main process
jdk.bootclasspath()
task.proc = None
else:
task.execute()
if args.parallelize:
def joinTasks(tasks):
failed = []
for t in tasks:
t.proc.join()
_removeSubprocess(t.sub)
if t.proc.exitcode != 0:
failed.append(t)
return failed
def checkTasks(tasks):
active = []
for t in tasks:
if t.proc.is_alive():
active.append(t)
else:
if t.proc.exitcode != 0:
return ([], joinTasks(tasks))
return (active, [])
def remainingDepsDepth(task):
if task._d is None:
incompleteDeps = [d for d in task.deps if d.proc is None or d.proc.is_alive()]
if len(incompleteDeps) == 0:
task._d = 0
else:
task._d = max([remainingDepsDepth(t) for t in incompleteDeps]) + 1
return task._d
def compareTasks(t1, t2):
d = remainingDepsDepth(t1) - remainingDepsDepth(t2)
if d == 0:
t1Work = (1 + len(t1.proj.annotation_processors())) * len(t1.javafilelist)
t2Work = (1 + len(t2.proj.annotation_processors())) * len(t2.javafilelist)
d = t1Work - t2Work
return d
def sortWorklist(tasks):
for t in tasks:
t._d = None
return sorted(tasks, compareTasks)
cpus = multiprocessing.cpu_count()
worklist = sortWorklist(tasks.values())
active = []
failed = []
while len(worklist) != 0:
while True:
active, failed = checkTasks(active)
if len(failed) != 0:
assert not active, active
break
if len(active) == cpus:
# Sleep for 1 second
time.sleep(1)
else:
break
if len(failed) != 0:
break
def executeTask(task):
# Clear sub-process list cloned from parent process
del _currentSubprocesses[:]
task.execute()
def depsDone(task):
for d in task.deps:
if d.proc is None or d.proc.exitcode is None:
return False
return True
for task in worklist:
if depsDone(task):
worklist.remove(task)
task.proc = multiprocessing.Process(target=executeTask, args=(task,))
task.proc.start()
active.append(task)
task.sub = _addSubprocess(task.proc, ['JavaCompileTask', str(task)])
if len(active) == cpus:
break
worklist = sortWorklist(worklist)
failed += joinTasks(active)
if len(failed):
for t in failed:
log('Compiling {} failed'.format(t.proj.name))
abort('{} Java compilation tasks failed'.format(len(failed)))
if args.java:
for dist in sorted_dists():
if dist not in updatedAnnotationProcessorDists:
archive(['@' + dist.name])
if suppliedParser:
return args
return None
def _chunk_files_for_command_line(files, limit=None, pathFunction=None):
"""
Returns a generator for splitting up a list of files into chunks such that the
size of the space separated file paths in a chunk is less than a given limit.
This is used to work around system command line length limits.
"""
chunkSize = 0
chunkStart = 0
if limit is None:
commandLinePrefixAllowance = 3000
if get_os() == 'windows':
# The CreateProcess function on Windows limits the length of a command line to
# 32,768 characters (http://msdn.microsoft.com/en-us/library/ms682425%28VS.85%29.aspx)
limit = 32768 - commandLinePrefixAllowance
else:
# Using just SC_ARG_MAX without extra downwards adjustment
# results in "[Errno 7] Argument list too long" on MacOS.
syslimit = os.sysconf('SC_ARG_MAX') - 20000
limit = syslimit - commandLinePrefixAllowance
for i in range(len(files)):
path = files[i] if pathFunction is None else pathFunction(files[i])
size = len(path) + 1
if chunkSize + size < limit:
chunkSize += size
else:
assert i > chunkStart
yield files[chunkStart:i]
chunkStart = i
chunkSize = 0
if chunkStart == 0:
assert chunkSize < limit
yield files
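# Illustrative sketch (kept as comments, not executed): invoking an external tool
# over a long file list in command-line-sized chunks. 'toolCmd' and 'javafiles'
# are hypothetical.
#
#   for chunk in _chunk_files_for_command_line(javafiles):
#       run(toolCmd + chunk)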
def eclipseformat(args):
"""run the Eclipse Code Formatter on the Java sources
The exit code 1 denotes that at least one file was modified."""
parser = ArgumentParser(prog='mx eclipseformat')
parser.add_argument('-e', '--eclipse-exe', help='location of the Eclipse executable')
parser.add_argument('-C', '--no-backup', action='store_false', dest='backup', help='do not save backup of modified files')
parser.add_argument('--projects', action='store', help='comma separated projects to process (omit to process all projects)')
args = parser.parse_args(args)
if args.eclipse_exe is None:
args.eclipse_exe = os.environ.get('ECLIPSE_EXE')
if args.eclipse_exe is None:
abort('Could not find Eclipse executable. Use -e option or ensure ECLIPSE_EXE environment variable is set.')
# Maybe an Eclipse installation dir was specified - look for the executable in it
if isdir(args.eclipse_exe):
args.eclipse_exe = join(args.eclipse_exe, exe_suffix('eclipse'))
warn("The eclipse-exe was a directory, now using " + args.eclipse_exe)
if not os.path.isfile(args.eclipse_exe):
abort('File does not exist: ' + args.eclipse_exe)
if not os.access(args.eclipse_exe, os.X_OK):
abort('Not an executable file: ' + args.eclipse_exe)
eclipseinit([], buildProcessorJars=False)
# build list of projects to be processed
projects = sorted_deps()
if args.projects is not None:
projects = [project(name) for name in args.projects.split(',')]
class Batch:
def __init__(self, settingsDir, javaCompliance):
self.path = join(settingsDir, 'org.eclipse.jdt.core.prefs')
self.javaCompliance = javaCompliance
self.javafiles = list()
with open(join(settingsDir, 'org.eclipse.jdt.ui.prefs')) as fp:
jdtUiPrefs = fp.read()
self.removeTrailingWhitespace = 'sp_cleanup.remove_trailing_whitespaces_all=true' in jdtUiPrefs
if self.removeTrailingWhitespace:
assert 'sp_cleanup.remove_trailing_whitespaces=true' in jdtUiPrefs and 'sp_cleanup.remove_trailing_whitespaces_ignore_empty=false' in jdtUiPrefs
def settings(self):
with open(self.path) as fp:
return fp.read() + java(self.javaCompliance).java + str(self.removeTrailingWhitespace)
class FileInfo:
def __init__(self, path):
self.path = path
with open(path) as fp:
self.content = fp.read()
self.times = (os.path.getatime(path), os.path.getmtime(path))
def update(self, removeTrailingWhitespace):
with open(self.path) as fp:
content = fp.read()
if self.content != content:
# Only apply *after* formatting to match the order in which the IDE does it
if removeTrailingWhitespace:
content, n = re.subn(r'[ \t]+$', '', content, flags=re.MULTILINE)
if n != 0 and self.content == content:
# undo on-disk changes made by the Eclipse formatter
with open(self.path, 'w') as fp:
fp.write(content)
if self.content != content:
self.diff = difflib.unified_diff(self.content.splitlines(1), content.splitlines(1))
self.content = content
return True
# reset access and modification time of file
os.utime(self.path, self.times)
modified = list()
batches = dict() # all sources with the same formatting settings are formatted together
for p in projects:
if p.native:
continue
sourceDirs = p.source_dirs()
batch = Batch(join(p.dir, '.settings'), p.javaCompliance)
if not exists(batch.path):
if _opts.verbose:
log('[no Eclipse Code Formatter preferences at {0} - skipping]'.format(batch.path))
continue
for sourceDir in sourceDirs:
for root, _, files in os.walk(sourceDir):
for f in [join(root, name) for name in files if name.endswith('.java')]:
batch.javafiles.append(FileInfo(f))
if len(batch.javafiles) == 0:
logv('[no Java sources in {0} - skipping]'.format(p.name))
continue
res = batches.setdefault(batch.settings(), batch)
if res is not batch:
res.javafiles = res.javafiles + batch.javafiles
log("we have: " + str(len(batches)) + " batches")
for batch in batches.itervalues():
for chunk in _chunk_files_for_command_line(batch.javafiles, pathFunction=lambda f: f.path):
run([args.eclipse_exe,
'-nosplash',
'-application',
'org.eclipse.jdt.core.JavaCodeFormatter',
'-vm', java(batch.javaCompliance).java,
'-config', batch.path]
+ [f.path for f in chunk])
for fi in chunk:
if fi.update(batch.removeTrailingWhitespace):
modified.append(fi)
log('{0} files were modified'.format(len(modified)))
if len(modified) != 0:
arcbase = _primary_suite.dir
if args.backup:
backup = os.path.abspath('eclipseformat.backup.zip')
zf = zipfile.ZipFile(backup, 'w', zipfile.ZIP_DEFLATED)
for fi in modified:
name = os.path.relpath(fi.path, arcbase)
log(' - {0}'.format(name))
log('Changes:')
log(''.join(fi.diff))
if args.backup:
arcname = name.replace(os.sep, '/')
zf.writestr(arcname, fi.content)
if args.backup:
zf.close()
log('Wrote backup of {0} modified files to {1}'.format(len(modified), backup))
return 1
return 0
def processorjars():
for s in suites(True):
_processorjars_suite(s)
def _processorjars_suite(s):
projs = [p for p in s.projects if p.definedAnnotationProcessors is not None]
if len(projs) <= 0:
return []
pnames = [p.name for p in projs]
build(['--jdt-warning-as-error', '--projects', ",".join(pnames)])
return [p.definedAnnotationProcessorsDist.path for p in s.projects if p.definedAnnotationProcessorsDist is not None]
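# pylint runs pylint 1.1.x over the suites' Python sources. Files are found via
# 'hg locate' by default or by a directory walk when --walk is given; the mx directory
# and each suite's mxDir are put on PYTHONPATH so suite extensions resolve.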
def pylint(args):
"""run pylint (if available) over Python source files (found by 'hg locate' or by tree walk with -walk)"""
parser = ArgumentParser(prog='mx pylint')
    parser.add_argument('--walk', action='store_true', help='use a tree walk to find .py files')
args = parser.parse_args(args)
rcfile = join(dirname(__file__), '.pylintrc')
if not exists(rcfile):
log('pylint configuration file does not exist: ' + rcfile)
return
try:
output = subprocess.check_output(['pylint', '--version'], stderr=subprocess.STDOUT)
m = re.match(r'.*pylint (\d+)\.(\d+)\.(\d+).*', output, re.DOTALL)
if not m:
log('could not determine pylint version from ' + output)
return
major, minor, micro = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
if major != 1 or minor != 1:
log('require pylint version = 1.1.x (got {0}.{1}.{2})'.format(major, minor, micro))
return
except BaseException:
log('pylint is not available')
return
def findfiles_by_walk():
result = []
for suite in suites(True):
for root, dirs, files in os.walk(suite.dir):
for f in files:
if f.endswith('.py'):
pyfile = join(root, f)
result.append(pyfile)
if 'bin' in dirs:
dirs.remove('bin')
if 'lib' in dirs:
# avoids downloaded .py files
dirs.remove('lib')
return result
def findfiles_by_hg():
result = []
for suite in suites(True):
versioned = subprocess.check_output(['hg', 'locate', '-f'], stderr=subprocess.STDOUT, cwd=suite.dir).split(os.linesep)
for f in versioned:
if f.endswith('.py') and exists(f):
result.append(f)
return result
# Perhaps we should just look in suite.mxDir directories for .py files?
if args.walk:
pyfiles = findfiles_by_walk()
else:
pyfiles = findfiles_by_hg()
env = os.environ.copy()
pythonpath = dirname(__file__)
for suite in suites(True):
pythonpath = os.pathsep.join([pythonpath, suite.mxDir])
env['PYTHONPATH'] = pythonpath
for pyfile in pyfiles:
log('Running pylint on ' + pyfile + '...')
run(['pylint', '--reports=n', '--rcfile=' + rcfile, pyfile], env=env)
"""
Utility for creating and updating a zip file atomically.
"""
class Archiver:
def __init__(self, path):
self.path = path
def __enter__(self):
if self.path:
if not isdir(dirname(self.path)):
os.makedirs(dirname(self.path))
fd, tmp = tempfile.mkstemp(suffix='', prefix=basename(self.path) + '.', dir=dirname(self.path))
self.tmpFd = fd
self.tmpPath = tmp
self.zf = zipfile.ZipFile(tmp, 'w')
else:
self.tmpFd = None
self.tmpPath = None
self.zf = None
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.zf:
self.zf.close()
os.close(self.tmpFd)
# Correct the permissions on the temporary file which is created with restrictive permissions
os.chmod(self.tmpPath, 0o666 & ~currentUmask)
# Atomic on Unix
shutil.move(self.tmpPath, self.path)
def _archive(args):
archive(args)
return 0
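# archive builds the jar for each named project and, for names prefixed with '@',
# the named distribution. For example (the names are illustrative):
#   mx archive com.example.project @EXAMPLE_DIST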
def archive(args):
"""create jar files for projects and distributions"""
parser = ArgumentParser(prog='mx archive')
parser.add_argument('names', nargs=REMAINDER, metavar='[<project>|@<distribution>]...')
args = parser.parse_args(args)
archives = []
for name in args.names:
if name.startswith('@'):
dname = name[1:]
d = distribution(dname)
d.make_archive()
archives.append(d.path)
else:
p = project(name)
archives.append(p.make_archive())
logv("generated archives: " + str(archives))
return archives
def canonicalizeprojects(args):
"""check all project specifications for canonical dependencies
The exit code of this command reflects how many projects have non-canonical dependencies."""
nonCanonical = []
for s in suites(True):
for p in s.projects:
for pkg in p.defined_java_packages():
if not pkg.startswith(p.name):
abort('package in {0} does not have prefix matching project name: {1}'.format(p, pkg))
ignoredDeps = set([name for name in p.deps if project(name, False) is not None])
for pkg in p.imported_java_packages():
for name in p.deps:
dep = project(name, False)
if dep is None:
ignoredDeps.discard(name)
else:
if pkg in dep.defined_java_packages():
ignoredDeps.discard(name)
if pkg in dep.extended_java_packages():
ignoredDeps.discard(name)
if len(ignoredDeps) != 0:
candidates = set()
# Compute dependencies based on projects required by p
for d in sorted_deps():
if not d.defined_java_packages().isdisjoint(p.imported_java_packages()):
candidates.add(d)
# Remove non-canonical candidates
for c in list(candidates):
candidates.difference_update(c.all_deps([], False, False))
candidates = [d.name for d in candidates]
abort('{} does not use any packages defined in these projects: {}\nComputed project dependencies: {}'.format(
p, ', '.join(ignoredDeps), ','.join(candidates)))
excess = frozenset(p.deps) - set(p.canonical_deps())
if len(excess) != 0:
nonCanonical.append(p)
if len(nonCanonical) != 0:
for p in nonCanonical:
canonicalDeps = p.canonical_deps()
if len(canonicalDeps) != 0:
log('Canonical dependencies for project ' + p.name + ' are: [')
for d in canonicalDeps:
log(' "' + d + '",')
log(' ],')
else:
log('Canonical dependencies for project ' + p.name + ' are: []')
return len(nonCanonical)
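# TimeStampFile wraps a marker file whose modification time is compared against other
# files (or TimeStampFile instances) to decide whether cached results are stale; it is
# used below for the checkstyle and IDE-configuration timestamps. A missing marker file
# compares as older than everything.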
class TimeStampFile:
def __init__(self, path):
self.path = path
self.timestamp = os.path.getmtime(path) if exists(path) else None
def isOlderThan(self, arg):
if not self.timestamp:
return True
if isinstance(arg, TimeStampFile):
if arg.timestamp is None:
return False
else:
return arg.timestamp > self.timestamp
elif isinstance(arg, types.ListType):
files = arg
else:
files = [arg]
for f in files:
if os.path.getmtime(f) > self.timestamp:
return True
return False
def exists(self):
return exists(self.path)
def touch(self):
if exists(self.path):
os.utime(self.path, None)
else:
if not isdir(dirname(self.path)):
os.makedirs(dirname(self.path))
            open(self.path, 'a').close()
def checkstyle(args):
"""run Checkstyle on the Java sources
Run Checkstyle over the Java sources. Any errors or warnings
produced by Checkstyle result in a non-zero exit code."""
parser = ArgumentParser(prog='mx checkstyle')
parser.add_argument('-f', action='store_true', dest='force', help='force checking (disables timestamp checking)')
args = parser.parse_args(args)
totalErrors = 0
for p in projects_opt_limit_to_suites():
if p.native:
continue
sourceDirs = p.source_dirs()
config = join(project(p.checkstyleProj).dir, '.checkstyle_checks.xml')
if not exists(config):
            logv('[No Checkstyle configuration found for {0} - skipping]'.format(p))
continue
# skip checking this Java project if its Java compliance level is "higher" than the configured JDK
jdk = java(p.javaCompliance)
assert jdk
for sourceDir in sourceDirs:
javafilelist = []
for root, _, files in os.walk(sourceDir):
javafilelist += [join(root, name) for name in files if name.endswith('.java') and name != 'package-info.java']
if len(javafilelist) == 0:
logv('[no Java sources in {0} - skipping]'.format(sourceDir))
continue
timestamp = TimeStampFile(join(p.suite.mxDir, 'checkstyle-timestamps', sourceDir[len(p.suite.dir) + 1:].replace(os.sep, '_') + '.timestamp'))
mustCheck = False
if not args.force and timestamp.exists():
mustCheck = timestamp.isOlderThan(javafilelist)
else:
mustCheck = True
if not mustCheck:
if _opts.verbose:
log('[all Java sources in {0} already checked - skipping]'.format(sourceDir))
continue
exclude = join(p.dir, '.checkstyle.exclude')
if exists(exclude):
with open(exclude) as f:
# Convert patterns to OS separators
patterns = [name.rstrip().replace('/', os.sep) for name in f.readlines()]
def match(name):
for p in patterns:
if p in name:
if _opts.verbose:
log('excluding: ' + name)
return True
return False
javafilelist = [name for name in javafilelist if not match(name)]
auditfileName = join(p.dir, 'checkstyleOutput.txt')
log('Running Checkstyle on {0} using {1}...'.format(sourceDir, config))
try:
for chunk in _chunk_files_for_command_line(javafilelist):
try:
run_java(['-Xmx1g', '-Xms1g', '-jar', library('CHECKSTYLE').get_path(True), '-f', 'xml', '-c', config, '-o', auditfileName] + chunk, nonZeroIsFatal=False)
finally:
if exists(auditfileName):
errors = []
source = [None]
def start_element(name, attrs):
if name == 'file':
source[0] = attrs['name']
elif name == 'error':
errors.append('{}:{}: {}'.format(source[0], attrs['line'], attrs['message']))
xp = xml.parsers.expat.ParserCreate()
xp.StartElementHandler = start_element
with open(auditfileName) as fp:
xp.ParseFile(fp)
if len(errors) != 0:
map(log, errors)
totalErrors = totalErrors + len(errors)
else:
timestamp.touch()
finally:
if exists(auditfileName):
os.unlink(auditfileName)
return totalErrors
def clean(args, parser=None):
"""remove all class files, images, and executables
Removes all files created by a build, including Java class files, executables, and
generated images.
"""
suppliedParser = parser is not None
parser = parser if suppliedParser else ArgumentParser(prog='mx clean')
parser.add_argument('--no-native', action='store_false', dest='native', help='do not clean native projects')
parser.add_argument('--no-java', action='store_false', dest='java', help='do not clean Java projects')
parser.add_argument('--no-dist', action='store_false', dest='dist', help='do not delete distributions')
args = parser.parse_args(args)
def _rmtree(dirPath):
path = dirPath
if get_os() == 'windows':
path = unicode("\\\\?\\" + dirPath)
shutil.rmtree(path)
def _rmIfExists(name):
if name and os.path.isfile(name):
os.unlink(name)
for p in projects_opt_limit_to_suites():
if p.native:
if args.native:
run([gmake_cmd(), '-C', p.dir, 'clean'])
else:
if args.java:
genDir = p.source_gen_dir()
if genDir != '' and exists(genDir):
log('Clearing {0}...'.format(genDir))
for f in os.listdir(genDir):
_rmtree(join(genDir, f))
outputDir = p.output_dir()
if outputDir != '' and exists(outputDir):
log('Removing {0}...'.format(outputDir))
_rmtree(outputDir)
for configName in ['netbeans-config.zip', 'eclipse-config.zip']:
config = TimeStampFile(join(p.suite.mxDir, configName))
if config.exists():
os.unlink(config.path)
if args.java:
if args.dist:
for d in _dists.keys():
log('Removing distribution {0}...'.format(d))
_rmIfExists(distribution(d).path)
_rmIfExists(distribution(d).sourcesPath)
if suppliedParser:
return args
def about(args):
"""show the 'man page' for mx"""
print __doc__
def help_(args):
"""show help for a given command
With no arguments, print a list of commands and short help for each command.
Given a command name, print help for that command."""
if len(args) == 0:
_argParser.print_help()
return
name = args[0]
if not _commands.has_key(name):
hits = [c for c in _commands.iterkeys() if c.startswith(name)]
if len(hits) == 1:
name = hits[0]
elif len(hits) == 0:
abort('mx: unknown command \'{0}\'\n{1}use "mx help" for more options'.format(name, _format_commands()))
else:
abort('mx: command \'{0}\' is ambiguous\n {1}'.format(name, ' '.join(hits)))
value = _commands[name]
(func, usage) = value[:2]
doc = func.__doc__
if len(value) > 2:
docArgs = value[2:]
fmtArgs = []
for d in docArgs:
if isinstance(d, Callable):
fmtArgs += [d()]
else:
fmtArgs += [str(d)]
doc = doc.format(*fmtArgs)
print 'mx {0} {1}\n\n{2}\n'.format(name, usage, doc)
def projectgraph(args, suite=None):
"""create graph for project structure ("mx projectgraph | dot -Tpdf -oprojects.pdf" or "mx projectgraph --igv")"""
parser = ArgumentParser(prog='mx projectgraph')
parser.add_argument('--igv', action='store_true', help='output to IGV listening on 127.0.0.1:4444')
parser.add_argument('--igv-format', action='store_true', help='output graph in IGV format')
args = parser.parse_args(args)
if args.igv or args.igv_format:
ids = {}
nextToIndex = {}
igv = XMLDoc()
igv.open('graphDocument')
igv.open('group')
igv.open('properties')
igv.element('p', {'name' : 'name'}, 'GraalProjectDependencies')
igv.close('properties')
igv.open('graph', {'name' : 'dependencies'})
igv.open('nodes')
for p in sorted_deps(includeLibs=True, includeJreLibs=True):
ident = len(ids)
ids[p.name] = str(ident)
igv.open('node', {'id' : str(ident)})
igv.open('properties')
igv.element('p', {'name' : 'name'}, p.name)
igv.close('properties')
igv.close('node')
igv.close('nodes')
igv.open('edges')
for p in projects():
fromIndex = 0
for dep in p.canonical_deps():
toIndex = nextToIndex.get(dep, 0)
nextToIndex[dep] = toIndex + 1
igv.element('edge', {'from' : ids[p.name], 'fromIndex' : str(fromIndex), 'to' : ids[dep], 'toIndex' : str(toIndex), 'label' : 'dependsOn'})
fromIndex = fromIndex + 1
igv.close('edges')
igv.close('graph')
igv.close('group')
igv.close('graphDocument')
if args.igv:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 4444))
s.send(igv.xml())
else:
print igv.xml(indent=' ', newl='\n')
return
print 'digraph projects {'
print 'rankdir=BT;'
print 'node [shape=rect];'
for p in projects():
for dep in p.canonical_deps():
print '"' + p.name + '"->"' + dep + '"'
print '}'
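# _source_locator_memento builds the <sourceLookupDirector> XML fragment shared by the
# Eclipse attach and launch configurations below: libraries contribute archive or
# classpath-container entries, projects contribute javaProject entries, and a JRE
# container matching the highest javaCompliance among the project dependencies is appended.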
def _source_locator_memento(deps):
slm = XMLDoc()
slm.open('sourceLookupDirector')
slm.open('sourceContainers', {'duplicates' : 'false'})
javaCompliance = None
for dep in deps:
if dep.isLibrary():
if hasattr(dep, 'eclipse.container'):
memento = XMLDoc().element('classpathContainer', {'path' : getattr(dep, 'eclipse.container')}).xml(standalone='no')
slm.element('classpathContainer', {'memento' : memento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.classpathContainer'})
elif dep.get_source_path(resolve=True):
memento = XMLDoc().element('archive', {'detectRoot' : 'true', 'path' : dep.get_source_path(resolve=True)}).xml(standalone='no')
slm.element('container', {'memento' : memento, 'typeId':'org.eclipse.debug.core.containerType.externalArchive'})
elif dep.isProject():
memento = XMLDoc().element('javaProject', {'name' : dep.name}).xml(standalone='no')
slm.element('container', {'memento' : memento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.javaProject'})
if javaCompliance is None or dep.javaCompliance > javaCompliance:
javaCompliance = dep.javaCompliance
if javaCompliance:
memento = XMLDoc().element('classpathContainer', {'path' : 'org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-' + str(javaCompliance)}).xml(standalone='no')
slm.element('classpathContainer', {'memento' : memento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.classpathContainer'})
else:
memento = XMLDoc().element('classpathContainer', {'path' : 'org.eclipse.jdt.launching.JRE_CONTAINER'}).xml(standalone='no')
slm.element('classpathContainer', {'memento' : memento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.classpathContainer'})
slm.close('sourceContainers')
slm.close('sourceLookupDirector')
return slm
def make_eclipse_attach(suite, hostname, port, name=None, deps=None):
"""
Creates an Eclipse launch configuration file for attaching to a Java process.
"""
if deps is None:
deps = []
slm = _source_locator_memento(deps)
launch = XMLDoc()
launch.open('launchConfiguration', {'type' : 'org.eclipse.jdt.launching.remoteJavaApplication'})
launch.element('stringAttribute', {'key' : 'org.eclipse.debug.core.source_locator_id', 'value' : 'org.eclipse.jdt.launching.sourceLocator.JavaSourceLookupDirector'})
launch.element('stringAttribute', {'key' : 'org.eclipse.debug.core.source_locator_memento', 'value' : '%s'})
launch.element('booleanAttribute', {'key' : 'org.eclipse.jdt.launching.ALLOW_TERMINATE', 'value' : 'true'})
launch.open('mapAttribute', {'key' : 'org.eclipse.jdt.launching.CONNECT_MAP'})
launch.element('mapEntry', {'key' : 'hostname', 'value' : hostname})
launch.element('mapEntry', {'key' : 'port', 'value' : port})
launch.close('mapAttribute')
launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.PROJECT_ATTR', 'value' : ''})
launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.VM_CONNECTOR_ID', 'value' : 'org.eclipse.jdt.launching.socketAttachConnector'})
launch.close('launchConfiguration')
launch = launch.xml(newl='\n', standalone='no') % slm.xml(escape=True, standalone='no')
if name is None:
if len(suites()) == 1:
suitePrefix = ''
else:
suitePrefix = suite.name + '-'
name = suitePrefix + 'attach-' + hostname + '-' + port
eclipseLaunches = join(suite.mxDir, 'eclipse-launches')
if not exists(eclipseLaunches):
os.makedirs(eclipseLaunches)
launchFile = join(eclipseLaunches, name + '.launch')
return update_file(launchFile, launch), launchFile
def make_eclipse_launch(javaArgs, jre, name=None, deps=None):
"""
Creates an Eclipse launch configuration file for running/debugging a Java command.
"""
if deps is None:
deps = []
mainClass = None
vmArgs = []
appArgs = []
cp = None
argsCopy = list(reversed(javaArgs))
while len(argsCopy) != 0:
a = argsCopy.pop()
if a == '-jar':
mainClass = '-jar'
appArgs = list(reversed(argsCopy))
break
if a == '-cp' or a == '-classpath':
assert len(argsCopy) != 0
cp = argsCopy.pop()
vmArgs.append(a)
vmArgs.append(cp)
elif a.startswith('-'):
vmArgs.append(a)
else:
mainClass = a
appArgs = list(reversed(argsCopy))
break
if mainClass is None:
log('Cannot create Eclipse launch configuration without main class or jar file: java ' + ' '.join(javaArgs))
return False
if name is None:
if mainClass == '-jar':
name = basename(appArgs[0])
if len(appArgs) > 1 and not appArgs[1].startswith('-'):
name = name + '_' + appArgs[1]
else:
name = mainClass
name = time.strftime('%Y-%m-%d-%H%M%S_' + name)
if cp is not None:
for e in cp.split(os.pathsep):
for s in suites():
deps += [p for p in s.projects if e == p.output_dir()]
deps += [l for l in s.libs if e == l.get_path(False)]
slm = _source_locator_memento(deps)
launch = XMLDoc()
launch.open('launchConfiguration', {'type' : 'org.eclipse.jdt.launching.localJavaApplication'})
launch.element('stringAttribute', {'key' : 'org.eclipse.debug.core.source_locator_id', 'value' : 'org.eclipse.jdt.launching.sourceLocator.JavaSourceLookupDirector'})
launch.element('stringAttribute', {'key' : 'org.eclipse.debug.core.source_locator_memento', 'value' : '%s'})
launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.JRE_CONTAINER', 'value' : 'org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/' + jre})
launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.MAIN_TYPE', 'value' : mainClass})
launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.PROGRAM_ARGUMENTS', 'value' : ' '.join(appArgs)})
launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.PROJECT_ATTR', 'value' : ''})
launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.VM_ARGUMENTS', 'value' : ' '.join(vmArgs)})
launch.close('launchConfiguration')
launch = launch.xml(newl='\n', standalone='no') % slm.xml(escape=True, standalone='no')
eclipseLaunches = join('mx', 'eclipse-launches')
if not exists(eclipseLaunches):
os.makedirs(eclipseLaunches)
return update_file(join(eclipseLaunches, name + '.launch'), launch)
def eclipseinit(args, buildProcessorJars=True, refreshOnly=False):
"""(re)generate Eclipse project configurations and working sets"""
for s in suites(True):
_eclipseinit_suite(args, s, buildProcessorJars, refreshOnly)
generate_eclipse_workingsets()
def _check_ide_timestamp(suite, configZip, ide):
"""return True if and only if the projects file, eclipse-settings files, and mx itself are all older than configZip"""
suitePyFiles = [join(suite.mxDir, e) for e in os.listdir(suite.mxDir) if e.startswith('suite') and e.endswith('.py')]
if configZip.isOlderThan(suitePyFiles):
return False
# Assume that any mx change might imply changes to the generated IDE files
if configZip.isOlderThan(__file__):
return False
if ide == 'eclipse':
eclipseSettingsDir = join(suite.mxDir, 'eclipse-settings')
if exists(eclipseSettingsDir):
for name in os.listdir(eclipseSettingsDir):
path = join(eclipseSettingsDir, name)
if configZip.isOlderThan(path):
return False
return True
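# _eclipseinit_project (re)generates the per-project Eclipse metadata: .classpath,
# .checkstyle (when a Checkstyle configuration exists), .project with its builders,
# .settings/ preferences (mx defaults overridden by suite and then project settings),
# and .factorypath when the project has annotation processors.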
def _eclipseinit_project(p, files=None, libFiles=None):
assert java(p.javaCompliance)
if not exists(p.dir):
os.makedirs(p.dir)
out = XMLDoc()
out.open('classpath')
for src in p.srcDirs:
srcDir = join(p.dir, src)
if not exists(srcDir):
os.mkdir(srcDir)
out.element('classpathentry', {'kind' : 'src', 'path' : src})
if len(p.annotation_processors()) > 0:
genDir = p.source_gen_dir()
if not exists(genDir):
os.mkdir(genDir)
out.element('classpathentry', {'kind' : 'src', 'path' : 'src_gen'})
if files:
files.append(genDir)
# Every Java program depends on a JRE
out.element('classpathentry', {'kind' : 'con', 'path' : 'org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-' + str(p.javaCompliance)})
if exists(join(p.dir, 'plugin.xml')): # eclipse plugin project
out.element('classpathentry', {'kind' : 'con', 'path' : 'org.eclipse.pde.core.requiredPlugins'})
containerDeps = set()
libraryDeps = set()
projectDeps = set()
for dep in p.all_deps([], True):
if dep == p:
continue
if dep.isLibrary():
if hasattr(dep, 'eclipse.container'):
container = getattr(dep, 'eclipse.container')
containerDeps.add(container)
libraryDeps -= set(dep.all_deps([], True))
else:
libraryDeps.add(dep)
elif dep.isProject():
projectDeps.add(dep)
for dep in containerDeps:
out.element('classpathentry', {'exported' : 'true', 'kind' : 'con', 'path' : dep})
for dep in libraryDeps:
path = dep.path
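        # resolve the library (this may download it) before its path is referenced below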
dep.get_path(resolve=True)
# Relative paths for "lib" class path entries have various semantics depending on the Eclipse
# version being used (e.g. see https://bugs.eclipse.org/bugs/show_bug.cgi?id=274737) so it's
# safest to simply use absolute paths.
path = _make_absolute(path, p.suite.dir)
attributes = {'exported' : 'true', 'kind' : 'lib', 'path' : path}
sourcePath = dep.get_source_path(resolve=True)
if sourcePath is not None:
attributes['sourcepath'] = sourcePath
out.element('classpathentry', attributes)
if libFiles:
libFiles.append(path)
for dep in projectDeps:
out.element('classpathentry', {'combineaccessrules' : 'false', 'exported' : 'true', 'kind' : 'src', 'path' : '/' + dep.name})
out.element('classpathentry', {'kind' : 'output', 'path' : getattr(p, 'eclipse.output', 'bin')})
out.close('classpath')
classpathFile = join(p.dir, '.classpath')
update_file(classpathFile, out.xml(indent='\t', newl='\n'))
if files:
files.append(classpathFile)
csConfig = join(project(p.checkstyleProj).dir, '.checkstyle_checks.xml')
if exists(csConfig):
out = XMLDoc()
dotCheckstyle = join(p.dir, ".checkstyle")
checkstyleConfigPath = '/' + p.checkstyleProj + '/.checkstyle_checks.xml'
out.open('fileset-config', {'file-format-version' : '1.2.0', 'simple-config' : 'true'})
out.open('local-check-config', {'name' : 'Checks', 'location' : checkstyleConfigPath, 'type' : 'project', 'description' : ''})
out.element('additional-data', {'name' : 'protect-config-file', 'value' : 'false'})
out.close('local-check-config')
out.open('fileset', {'name' : 'all', 'enabled' : 'true', 'check-config-name' : 'Checks', 'local' : 'true'})
out.element('file-match-pattern', {'match-pattern' : '.', 'include-pattern' : 'true'})
out.close('fileset')
out.open('filter', {'name' : 'all', 'enabled' : 'true', 'check-config-name' : 'Checks', 'local' : 'true'})
out.element('filter-data', {'value' : 'java'})
out.close('filter')
exclude = join(p.dir, '.checkstyle.exclude')
if exists(exclude):
out.open('filter', {'name' : 'FilesFromPackage', 'enabled' : 'true'})
with open(exclude) as f:
for line in f:
if not line.startswith('#'):
line = line.strip()
exclDir = join(p.dir, line)
assert isdir(exclDir), 'excluded source directory listed in ' + exclude + ' does not exist or is not a directory: ' + exclDir
out.element('filter-data', {'value' : line})
out.close('filter')
out.close('fileset-config')
update_file(dotCheckstyle, out.xml(indent=' ', newl='\n'))
if files:
files.append(dotCheckstyle)
else:
# clean up existing .checkstyle file
dotCheckstyle = join(p.dir, ".checkstyle")
if exists(dotCheckstyle):
os.unlink(dotCheckstyle)
out = XMLDoc()
out.open('projectDescription')
out.element('name', data=p.name)
out.element('comment', data='')
out.element('projects', data='')
out.open('buildSpec')
out.open('buildCommand')
out.element('name', data='org.eclipse.jdt.core.javabuilder')
out.element('arguments', data='')
out.close('buildCommand')
if exists(csConfig):
out.open('buildCommand')
out.element('name', data='net.sf.eclipsecs.core.CheckstyleBuilder')
out.element('arguments', data='')
out.close('buildCommand')
if exists(join(p.dir, 'plugin.xml')): # eclipse plugin project
for buildCommand in ['org.eclipse.pde.ManifestBuilder', 'org.eclipse.pde.SchemaBuilder']:
out.open('buildCommand')
out.element('name', data=buildCommand)
out.element('arguments', data='')
out.close('buildCommand')
if p.definedAnnotationProcessorsDist:
# Create a launcher that will (re)build the annotation processor
# jar any time one of its sources is modified.
dist = p.definedAnnotationProcessorsDist
distProjects = [d for d in dist.sorted_deps(transitive=True) if d.isProject()]
relevantResources = []
        # use a distinct loop variable so that the enclosing project 'p' is not clobbered
        for dp in distProjects:
            for srcDir in dp.source_dirs():
                relevantResources.append(join(dp.name, os.path.relpath(srcDir, dp.dir)))
            relevantResources.append(join(dp.name, os.path.relpath(dp.output_dir(), dp.dir)))
# The path should always be p.name/dir independent of where the workspace actually is.
# So we use the parent folder of the project, whatever that is, to generate such a relative path.
logicalWorkspaceRoot = os.path.dirname(p.dir)
refreshFile = os.path.relpath(p.definedAnnotationProcessorsDist.path, logicalWorkspaceRoot)
_genEclipseBuilder(out, p, 'CreateAnnotationProcessorJar', 'archive @' + dist.name, refresh=True, refreshFile=refreshFile, relevantResources=relevantResources, async=True, xmlIndent='', xmlStandalone='no')
out.close('buildSpec')
out.open('natures')
out.element('nature', data='org.eclipse.jdt.core.javanature')
if exists(csConfig):
out.element('nature', data='net.sf.eclipsecs.core.CheckstyleNature')
if exists(join(p.dir, 'plugin.xml')): # eclipse plugin project
out.element('nature', data='org.eclipse.pde.PluginNature')
out.close('natures')
out.close('projectDescription')
projectFile = join(p.dir, '.project')
update_file(projectFile, out.xml(indent='\t', newl='\n'))
if files:
files.append(projectFile)
settingsDir = join(p.dir, ".settings")
if not exists(settingsDir):
os.mkdir(settingsDir)
# collect the defaults from mxtool
defaultEclipseSettingsDir = join(dirname(__file__), 'eclipse-settings')
esdict = {}
if exists(defaultEclipseSettingsDir):
for name in os.listdir(defaultEclipseSettingsDir):
if isfile(join(defaultEclipseSettingsDir, name)):
esdict[name] = os.path.abspath(join(defaultEclipseSettingsDir, name))
# check for suite overrides
eclipseSettingsDir = join(p.suite.mxDir, 'eclipse-settings')
if exists(eclipseSettingsDir):
for name in os.listdir(eclipseSettingsDir):
if isfile(join(eclipseSettingsDir, name)):
esdict[name] = os.path.abspath(join(eclipseSettingsDir, name))
# check for project overrides
projectSettingsDir = join(p.dir, 'eclipse-settings')
if exists(projectSettingsDir):
for name in os.listdir(projectSettingsDir):
if isfile(join(projectSettingsDir, name)):
esdict[name] = os.path.abspath(join(projectSettingsDir, name))
# copy a possibly modified file to the project's .settings directory
for name, path in esdict.iteritems():
# ignore this file altogether if this project has no annotation processors
if name == "org.eclipse.jdt.apt.core.prefs" and not len(p.annotation_processors()) > 0:
continue
with open(path) as f:
content = f.read()
content = content.replace('${javaCompliance}', str(p.javaCompliance))
if len(p.annotation_processors()) > 0:
content = content.replace('org.eclipse.jdt.core.compiler.processAnnotations=disabled', 'org.eclipse.jdt.core.compiler.processAnnotations=enabled')
update_file(join(settingsDir, name), content)
if files:
files.append(join(settingsDir, name))
processorPath = p.annotation_processors_path()
if processorPath:
out = XMLDoc()
out.open('factorypath')
out.element('factorypathentry', {'kind' : 'PLUGIN', 'id' : 'org.eclipse.jst.ws.annotations.core', 'enabled' : 'true', 'runInBatchMode' : 'false'})
for e in processorPath.split(os.pathsep):
out.element('factorypathentry', {'kind' : 'EXTJAR', 'id' : e, 'enabled' : 'true', 'runInBatchMode' : 'false'})
out.close('factorypath')
update_file(join(p.dir, '.factorypath'), out.xml(indent='\t', newl='\n'))
if files:
files.append(join(p.dir, '.factorypath'))
def _eclipseinit_suite(args, suite, buildProcessorJars=True, refreshOnly=False):
configZip = TimeStampFile(join(suite.mxDir, 'eclipse-config.zip'))
configLibsZip = join(suite.mxDir, 'eclipse-config-libs.zip')
if refreshOnly and not configZip.exists():
return
if _check_ide_timestamp(suite, configZip, 'eclipse'):
logv('[Eclipse configurations are up to date - skipping]')
return
files = []
libFiles = []
if buildProcessorJars:
files += _processorjars_suite(suite)
for p in suite.projects:
if p.native:
continue
_eclipseinit_project(p, files, libFiles)
_, launchFile = make_eclipse_attach(suite, 'localhost', '8000', deps=sorted_deps(projectNames=None, includeLibs=True))
files.append(launchFile)
# Create an Eclipse project for each distribution that will create/update the archive
# for the distribution whenever any (transitively) dependent project of the
# distribution is updated.
for dist in suite.dists:
projectDir = dist.get_ide_project_dir()
if not projectDir:
continue
if not exists(projectDir):
os.makedirs(projectDir)
distProjects = [d for d in dist.sorted_deps(transitive=True) if d.isProject()]
relevantResources = []
for p in distProjects:
for srcDir in p.source_dirs():
relevantResources.append(join(p.name, os.path.relpath(srcDir, p.dir)))
relevantResources.append(join(p.name, os.path.relpath(p.output_dir(), p.dir)))
out = XMLDoc()
out.open('projectDescription')
out.element('name', data=dist.name)
out.element('comment', data='Updates ' + dist.path + ' if a project dependency of ' + dist.name + ' is updated')
out.open('projects')
for p in distProjects:
out.element('project', data=p.name)
for d in dist.distDependencies:
out.element('project', data=d)
out.close('projects')
out.open('buildSpec')
dist.dir = projectDir
dist.javaCompliance = max([p.javaCompliance for p in distProjects])
_genEclipseBuilder(out, dist, 'Create' + dist.name + 'Dist', 'archive @' + dist.name, relevantResources=relevantResources, logToFile=True, refresh=False, async=True)
out.close('buildSpec')
out.open('natures')
out.element('nature', data='org.eclipse.jdt.core.javanature')
out.close('natures')
out.close('projectDescription')
projectFile = join(projectDir, '.project')
update_file(projectFile, out.xml(indent='\t', newl='\n'))
files.append(projectFile)
_zip_files(files, suite.dir, configZip.path)
_zip_files(libFiles, suite.dir, configLibsZip)
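# _zip_files writes the given files into zipPath using paths relative to baseDir,
# going through a temporary file so that the final zip appears atomically.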
def _zip_files(files, baseDir, zipPath):
fd, tmp = tempfile.mkstemp(suffix='', prefix=basename(zipPath), dir=baseDir)
try:
zf = zipfile.ZipFile(tmp, 'w')
for f in sorted(set(files)):
relpath = os.path.relpath(f, baseDir)
arcname = relpath.replace(os.sep, '/')
zf.write(f, arcname)
zf.close()
os.close(fd)
# Atomic on Unix
shutil.move(tmp, zipPath)
# Correct the permissions on the temporary file which is created with restrictive permissions
os.chmod(zipPath, 0o666 & ~currentUmask)
finally:
if exists(tmp):
os.remove(tmp)
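# _genEclipseBuilder emits an Eclipse external-tool builder: a .externalToolBuilders
# launch file that invokes mx.sh (or mx.cmd on Windows) with mxCommand, plus the
# matching <buildCommand> entry appended to the .project document passed in as
# dotProjectDoc.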
def _genEclipseBuilder(dotProjectDoc, p, name, mxCommand, refresh=True, refreshFile=None, relevantResources=None, async=False, logToConsole=False, logToFile=False, appendToLogFile=True, xmlIndent='\t', xmlStandalone=None):
externalToolDir = join(p.dir, '.externalToolBuilders')
launchOut = XMLDoc()
consoleOn = 'true' if logToConsole else 'false'
launchOut.open('launchConfiguration', {'type' : 'org.eclipse.ui.externaltools.ProgramBuilderLaunchConfigurationType'})
launchOut.element('booleanAttribute', {'key' : 'org.eclipse.debug.core.capture_output', 'value': consoleOn})
launchOut.open('mapAttribute', {'key' : 'org.eclipse.debug.core.environmentVariables'})
launchOut.element('mapEntry', {'key' : 'JAVA_HOME', 'value' : java(p.javaCompliance).jdk})
launchOut.element('mapEntry', {'key' : 'EXTRA_JAVA_HOMES', 'value' : _opts.extra_java_homes})
launchOut.close('mapAttribute')
if refresh:
if refreshFile is None:
refreshScope = '${project}'
else:
refreshScope = '${working_set:<?xml version="1.0" encoding="UTF-8"?><resources><item path="' + refreshFile + '" type="1"/></resources>}'
launchOut.element('booleanAttribute', {'key' : 'org.eclipse.debug.core.ATTR_REFRESH_RECURSIVE', 'value': 'false'})
launchOut.element('stringAttribute', {'key' : 'org.eclipse.debug.core.ATTR_REFRESH_SCOPE', 'value': refreshScope})
if relevantResources is not None:
resources = '${working_set:<?xml version="1.0" encoding="UTF-8"?><resources>'
for relevantResource in relevantResources:
resources += '<item path="' + relevantResource + '" type="2" />'
resources += '</resources>}'
launchOut.element('stringAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_BUILD_SCOPE', 'value': resources})
launchOut.element('booleanAttribute', {'key' : 'org.eclipse.debug.ui.ATTR_CONSOLE_OUTPUT_ON', 'value': consoleOn})
launchOut.element('booleanAttribute', {'key' : 'org.eclipse.debug.ui.ATTR_LAUNCH_IN_BACKGROUND', 'value': 'true' if async else 'false'})
if logToFile:
logFile = join(externalToolDir, name + '.log')
launchOut.element('stringAttribute', {'key' : 'org.eclipse.debug.ui.ATTR_CAPTURE_IN_FILE', 'value': logFile})
launchOut.element('booleanAttribute', {'key' : 'org.eclipse.debug.ui.ATTR_APPEND_TO_FILE', 'value': 'true' if appendToLogFile else 'false'})
# expect to find the OS command to invoke mx in the same directory
baseDir = dirname(os.path.abspath(__file__))
cmd = 'mx.sh'
if get_os() == 'windows':
cmd = 'mx.cmd'
cmdPath = join(baseDir, cmd)
if not os.path.exists(cmdPath):
        # backwards compatibility for when the commands lived in the parent of the mxtool directory
cmdPath = join(dirname(baseDir), cmd)
if not os.path.exists(cmdPath):
abort('cannot locate ' + cmd)
launchOut.element('stringAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_LOCATION', 'value': cmdPath})
launchOut.element('stringAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_RUN_BUILD_KINDS', 'value': 'full,incremental,auto,'})
launchOut.element('stringAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_TOOL_ARGUMENTS', 'value': mxCommand})
launchOut.element('booleanAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_TRIGGERS_CONFIGURED', 'value': 'true'})
launchOut.element('stringAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_WORKING_DIRECTORY', 'value': p.suite.dir})
launchOut.close('launchConfiguration')
if not exists(externalToolDir):
os.makedirs(externalToolDir)
update_file(join(externalToolDir, name + '.launch'), launchOut.xml(indent=xmlIndent, standalone=xmlStandalone, newl='\n'))
dotProjectDoc.open('buildCommand')
dotProjectDoc.element('name', data='org.eclipse.ui.externaltools.ExternalToolBuilder')
dotProjectDoc.element('triggers', data='auto,full,incremental,')
dotProjectDoc.open('arguments')
dotProjectDoc.open('dictionary')
dotProjectDoc.element('key', data='LaunchConfigHandle')
dotProjectDoc.element('value', data='<project>/.externalToolBuilders/' + name + '.launch')
dotProjectDoc.close('dictionary')
dotProjectDoc.open('dictionary')
dotProjectDoc.element('key', data='incclean')
dotProjectDoc.element('value', data='true')
dotProjectDoc.close('dictionary')
dotProjectDoc.close('arguments')
dotProjectDoc.close('buildCommand')
def generate_eclipse_workingsets():
"""
Populate the workspace's working set configuration with working sets generated from project data for the primary suite
If the workspace already contains working set definitions, the existing ones will be retained and extended.
    If mx/env does not provide a WORKSPACE definition pointing to the workspace root directory, a parent search from the primary suite directory is performed.
    If no workspace root directory can be identified, the primary suite directory is used and the user has to move the generated workingsets.xml into place by hand.
"""
# identify the location where to look for workingsets.xml
wsfilename = 'workingsets.xml'
wsloc = '.metadata/.plugins/org.eclipse.ui.workbench'
if os.environ.has_key('WORKSPACE'):
expected_wsroot = os.environ['WORKSPACE']
else:
expected_wsroot = _primary_suite.dir
wsroot = _find_eclipse_wsroot(expected_wsroot)
if wsroot is None:
# failed to find it
wsroot = expected_wsroot
wsdir = join(wsroot, wsloc)
if not exists(wsdir):
wsdir = wsroot
logv('Could not find Eclipse metadata directory. Please place ' + wsfilename + ' in ' + wsloc + ' manually.')
wspath = join(wsdir, wsfilename)
# gather working set info from project data
workingSets = dict()
for p in projects():
if p.workingSets is None:
continue
for w in p.workingSets.split(","):
if not workingSets.has_key(w):
workingSets[w] = [p.name]
else:
workingSets[w].append(p.name)
if exists(wspath):
wsdoc = _copy_workingset_xml(wspath, workingSets)
else:
wsdoc = _make_workingset_xml(workingSets)
update_file(wspath, wsdoc.xml(newl='\n'))
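# _find_eclipse_wsroot walks up from wsdir until a directory containing '.metadata'
# (an Eclipse workspace root) is found, returning None at the filesystem root.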
def _find_eclipse_wsroot(wsdir):
md = join(wsdir, '.metadata')
if exists(md):
return wsdir
split = os.path.split(wsdir)
if split[0] == wsdir: # root directory
return None
else:
return _find_eclipse_wsroot(split[0])
def _make_workingset_xml(workingSets):
wsdoc = XMLDoc()
wsdoc.open('workingSetManager')
for w in sorted(workingSets.keys()):
_workingset_open(wsdoc, w)
for p in workingSets[w]:
_workingset_element(wsdoc, p)
wsdoc.close('workingSet')
wsdoc.close('workingSetManager')
return wsdoc
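# _copy_workingset_xml streams the existing workingsets.xml through an expat parser,
# copying existing entries while appending projects that are missing from working sets
# known to mx, then adding any working sets not yet present in the file.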
def _copy_workingset_xml(wspath, workingSets):
target = XMLDoc()
target.open('workingSetManager')
parser = xml.parsers.expat.ParserCreate()
class ParserState(object):
def __init__(self):
self.current_ws_name = 'none yet'
self.current_ws = None
self.seen_ws = list()
self.seen_projects = list()
self.aggregate_ws = False
self.nested_ws = False
ps = ParserState()
# parsing logic
def _ws_start(name, attributes):
if name == 'workingSet':
if attributes.has_key('name'):
ps.current_ws_name = attributes['name']
if attributes.has_key('aggregate') and attributes['aggregate'] == 'true':
ps.aggregate_ws = True
ps.current_ws = None
elif workingSets.has_key(ps.current_ws_name):
ps.current_ws = workingSets[ps.current_ws_name]
ps.seen_ws.append(ps.current_ws_name)
ps.seen_projects = list()
else:
ps.current_ws = None
target.open(name, attributes)
parser.StartElementHandler = _ws_item
def _ws_end(name):
closeAndResetHandler = False
if name == 'workingSet':
if ps.aggregate_ws:
if ps.nested_ws:
ps.nested_ws = False
else:
ps.aggregate_ws = False
closeAndResetHandler = True
else:
if not ps.current_ws is None:
for p in ps.current_ws:
if not p in ps.seen_projects:
_workingset_element(target, p)
closeAndResetHandler = True
if closeAndResetHandler:
target.close('workingSet')
parser.StartElementHandler = _ws_start
elif name == 'workingSetManager':
# process all working sets that are new to the file
for w in sorted(workingSets.keys()):
if not w in ps.seen_ws:
_workingset_open(target, w)
for p in workingSets[w]:
_workingset_element(target, p)
target.close('workingSet')
def _ws_item(name, attributes):
if name == 'item':
if ps.current_ws is None:
target.element(name, attributes)
elif not attributes.has_key('elementID') and attributes.has_key('factoryID') and attributes.has_key('path') and attributes.has_key('type'):
target.element(name, attributes)
p_name = attributes['path'][1:] # strip off the leading '/'
ps.seen_projects.append(p_name)
else:
p_name = attributes['elementID'][1:] # strip off the leading '='
_workingset_element(target, p_name)
ps.seen_projects.append(p_name)
elif name == 'workingSet':
ps.nested_ws = True
target.element(name, attributes)
# process document
parser.StartElementHandler = _ws_start
parser.EndElementHandler = _ws_end
with open(wspath, 'r') as wsfile:
parser.ParseFile(wsfile)
target.close('workingSetManager')
return target
def _workingset_open(wsdoc, ws):
wsdoc.open('workingSet', {'editPageID': 'org.eclipse.jdt.ui.JavaWorkingSetPage', 'factoryID': 'org.eclipse.ui.internal.WorkingSetFactory', 'id': 'wsid_' + ws, 'label': ws, 'name': ws})
def _workingset_element(wsdoc, p):
wsdoc.element('item', {'elementID': '=' + p, 'factoryID': 'org.eclipse.jdt.ui.PersistableJavaElementFactory'})
def netbeansinit(args, refreshOnly=False, buildProcessorJars=True):
"""(re)generate NetBeans project configurations"""
for suite in suites(True):
_netbeansinit_suite(args, suite, refreshOnly, buildProcessorJars)
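# _netbeansinit_project generates the NetBeans metadata for a project: an Ant build.xml
# whose -post-compile target re-runs 'mx archive @GRAAL', nbproject/project.xml describing
# inter-project references, and nbproject/project.properties with the classpath, platform
# and annotation-processor settings.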
def _netbeansinit_project(p, jdks=None, files=None, libFiles=None):
if not exists(join(p.dir, 'nbproject')):
os.makedirs(join(p.dir, 'nbproject'))
jdk = java(p.javaCompliance)
assert jdk
if jdks:
jdks.add(jdk)
out = XMLDoc()
out.open('project', {'name' : p.name, 'default' : 'default', 'basedir' : '.'})
out.element('description', data='Builds, tests, and runs the project ' + p.name + '.')
out.element('import', {'file' : 'nbproject/build-impl.xml'})
out.open('target', {'name' : '-post-compile'})
out.open('exec', {'executable' : sys.executable})
out.element('env', {'key' : 'JAVA_HOME', 'value' : jdk.jdk})
out.element('arg', {'value' : os.path.abspath(__file__)})
out.element('arg', {'value' : 'archive'})
out.element('arg', {'value' : '@GRAAL'})
out.close('exec')
out.close('target')
out.close('project')
update_file(join(p.dir, 'build.xml'), out.xml(indent='\t', newl='\n'))
if files:
files.append(join(p.dir, 'build.xml'))
out = XMLDoc()
out.open('project', {'xmlns' : 'http://www.netbeans.org/ns/project/1'})
out.element('type', data='org.netbeans.modules.java.j2seproject')
out.open('configuration')
out.open('data', {'xmlns' : 'http://www.netbeans.org/ns/j2se-project/3'})
out.element('name', data=p.name)
out.element('explicit-platform', {'explicit-source-supported' : 'true'})
out.open('source-roots')
out.element('root', {'id' : 'src.dir'})
if len(p.annotation_processors()) > 0:
out.element('root', {'id' : 'src.ap-source-output.dir', 'name' : 'Generated Packages'})
out.close('source-roots')
out.open('test-roots')
out.close('test-roots')
out.close('data')
firstDep = True
for dep in p.all_deps([], includeLibs=False, includeAnnotationProcessors=True):
if dep == p:
continue
if dep.isProject():
n = dep.name.replace('.', '_')
if firstDep:
out.open('references', {'xmlns' : 'http://www.netbeans.org/ns/ant-project-references/1'})
firstDep = False
out.open('reference')
out.element('foreign-project', data=n)
out.element('artifact-type', data='jar')
out.element('script', data='build.xml')
out.element('target', data='jar')
out.element('clean-target', data='clean')
out.element('id', data='jar')
out.close('reference')
if not firstDep:
out.close('references')
out.close('configuration')
out.close('project')
update_file(join(p.dir, 'nbproject', 'project.xml'), out.xml(indent=' ', newl='\n'))
if files:
files.append(join(p.dir, 'nbproject', 'project.xml'))
out = StringIO.StringIO()
jdkPlatform = 'JDK_' + str(jdk.version)
annotationProcessorEnabled = "false"
annotationProcessorSrcFolder = ""
if len(p.annotation_processors()) > 0:
annotationProcessorEnabled = "true"
genSrcDir = p.source_gen_dir()
if not exists(genSrcDir):
os.makedirs(genSrcDir)
annotationProcessorSrcFolder = "src.ap-source-output.dir=" + genSrcDir
content = """
annotation.processing.enabled=""" + annotationProcessorEnabled + """
annotation.processing.enabled.in.editor=""" + annotationProcessorEnabled + """
annotation.processing.processors.list=
annotation.processing.run.all.processors=true
application.title=""" + p.name + """
application.vendor=mx
build.classes.dir=${build.dir}
build.classes.excludes=**/*.java,**/*.form
# This directory is removed when the project is cleaned:
build.dir=bin
build.generated.sources.dir=${build.dir}/generated-sources
# Only compile against the classpath explicitly listed here:
build.sysclasspath=ignore
build.test.classes.dir=${build.dir}/test/classes
build.test.results.dir=${build.dir}/test/results
# Uncomment to specify the preferred debugger connection transport:
#debug.transport=dt_socket
debug.classpath=\\
${run.classpath}
debug.test.classpath=\\
${run.test.classpath}
# This directory is removed when the project is cleaned:
dist.dir=dist
dist.jar=${dist.dir}/""" + p.name + """.jar
dist.javadoc.dir=${dist.dir}/javadoc
endorsed.classpath=
excludes=
includes=**
jar.compress=false
# Space-separated list of extra javac options
javac.compilerargs=-XDignore.symbol.file
javac.deprecation=false
javac.source=""" + str(p.javaCompliance) + """
javac.target=""" + str(p.javaCompliance) + """
javac.test.classpath=\\
${javac.classpath}:\\
${build.classes.dir}
javadoc.additionalparam=
javadoc.author=false
javadoc.encoding=${source.encoding}
javadoc.noindex=false
javadoc.nonavbar=false
javadoc.notree=false
javadoc.private=false
javadoc.splitindex=true
javadoc.use=true
javadoc.version=false
javadoc.windowtitle=
main.class=
manifest.file=manifest.mf
meta.inf.dir=${src.dir}/META-INF
mkdist.disabled=false
platforms.""" + jdkPlatform + """.home=""" + jdk.jdk + """
platform.active=""" + jdkPlatform + """
run.classpath=\\
${javac.classpath}:\\
${build.classes.dir}
# Space-separated list of JVM arguments used when running the project
# (you may also define separate properties like run-sys-prop.name=value instead of -Dname=value
# or test-sys-prop.name=value to set system properties for unit tests):
run.jvmargs=
run.test.classpath=\\
${javac.test.classpath}:\\
${build.test.classes.dir}
test.src.dir=./test
""" + annotationProcessorSrcFolder + """
source.encoding=UTF-8""".replace(':', os.pathsep).replace('/', os.sep)
print >> out, content
mainSrc = True
for src in p.srcDirs:
srcDir = join(p.dir, src)
if not exists(srcDir):
os.mkdir(srcDir)
ref = 'file.reference.' + p.name + '-' + src
print >> out, ref + '=' + src
if mainSrc:
print >> out, 'src.dir=${' + ref + '}'
mainSrc = False
else:
print >> out, 'src.' + src + '.dir=${' + ref + '}'
javacClasspath = []
deps = p.all_deps([], True)
annotationProcessorOnlyDeps = []
if len(p.annotation_processors()) > 0:
for ap in p.annotation_processors():
apDep = dependency(ap)
if not apDep in deps:
deps.append(apDep)
annotationProcessorOnlyDeps.append(apDep)
annotationProcessorReferences = []
for dep in deps:
if dep == p:
continue
if dep.isLibrary():
path = dep.get_path(resolve=True)
if path:
if os.sep == '\\':
path = path.replace('\\', '\\\\')
ref = 'file.reference.' + dep.name + '-bin'
print >> out, ref + '=' + path
if libFiles:
libFiles.append(path)
elif dep.isProject():
n = dep.name.replace('.', '_')
relDepPath = os.path.relpath(dep.dir, p.dir).replace(os.sep, '/')
ref = 'reference.' + n + '.jar'
print >> out, 'project.' + n + '=' + relDepPath
print >> out, ref + '=${project.' + n + '}/dist/' + dep.name + '.jar'
if not dep in annotationProcessorOnlyDeps:
javacClasspath.append('${' + ref + '}')
else:
annotationProcessorReferences.append('${' + ref + '}')
print >> out, 'javac.classpath=\\\n ' + (os.pathsep + '\\\n ').join(javacClasspath)
print >> out, 'javac.processorpath=' + (os.pathsep + '\\\n ').join(['${javac.classpath}'] + annotationProcessorReferences)
print >> out, 'javac.test.processorpath=' + (os.pathsep + '\\\n ').join(['${javac.test.classpath}'] + annotationProcessorReferences)
update_file(join(p.dir, 'nbproject', 'project.properties'), out.getvalue())
out.close()
if files:
files.append(join(p.dir, 'nbproject', 'project.properties'))
def _netbeansinit_suite(args, suite, refreshOnly=False, buildProcessorJars=True):
configZip = TimeStampFile(join(suite.mxDir, 'netbeans-config.zip'))
    configLibsZip = join(suite.mxDir, 'netbeans-config-libs.zip')
if refreshOnly and not configZip.exists():
return
if _check_ide_timestamp(suite, configZip, 'netbeans'):
logv('[NetBeans configurations are up to date - skipping]')
return
files = []
libFiles = []
jdks = set()
for p in suite.projects:
if p.native:
continue
if exists(join(p.dir, 'plugin.xml')): # eclipse plugin project
continue
_netbeansinit_project(p, jdks, files, libFiles)
log('If using NetBeans:')
# http://stackoverflow.com/questions/24720665/cant-resolve-jdk-internal-package
log(' 1. Edit etc/netbeans.conf in your NetBeans installation and modify netbeans_default_options variable to include "-J-DCachingArchiveProvider.disableCtSym=true"')
log(' 2. Ensure that the following platform(s) are defined (Tools -> Java Platforms):')
for jdk in jdks:
log(' JDK_' + str(jdk.version))
log(' 3. Open/create a Project Group for the directory containing the projects (File -> Project Group -> New Group... -> Folder of Projects)')
_zip_files(files, suite.dir, configZip.path)
_zip_files(libFiles, suite.dir, configLibsZip)
def intellijinit(args, refreshOnly=False):
"""(re)generate Intellij project configurations"""
for suite in suites(True):
_intellij_suite(args, suite, refreshOnly)
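# _intellij_suite writes the IntelliJ IDEA project layout: .idea/modules.xml plus one
# .iml module file per project, per-library descriptors under .idea/libraries,
# annotation-processor profiles in .idea/compiler.xml and the JDK setup in .idea/misc.xml.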
def _intellij_suite(args, suite, refreshOnly=False):
libraries = set()
ideaProjectDirectory = join(suite.dir, '.idea')
if not exists(ideaProjectDirectory):
os.mkdir(ideaProjectDirectory)
nameFile = join(ideaProjectDirectory, '.name')
update_file(nameFile, "Graal")
modulesXml = XMLDoc()
modulesXml.open('project', attributes={'version': '4'})
modulesXml.open('component', attributes={'name': 'ProjectModuleManager'})
modulesXml.open('modules')
def _intellij_exclude_if_exists(xml, p, name):
path = join(p.dir, name)
if exists(path):
xml.element('excludeFolder', attributes={'url':'file://$MODULE_DIR$/' + name})
annotationProcessorProfiles = {}
def _complianceToIntellijLanguageLevel(compliance):
return 'JDK_1_' + str(compliance.value)
# create the modules (1 module = 1 Intellij project)
for p in suite.projects:
if p.native:
continue
assert java(p.javaCompliance)
if not exists(p.dir):
os.makedirs(p.dir)
annotationProcessorProfileKey = tuple(p.annotation_processors())
if not annotationProcessorProfileKey in annotationProcessorProfiles:
annotationProcessorProfiles[annotationProcessorProfileKey] = [p]
else:
annotationProcessorProfiles[annotationProcessorProfileKey].append(p)
intellijLanguageLevel = _complianceToIntellijLanguageLevel(p.javaCompliance)
moduleXml = XMLDoc()
moduleXml.open('module', attributes={'type': 'JAVA_MODULE', 'version': '4'})
moduleXml.open('component', attributes={'name': 'NewModuleRootManager', 'LANGUAGE_LEVEL': intellijLanguageLevel, 'inherit-compiler-output': 'false'})
moduleXml.element('output', attributes={'url': 'file://$MODULE_DIR$/bin'})
moduleXml.element('exclude-output')
moduleXml.open('content', attributes={'url': 'file://$MODULE_DIR$'})
for src in p.srcDirs:
srcDir = join(p.dir, src)
if not exists(srcDir):
os.mkdir(srcDir)
moduleXml.element('sourceFolder', attributes={'url':'file://$MODULE_DIR$/' + src, 'isTestSource': 'false'})
if len(p.annotation_processors()) > 0:
genDir = p.source_gen_dir()
if not exists(genDir):
os.mkdir(genDir)
moduleXml.element('sourceFolder', attributes={'url':'file://$MODULE_DIR$/' + os.path.relpath(genDir, p.dir), 'isTestSource': 'false'})
for name in ['.externalToolBuilders', '.settings', 'nbproject']:
_intellij_exclude_if_exists(moduleXml, p, name)
moduleXml.close('content')
moduleXml.element('orderEntry', attributes={'type': 'jdk', 'jdkType': 'JavaSDK', 'jdkName': str(p.javaCompliance)})
moduleXml.element('orderEntry', attributes={'type': 'sourceFolder', 'forTests': 'false'})
deps = p.all_deps([], True, includeAnnotationProcessors=True)
for dep in deps:
if dep == p:
continue
if dep.isLibrary():
libraries.add(dep)
moduleXml.element('orderEntry', attributes={'type': 'library', 'name': dep.name, 'level': 'project'})
elif dep.isProject():
moduleXml.element('orderEntry', attributes={'type': 'module', 'module-name': dep.name})
moduleXml.close('component')
moduleXml.close('module')
moduleFile = join(p.dir, p.name + '.iml')
update_file(moduleFile, moduleXml.xml(indent=' ', newl='\n'))
moduleFilePath = "$PROJECT_DIR$/" + os.path.relpath(moduleFile, suite.dir)
modulesXml.element('module', attributes={'fileurl': 'file://' + moduleFilePath, 'filepath': moduleFilePath})
modulesXml.close('modules')
modulesXml.close('component')
modulesXml.close('project')
moduleXmlFile = join(ideaProjectDirectory, 'modules.xml')
update_file(moduleXmlFile, modulesXml.xml(indent=' ', newl='\n'))
# TODO What about cross-suite dependencies?
librariesDirectory = join(ideaProjectDirectory, 'libraries')
if not exists(librariesDirectory):
os.mkdir(librariesDirectory)
# Setup the libraries that were used above
# TODO: setup all the libraries from the suite regardless of usage?
for library in libraries:
libraryXml = XMLDoc()
libraryXml.open('component', attributes={'name': 'libraryTable'})
libraryXml.open('library', attributes={'name': library.name})
libraryXml.open('CLASSES')
libraryXml.element('root', attributes={'url': 'jar://$PROJECT_DIR$/' + os.path.relpath(library.get_path(True), suite.dir) + '!/'})
libraryXml.close('CLASSES')
libraryXml.element('JAVADOC')
if library.sourcePath:
libraryXml.open('SOURCES')
libraryXml.element('root', attributes={'url': 'jar://$PROJECT_DIR$/' + os.path.relpath(library.get_source_path(True), suite.dir) + '!/'})
libraryXml.close('SOURCES')
else:
libraryXml.element('SOURCES')
libraryXml.close('library')
libraryXml.close('component')
libraryFile = join(librariesDirectory, library.name + '.xml')
update_file(libraryFile, libraryXml.xml(indent=' ', newl='\n'))
# Set annotation processor profiles up, and link them to modules in compiler.xml
compilerXml = XMLDoc()
compilerXml.open('project', attributes={'version': '4'})
compilerXml.open('component', attributes={'name': 'CompilerConfiguration'})
compilerXml.element('option', attributes={'name': "DEFAULT_COMPILER", 'value': 'Javac'})
compilerXml.element('resourceExtensions')
compilerXml.open('wildcardResourcePatterns')
compilerXml.element('entry', attributes={'name': '!?*.java'})
compilerXml.close('wildcardResourcePatterns')
if annotationProcessorProfiles:
compilerXml.open('annotationProcessing')
for processors, modules in annotationProcessorProfiles.items():
compilerXml.open('profile', attributes={'default': 'false', 'name': '-'.join(processors), 'enabled': 'true'})
compilerXml.element('sourceOutputDir', attributes={'name': 'src_gen'}) # TODO use p.source_gen_dir() ?
compilerXml.element('outputRelativeToContentRoot', attributes={'value': 'true'})
compilerXml.open('processorPath', attributes={'useClasspath': 'false'})
for apName in processors:
pDep = dependency(apName)
for entry in pDep.all_deps([], True):
if entry.isLibrary():
compilerXml.element('entry', attributes={'name': '$PROJECT_DIR$/' + os.path.relpath(entry.path, suite.dir)})
elif entry.isProject():
assert entry.isProject()
compilerXml.element('entry', attributes={'name': '$PROJECT_DIR$/' + os.path.relpath(entry.output_dir(), suite.dir)})
compilerXml.close('processorPath')
for module in modules:
compilerXml.element('module', attributes={'name': module.name})
compilerXml.close('profile')
compilerXml.close('annotationProcessing')
compilerXml.close('component')
compilerXml.close('project')
compilerFile = join(ideaProjectDirectory, 'compiler.xml')
update_file(compilerFile, compilerXml.xml(indent=' ', newl='\n'))
    # Write misc.xml for the global JDK config
miscXml = XMLDoc()
miscXml.open('project', attributes={'version': '4'})
miscXml.element('component', attributes={'name': 'ProjectRootManager', 'version': '2', 'languageLevel': _complianceToIntellijLanguageLevel(java().javaCompliance), 'project-jdk-name': str(java().javaCompliance), 'project-jdk-type': 'JavaSDK'})
miscXml.close('project')
miscFile = join(ideaProjectDirectory, 'misc.xml')
update_file(miscFile, miscXml.xml(indent=' ', newl='\n'))
# TODO look into copyright settings
# TODO should add vcs.xml support
def ideclean(args):
"""remove all Eclipse and NetBeans project configurations"""
def rm(path):
if exists(path):
os.remove(path)
for s in suites():
rm(join(s.mxDir, 'eclipse-config.zip'))
rm(join(s.mxDir, 'netbeans-config.zip'))
shutil.rmtree(join(s.dir, '.idea'), ignore_errors=True)
for p in projects():
if p.native:
continue
shutil.rmtree(join(p.dir, '.settings'), ignore_errors=True)
shutil.rmtree(join(p.dir, '.externalToolBuilders'), ignore_errors=True)
shutil.rmtree(join(p.dir, 'nbproject'), ignore_errors=True)
rm(join(p.dir, '.classpath'))
rm(join(p.dir, '.checkstyle'))
rm(join(p.dir, '.project'))
rm(join(p.dir, '.factorypath'))
rm(join(p.dir, p.name + '.iml'))
rm(join(p.dir, 'build.xml'))
rm(join(p.dir, 'eclipse-build.xml'))
try:
rm(join(p.dir, p.name + '.jar'))
except:
log("Error removing {0}".format(p.name + '.jar'))
for d in _dists.itervalues():
if d.get_ide_project_dir():
shutil.rmtree(d.get_ide_project_dir(), ignore_errors=True)
def ideinit(args, refreshOnly=False, buildProcessorJars=True):
"""(re)generate Eclipse, NetBeans and Intellij project configurations"""
eclipseinit(args, refreshOnly=refreshOnly, buildProcessorJars=buildProcessorJars)
netbeansinit(args, refreshOnly=refreshOnly, buildProcessorJars=buildProcessorJars)
intellijinit(args, refreshOnly=refreshOnly)
if not refreshOnly:
fsckprojects([])
def fsckprojects(args):
"""find directories corresponding to deleted Java projects and delete them"""
for suite in suites(True):
projectDirs = [p.dir for p in suite.projects]
for dirpath, dirnames, files in os.walk(suite.dir):
if dirpath == suite.dir:
# no point in traversing .hg or lib/
dirnames[:] = [d for d in dirnames if d not in ['.hg', 'lib']]
elif dirpath in projectDirs:
# don't traverse subdirs of an existing project in this suite
dirnames[:] = []
else:
projectConfigFiles = frozenset(['.classpath', 'nbproject'])
indicators = projectConfigFiles.intersection(files)
if len(indicators) != 0:
if not sys.stdout.isatty() or ask_yes_no(dirpath + ' looks like a removed project -- delete it', 'n'):
shutil.rmtree(dirpath)
log('Deleted ' + dirpath)
def javadoc(args, parser=None, docDir='javadoc', includeDeps=True, stdDoclet=True):
"""generate javadoc for some/all Java projects"""
parser = ArgumentParser(prog='mx javadoc') if parser is None else parser
parser.add_argument('-d', '--base', action='store', help='base directory for output')
parser.add_argument('--unified', action='store_true', help='put javadoc in a single directory instead of one per project')
parser.add_argument('--force', action='store_true', help='(re)generate javadoc even if package-list file exists')
parser.add_argument('--projects', action='store', help='comma separated projects to process (omit to process all projects)')
parser.add_argument('--Wapi', action='store_true', dest='warnAPI', help='show warnings about using internal APIs')
parser.add_argument('--argfile', action='store', help='name of file containing extra javadoc options')
parser.add_argument('--arg', action='append', dest='extra_args', help='extra Javadoc arguments (e.g. --arg @-use)', metavar='@<arg>', default=[])
parser.add_argument('-m', '--memory', action='store', help='-Xmx value to pass to underlying JVM')
parser.add_argument('--packages', action='store', help='comma separated packages to process (omit to process all packages)')
parser.add_argument('--exclude-packages', action='store', help='comma separated packages to exclude')
args = parser.parse_args(args)
# build list of projects to be processed
if args.projects is not None:
candidates = [project(name) for name in args.projects.split(',')]
else:
candidates = projects_opt_limit_to_suites()
# optionally restrict packages within a project
packages = []
if args.packages is not None:
packages = [name for name in args.packages.split(',')]
exclude_packages = []
if args.exclude_packages is not None:
exclude_packages = [name for name in args.exclude_packages.split(',')]
def outDir(p):
if args.base is None:
return join(p.dir, docDir)
return join(args.base, p.name, docDir)
def check_package_list(p):
return not exists(join(outDir(p), 'package-list'))
def assess_candidate(p, projects):
if p in projects:
return False
if args.force or args.unified or check_package_list(p):
projects.append(p)
return True
return False
projects = []
for p in candidates:
if not p.native:
if includeDeps:
deps = p.all_deps([], includeLibs=False, includeSelf=False)
for d in deps:
assess_candidate(d, projects)
if not assess_candidate(p, projects):
logv('[package-list file exists - skipping {0}]'.format(p.name))
def find_packages(sourceDirs, pkgs=None):
if pkgs is None:
pkgs = set()
for sourceDir in sourceDirs:
for root, _, files in os.walk(sourceDir):
if len([name for name in files if name.endswith('.java')]) != 0:
pkg = root[len(sourceDir) + 1:].replace(os.sep, '.')
if len(packages) == 0 or pkg in packages:
if len(exclude_packages) == 0 or not pkg in exclude_packages:
pkgs.add(pkg)
return pkgs
extraArgs = [a.lstrip('@') for a in args.extra_args]
if args.argfile is not None:
extraArgs += ['@' + args.argfile]
memory = '2g'
if args.memory is not None:
memory = args.memory
memory = '-J-Xmx' + memory
if not args.unified:
for p in projects:
# The project must be built to ensure javadoc can find class files for all referenced classes
build(['--no-native', '--projects', p.name])
pkgs = find_packages(p.source_dirs(), set())
deps = p.all_deps([], includeLibs=False, includeSelf=False)
links = ['-link', 'http://docs.oracle.com/javase/' + str(p.javaCompliance.value) + '/docs/api/']
out = outDir(p)
for d in deps:
depOut = outDir(d)
links.append('-link')
links.append(os.path.relpath(depOut, out))
cp = classpath(p.name, includeSelf=True)
sp = os.pathsep.join(p.source_dirs())
overviewFile = join(p.dir, 'overview.html')
delOverviewFile = False
if not exists(overviewFile):
with open(overviewFile, 'w') as fp:
print >> fp, '<html><body>Documentation for the <code>' + p.name + '</code> project.</body></html>'
delOverviewFile = True
nowarnAPI = []
if not args.warnAPI:
nowarnAPI.append('-XDignore.symbol.file')
            # windowTitle only applies to the standard doclet processor
windowTitle = []
if stdDoclet:
windowTitle = ['-windowtitle', p.name + ' javadoc']
try:
log('Generating {2} for {0} in {1}'.format(p.name, out, docDir))
projectJava = java(p.javaCompliance)
# Once https://bugs.openjdk.java.net/browse/JDK-8041628 is fixed,
# this should be reverted to:
# javadocExe = java().javadoc
javadocExe = projectJava.javadoc
run([javadocExe, memory,
'-XDignore.symbol.file',
'-classpath', cp,
'-quiet',
'-d', out,
'-overview', overviewFile,
'-sourcepath', sp,
'-source', str(projectJava.javaCompliance),
'-bootclasspath', projectJava.bootclasspath(),
'-extdirs', projectJava.extdirs()] +
([] if projectJava.javaCompliance < JavaCompliance('1.8') else ['-Xdoclint:none']) +
links +
extraArgs +
nowarnAPI +
windowTitle +
list(pkgs))
log('Generated {2} for {0} in {1}'.format(p.name, out, docDir))
finally:
if delOverviewFile:
os.remove(overviewFile)
else:
# The projects must be built to ensure javadoc can find class files for all referenced classes
build(['--no-native'])
pkgs = set()
sp = []
names = []
for p in projects:
find_packages(p.source_dirs(), pkgs)
sp += p.source_dirs()
names.append(p.name)
links = ['-link', 'http://docs.oracle.com/javase/' + str(java().javaCompliance.value) + '/docs/api/']
out = join(_primary_suite.dir, docDir)
if args.base is not None:
out = join(args.base, docDir)
cp = classpath()
sp = os.pathsep.join(sp)
nowarnAPI = []
if not args.warnAPI:
nowarnAPI.append('-XDignore.symbol.file')
log('Generating {2} for {0} in {1}'.format(', '.join(names), out, docDir))
run([java().javadoc, memory,
'-classpath', cp,
'-quiet',
'-d', out,
'-sourcepath', sp] +
([] if java().javaCompliance < JavaCompliance('1.8') else ['-Xdoclint:none']) +
links +
extraArgs +
nowarnAPI +
list(pkgs))
log('Generated {2} for {0} in {1}'.format(', '.join(names), out, docDir))
def site(args):
"""creates a website containing javadoc and the project dependency graph"""
parser = ArgumentParser(prog='site')
parser.add_argument('-d', '--base', action='store', help='directory for generated site', required=True, metavar='<dir>')
parser.add_argument('--tmp', action='store', help='directory to use for intermediate results', metavar='<dir>')
parser.add_argument('--name', action='store', help='name of overall documentation', required=True, metavar='<name>')
parser.add_argument('--overview', action='store', help='path to the overview content for overall documentation', required=True, metavar='<path>')
parser.add_argument('--projects', action='store', help='comma separated projects to process (omit to process all projects)')
parser.add_argument('--jd', action='append', help='extra Javadoc arguments (e.g. --jd @-use)', metavar='@<arg>', default=[])
parser.add_argument('--exclude-packages', action='store', help='comma separated packages to exclude', metavar='<pkgs>')
parser.add_argument('--dot-output-base', action='store', help='base file name (relative to <dir>/all) for project dependency graph .svg and .jpg files generated by dot (omit to disable dot generation)', metavar='<path>')
parser.add_argument('--title', action='store', help='value used for -windowtitle and -doctitle javadoc args for overall documentation (default: "<name>")', metavar='<title>')
args = parser.parse_args(args)
args.base = os.path.abspath(args.base)
tmpbase = args.tmp if args.tmp else tempfile.mkdtemp(prefix=basename(args.base) + '.', dir=dirname(args.base))
unified = join(tmpbase, 'all')
exclude_packages_arg = []
if args.exclude_packages is not None:
exclude_packages_arg = ['--exclude-packages', args.exclude_packages]
projects = sorted_deps()
projects_arg = []
if args.projects is not None:
projects_arg = ['--projects', args.projects]
projects = [project(name) for name in args.projects.split(',')]
extra_javadoc_args = []
for a in args.jd:
extra_javadoc_args.append('--arg')
extra_javadoc_args.append('@' + a)
try:
# Create javadoc for each project
javadoc(['--base', tmpbase] + exclude_packages_arg + projects_arg + extra_javadoc_args)
# Create unified javadoc for all projects
with open(args.overview) as fp:
content = fp.read()
idx = content.rfind('</body>')
if idx != -1:
args.overview = join(tmpbase, 'overview_with_projects.html')
with open(args.overview, 'w') as fp2:
print >> fp2, content[0:idx]
print >> fp2, """<div class="contentContainer">
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Projects table">
<caption><span>Projects</span><span class="tabEnd"> </span></caption>
<tr><th class="colFirst" scope="col">Project</th><th class="colLast" scope="col"> </th></tr>
<tbody>"""
color = 'row'
for p in projects:
print >> fp2, '<tr class="{1}Color"><td class="colFirst"><a href="../{0}/javadoc/index.html",target = "_top">{0}</a></td><td class="colLast"> </td></tr>'.format(p.name, color)
color = 'row' if color == 'alt' else 'alt'
print >> fp2, '</tbody></table></div>'
print >> fp2, content[idx:]
title = args.title if args.title is not None else args.name
javadoc(['--base', tmpbase,
'--unified',
'--arg', '@-windowtitle', '--arg', '@' + title,
'--arg', '@-doctitle', '--arg', '@' + title,
'--arg', '@-overview', '--arg', '@' + args.overview] + exclude_packages_arg + projects_arg + extra_javadoc_args)
if exists(unified):
shutil.rmtree(unified)
os.rename(join(tmpbase, 'javadoc'), unified)
# Generate dependency graph with Graphviz
if args.dot_output_base is not None:
dotErr = None
try:
if not 'version' in subprocess.check_output(['dot', '-V'], stderr=subprocess.STDOUT):
dotErr = 'dot -V does not print a string containing "version"'
except subprocess.CalledProcessError as e:
dotErr = 'error calling "dot -V": {}'.format(e)
except OSError as e:
dotErr = 'error calling "dot -V": {}'.format(e)
if dotErr != None:
abort('cannot generate dependency graph: ' + dotErr)
dot = join(tmpbase, 'all', str(args.dot_output_base) + '.dot')
svg = join(tmpbase, 'all', str(args.dot_output_base) + '.svg')
jpg = join(tmpbase, 'all', str(args.dot_output_base) + '.jpg')
html = join(tmpbase, 'all', str(args.dot_output_base) + '.html')
with open(dot, 'w') as fp:
dim = len(projects)
print >> fp, 'digraph projects {'
print >> fp, 'rankdir=BT;'
print >> fp, 'size = "' + str(dim) + ',' + str(dim) + '";'
print >> fp, 'node [shape=rect, fontcolor="blue"];'
# print >> fp, 'edge [color="green"];'
for p in projects:
print >> fp, '"' + p.name + '" [URL = "../' + p.name + '/javadoc/index.html", target = "_top"]'
for dep in p.canonical_deps():
if dep in [proj.name for proj in projects]:
print >> fp, '"' + p.name + '" -> "' + dep + '"'
depths = dict()
for p in projects:
d = p.max_depth()
depths.setdefault(d, list()).append(p.name)
print >> fp, '}'
run(['dot', '-Tsvg', '-o' + svg, '-Tjpg', '-o' + jpg, dot])
# Post-process generated SVG to remove title elements which most browsers
# render as redundant (and annoying) tooltips.
with open(svg, 'r') as fp:
content = fp.read()
content = re.sub('<title>.*</title>', '', content)
content = re.sub('xlink:title="[^"]*"', '', content)
with open(svg, 'w') as fp:
fp.write(content)
# Create HTML that embeds the svg file in an <object> frame
with open(html, 'w') as fp:
print >> fp, '<html><body><object data="{}.svg" type="image/svg+xml"></object></body></html>'.format(args.dot_output_base)
if exists(args.base):
shutil.rmtree(args.base)
if args.tmp:
shutil.copytree(tmpbase, args.base)
else:
shutil.move(tmpbase, args.base)
print 'Created website - root is ' + join(args.base, 'all', 'index.html')
finally:
if not args.tmp and exists(tmpbase):
shutil.rmtree(tmpbase)
def _kwArg(kwargs):
    """return and remove the first element of the 'kwargs' list of optional positional arguments, or None if it is empty"""
    if len(kwargs) > 0:
        return kwargs.pop(0)
    return None
def findclass(args, logToConsole=True, matcher=lambda string, classname: string in classname):
"""find all classes matching a given substring"""
matches = []
for entry, filename in classpath_walk(includeBootClasspath=True):
if filename.endswith('.class'):
if isinstance(entry, zipfile.ZipFile):
classname = filename.replace('/', '.')
else:
classname = filename.replace(os.sep, '.')
classname = classname[:-len('.class')]
for a in args:
if matcher(a, classname):
matches.append(classname)
if logToConsole:
log(classname)
return matches
def select_items(items, descriptions=None, allowMultiple=True):
"""
Presents a command line interface for selecting one or more (if allowMultiple is true) items.
"""
if len(items) <= 1:
return items
else:
if allowMultiple:
log('[0] <all>')
for i in range(0, len(items)):
if descriptions is None:
log('[{0}] {1}'.format(i + 1, items[i]))
else:
assert len(items) == len(descriptions)
wrapper = textwrap.TextWrapper(subsequent_indent=' ')
log('\n'.join(wrapper.wrap('[{0}] {1} - {2}'.format(i + 1, items[i], descriptions[i]))))
while True:
if allowMultiple:
s = raw_input('Enter number(s) of selection (separate multiple choices with spaces): ').split()
else:
s = [raw_input('Enter number of selection: ')]
try:
s = [int(x) for x in s]
except:
log('Selection contains non-numeric characters: "' + ' '.join(s) + '"')
continue
if allowMultiple and 0 in s:
return items
indexes = []
for n in s:
if n not in range(1, len(items) + 1):
log('Invalid selection: ' + str(n))
continue
else:
indexes.append(n - 1)
if allowMultiple:
return [items[i] for i in indexes]
if len(indexes) == 1:
return items[indexes[0]]
return None
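# A minimal illustrative sketch (not part of the original sources): select_items is
# normally driven interactively by other commands, e.g. javap below. The class name
# pattern used here is hypothetical.
#
#   candidates = findclass(['HashMap'], logToConsole=False)
#   chosen = select_items(candidates)                       # may prompt on stdin, returns a list
#   single = select_items(candidates, allowMultiple=False)  # returns a single selection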
def exportlibs(args):
"""export libraries to an archive file"""
parser = ArgumentParser(prog='exportlibs')
parser.add_argument('-b', '--base', action='store', help='base name of archive (default: libs)', default='libs', metavar='<path>')
    parser.add_argument('-a', '--include-all', action='store_true', help="include all defined libraries")
parser.add_argument('--arc', action='store', choices=['tgz', 'tbz2', 'tar', 'zip'], default='tgz', help='the type of the archive to create')
parser.add_argument('--no-sha1', action='store_false', dest='sha1', help='do not create SHA1 signature of archive')
parser.add_argument('--no-md5', action='store_false', dest='md5', help='do not create MD5 signature of archive')
parser.add_argument('--include-system-libs', action='store_true', help='include system libraries (i.e., those not downloaded from URLs)')
parser.add_argument('extras', nargs=REMAINDER, help='extra files and directories to add to archive', metavar='files...')
args = parser.parse_args(args)
def createArchive(addMethod):
entries = {}
def add(path, arcname):
apath = os.path.abspath(path)
if not entries.has_key(arcname):
entries[arcname] = apath
logv('[adding ' + path + ']')
addMethod(path, arcname=arcname)
elif entries[arcname] != apath:
logv('[warning: ' + apath + ' collides with ' + entries[arcname] + ' as ' + arcname + ']')
else:
logv('[already added ' + path + ']')
libsToExport = set()
if args.include_all:
for lib in _libs.itervalues():
libsToExport.add(lib)
else:
def isValidLibrary(dep):
if dep in _libs.iterkeys():
lib = _libs[dep]
if len(lib.urls) != 0 or args.include_system_libs:
return lib
return None
# iterate over all project dependencies and find used libraries
for p in _projects.itervalues():
for dep in p.deps:
r = isValidLibrary(dep)
if r:
libsToExport.add(r)
# a library can have other libraries as dependency
size = 0
while size != len(libsToExport):
size = len(libsToExport)
for lib in libsToExport.copy():
for dep in lib.deps:
r = isValidLibrary(dep)
if r:
libsToExport.add(r)
for lib in libsToExport:
add(lib.get_path(resolve=True), lib.path)
if lib.sha1:
add(lib.get_path(resolve=True) + ".sha1", lib.path + ".sha1")
if lib.sourcePath:
add(lib.get_source_path(resolve=True), lib.sourcePath)
if lib.sourceSha1:
add(lib.get_source_path(resolve=True) + ".sha1", lib.sourcePath + ".sha1")
if args.extras:
for e in args.extras:
if os.path.isdir(e):
for root, _, filenames in os.walk(e):
for name in filenames:
f = join(root, name)
add(f, f)
else:
add(e, e)
if args.arc == 'zip':
path = args.base + '.zip'
with zipfile.ZipFile(path, 'w') as zf:
createArchive(zf.write)
else:
path = args.base + '.tar'
mode = 'w'
if args.arc != 'tar':
sfx = args.arc[1:]
mode = mode + ':' + sfx
path = path + '.' + sfx
with tarfile.open(path, mode) as tar:
createArchive(tar.add)
log('created ' + path)
def digest(enabled, path, factory, suffix):
if enabled:
d = factory()
with open(path, 'rb') as f:
while True:
buf = f.read(4096)
if not buf:
break
d.update(buf)
with open(path + '.' + suffix, 'w') as fp:
print >> fp, d.hexdigest()
log('created ' + path + '.' + suffix)
digest(args.sha1, path, hashlib.sha1, 'sha1')
digest(args.md5, path, hashlib.md5, 'md5')
def javap(args):
"""disassemble classes matching given pattern with javap"""
javapExe = java().javap
if not exists(javapExe):
        abort('The javap executable does not exist: ' + javapExe)
else:
candidates = findclass(args, logToConsole=False)
if len(candidates) == 0:
log('no matches')
selection = select_items(candidates)
run([javapExe, '-private', '-verbose', '-classpath', classpath()] + selection)
def show_projects(args):
"""show all projects"""
for s in suites():
if len(s.projects) != 0:
log(join(s.mxDir, 'suite*.py'))
for p in s.projects:
log('\t' + p.name)
def show_suites(args):
"""show all suites"""
def _show_section(name, section):
if len(section) != 0:
log(' ' + name + ':')
for e in section:
log(' ' + e.name)
for s in suites():
log(join(s.mxDir, 'suite*.py'))
_show_section('libraries', s.libs)
_show_section('jrelibraries', s.jreLibs)
_show_section('projects', s.projects)
_show_section('distributions', s.dists)
def ask_yes_no(question, default=None):
""""""
assert not default or default == 'y' or default == 'n'
if not sys.stdout.isatty():
if default:
            return default == 'y'
else:
abort("Can not answer '" + question + "?' if stdout is not a tty")
questionMark = '? [yn]: '
if default:
questionMark = questionMark.replace(default, default.upper())
answer = raw_input(question + questionMark) or default
while not answer:
answer = raw_input(question + questionMark)
return answer.lower().startswith('y')
def add_argument(*args, **kwargs):
"""
    Defines a single command-line argument on the global mx argument parser.
"""
assert _argParser is not None
_argParser.add_argument(*args, **kwargs)
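# A hedged usage sketch (hypothetical, not part of the original sources): a suite's
# commands module imports this file as 'mx' (see the __main__ handling at the bottom)
# and can contribute an extra global option; the flag name below is illustrative only.
#
#   import mx
#   mx.add_argument('--my-suite-flag', action='store_true', dest='my_suite_flag',
#                   help='example option contributed by a suite extension')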
def update_commands(suite, new_commands):
for key, value in new_commands.iteritems():
if _commands.has_key(key):
warn("redefining command '" + key + "' in suite " + suite.name)
_commands[key] = value
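# A hedged usage sketch (hypothetical): as noted in the comment preceding the _commands
# table below, suite extensions should register commands through update_commands rather
# than by mutating the table directly. The command name and function are illustrative.
#
#   def mycommand(args):
#       """do something suite-specific"""
#       log('hello from mycommand')
#
#   mx.update_commands(suite, {
#       'mycommand': [mycommand, '[options]'],
#   })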
def warn(msg):
if _warn:
print 'WARNING: ' + msg
# Table of commands in alphabetical order.
# Keys are command names, values are lists: [<function>, <usage msg>, <format args to doc string of function>...]
# If any of the format args are instances of Callable, then they are called with an 'env' argument before being
# used in the call to str.format().
# Suite extensions should not update this table directly, but use update_commands
_commands = {
'about': [about, ''],
'build': [build, '[options]'],
'checkstyle': [checkstyle, ''],
'canonicalizeprojects': [canonicalizeprojects, ''],
'clean': [clean, ''],
'eclipseinit': [eclipseinit, ''],
'eclipseformat': [eclipseformat, ''],
'exportlibs': [exportlibs, ''],
'findclass': [findclass, ''],
'fsckprojects': [fsckprojects, ''],
'help': [help_, '[command]'],
'ideclean': [ideclean, ''],
'ideinit': [ideinit, ''],
'intellijinit': [intellijinit, ''],
'archive': [_archive, '[options]'],
'projectgraph': [projectgraph, ''],
'pylint': [pylint, ''],
'javap': [javap, '<class name patterns>'],
'javadoc': [javadoc, '[options]'],
'site': [site, '[options]'],
'netbeansinit': [netbeansinit, ''],
'suites': [show_suites, ''],
'projects': [show_projects, ''],
}
_argParser = ArgParser()
def _suitename(mxDir):
base = os.path.basename(mxDir)
parts = base.split('.')
# temporary workaround until mx.graal exists
if len(parts) == 1:
return 'graal'
else:
return parts[1]
def _is_suite_dir(d, mxDirName=None):
"""
Checks if d contains a suite.
If mxDirName is None, matches any suite name, otherwise checks for exactly that suite.
"""
if os.path.isdir(d):
for f in os.listdir(d):
if (mxDirName == None and (f == 'mx' or fnmatch.fnmatch(f, 'mx.*'))) or f == mxDirName:
mxDir = join(d, f)
if exists(mxDir) and isdir(mxDir) and (exists(join(mxDir, 'suite.py'))):
return mxDir
def _check_primary_suite():
if _primary_suite is None:
abort('no primary suite found')
else:
return _primary_suite
def _findPrimarySuiteMxDirFrom(d):
""" search for a suite directory upwards from 'd' """
while d:
mxDir = _is_suite_dir(d)
if mxDir is not None:
return mxDir
parent = dirname(d)
if d == parent:
return None
d = parent
return None
def _findPrimarySuiteMxDir():
# check for explicit setting
if _primary_suite_path is not None:
mxDir = _is_suite_dir(_primary_suite_path)
if mxDir is not None:
return mxDir
else:
abort(_primary_suite_path + ' does not contain an mx suite')
# try current working directory first
mxDir = _findPrimarySuiteMxDirFrom(os.getcwd())
if mxDir is not None:
return mxDir
# backwards compatibility: search from path of this file
return _findPrimarySuiteMxDirFrom(dirname(__file__))
def main():
primarySuiteMxDir = _findPrimarySuiteMxDir()
if primarySuiteMxDir:
global _primary_suite
_primary_suite = _loadSuite(primarySuiteMxDir, True)
else:
abort('no primary suite found')
opts, commandAndArgs = _argParser._parse_cmd_line()
global _opts, _java_homes
_opts = opts
defaultJdk = JavaConfig(opts.java_home, opts.java_dbg_port)
_java_homes = [defaultJdk]
if opts.extra_java_homes:
for java_home in opts.extra_java_homes.split(os.pathsep):
extraJdk = JavaConfig(java_home, opts.java_dbg_port)
if extraJdk > defaultJdk:
abort('Secondary JDK ' + extraJdk.jdk + ' has higher compliance level than default JDK ' + defaultJdk.jdk)
_java_homes.append(extraJdk)
for s in suites():
s._post_init(opts)
if len(commandAndArgs) == 0:
_argParser.print_help()
return
command = commandAndArgs[0]
command_args = commandAndArgs[1:]
if not _commands.has_key(command):
hits = [c for c in _commands.iterkeys() if c.startswith(command)]
if len(hits) == 1:
command = hits[0]
elif len(hits) == 0:
abort('mx: unknown command \'{0}\'\n{1}use "mx help" for more options'.format(command, _format_commands()))
else:
abort('mx: command \'{0}\' is ambiguous\n {1}'.format(command, ' '.join(hits)))
c, _ = _commands[command][:2]
def term_handler(signum, frame):
abort(1)
signal.signal(signal.SIGTERM, term_handler)
def quit_handler(signum, frame):
_send_sigquit()
if get_os() != 'windows':
signal.signal(signal.SIGQUIT, quit_handler)
try:
if opts.timeout != 0:
def alarm_handler(signum, frame):
abort('Command timed out after ' + str(opts.timeout) + ' seconds: ' + ' '.join(commandAndArgs))
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(opts.timeout)
retcode = c(command_args)
if retcode is not None and retcode != 0:
abort(retcode)
except KeyboardInterrupt:
# no need to show the stack trace when the user presses CTRL-C
abort(1)
version = VersionSpec("1.0")
currentUmask = None
if __name__ == '__main__':
# rename this module as 'mx' so it is not imported twice by the commands.py modules
sys.modules['mx'] = sys.modules.pop('__main__')
# Capture the current umask since there's no way to query it without mutating it.
currentUmask = os.umask(0)
os.umask(currentUmask)
main()
|
smarr/graal
|
mxtool/mx.py
|
Python
|
gpl-2.0
| 219,099
|
[
"VisIt"
] |
a5c97a1c7c86f33a82043994bae6f379585ca5e01fe14d330e87c151eda1f716
|
# Placeholder because log moved
# Remove this in version 1.0
import warnings
with warnings.catch_warnings():
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(("log has moved to MDAnalysis.lib.log "
"and will be removed from here in release 1.0"),
DeprecationWarning)
from ..lib.log import *
|
alejob/mdanalysis
|
package/MDAnalysis/core/log.py
|
Python
|
gpl-2.0
| 355
|
[
"MDAnalysis"
] |
ef43bc53ec6b62a11e3e9caa05be7cddd01fe1280936dfd8664459d40ff12bb0
|
from locust import TaskSet, task, HttpLocust
class UserBehavior(TaskSet):
def on_start(self):
self.index = 0
@task
def test_visit(self):
url = self.locust.share_data[self.index]
print('visit url: %s' % url)
self.index = (self.index + 1) % len(self.locust.share_data)
self.client.get(url)
class WebsiteUser(HttpLocust):
host = 'http://debugtalk.com'
task_set = UserBehavior
share_data = ['url1', 'url2', 'url3', 'url4', 'url5']
min_wait = 1000
max_wait = 3000
|
WZQ1397/automatic-repo
|
project/locust/locustcircle_url_test.py
|
Python
|
lgpl-3.0
| 530
|
[
"VisIt"
] |
41c40ec0953345133d80a1bae92a63647d144a4ad457b23b627a9d1486971327
|
# -*- coding: utf-8 -*-
#############################################################################
# SRWLIB Example: Virtual Beamline: a set of utilities and functions allowing one to simulate
# the operation of an SR beamline.
# The standard use of this script is from command line, with some optional arguments,
# e.g. for calculation (with default parameter values) of:
# UR Spectrum Through a Slit (Flux within a default aperture):
# python SRWLIB_VirtBL_*.py --sm
# Single-Electron UR Spectrum (Flux per Unit Surface):
# python SRWLIB_VirtBL_*.py --ss
# UR Power Density (at the first optical element):
# python SRWLIB_VirtBL_*.py --pw
# Input Single-Electron UR Intensity Distribution (at the first optical element):
# python SRWLIB_VirtBL_*.py --si
# Single-Electron Wavefront Propagation:
# python SRWLIB_VirtBL_*.py --ws
# Multi-Electron Wavefront Propagation:
# Sequential Mode:
# python SRWLIB_VirtBL_*.py --wm
# Parallel Mode (using MPI / mpi4py), e.g.:
# mpiexec -n 6 python SRWLIB_VirtBL_*.py --wm
# For changing parameters of all these calculations from the default values, see the definition
# of all options in the list at the end of the script.
# v 0.04
#############################################################################
from __future__ import print_function #Python 2.7 compatibility
from srwl_bl import *
try:
import cPickle as pickle
except:
import pickle
#import time
#*********************************Setting Up Optical Elements and Propagation Parameters
def set_optics(_v):
"""This function describes optical layout of the Coherent Hoard X-ray (CHX) beamline of NSLS-II.
Such function has to be written for every beamline to be simulated; it is specific to a particular beamline.
:param _v: structure containing all parameters allowed to be varied for that particular beamline
"""
#---Nominal Positions of Optical Elements [m] (with respect to straight section center)
zS0 = 33.1798 #White Beam Slits (S0)
zHFM = 34.2608 #Horizontally-Focusing Mirror M1 (HFM)
zS1 = 35.6678 #Pink Beam Slits (S1)
    zDCM = 36.4488 #Horizontally-Deflecting Double-Crystal Monochromator (DCM)
zBPM1 = 38.6904 #BPM-1
zBPM2 = 50.3872 #BPM-2
zSSA = 50.6572 #Secondary Source Aperture (SSA)
zEA = 61.9611 #Energy Absorber (EA)
zDBPM1 = 62.272 #Diamond BPM-1
zKBFV = 62.663 #High-Flux Vertically-Focusing KB Mirror M2
zKBFH = 63.0 #High-Flux Horizontally-Focusing KB Mirror M3
zSF = 63.3 #High-Flux Sample position (focus of KBF)
zDBPM2 = 65.9178 #Diamond BPM-2
zKBRV = 66.113 #High-Resolution Vertically-Focusing KB Mirror M4
zKBRH = 66.220 #High-Resolution Horizontally-Focusing KB Mirror M5
zSR = 63.3 #High-Resolution Sample position (focus of KBR)
#zD = 65 #Detector position (?)
#---Instantiation of the Optical Elements
arElNamesAllOpt = [
['S0', 'S0_HFM', 'HFM', 'HFM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_SSA', 'SSA', 'SSA_KBFV', 'KBFV', 'KBFV_KBFH', 'KBFH', 'KBFH_zSF'], #1
['S0', 'S0_HFM', 'HFM', 'HFM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_SSA', 'SSA', 'SSA_KBRV', 'KBRV', 'KBRV_KBRH', 'KBRH', 'KBRH_zSR'], #2
['S0', 'S0_HFM', 'HFM', 'HFM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_SSA', 'SSA', 'SSA_DBPM2', 'DBPM2_KBRV', 'KBRV', 'KBRV_KBRH', 'KBRH', 'KBRH_zSR'], #3
['S0', 'S0_HFM', 'HFM', 'HFM_S1', 'S1', 'S1_SSA', 'SSA', 'SSA_DBPM2', 'DBPM2_KBRV', 'KBRV', 'KBRV_KBRH', 'KBRH', 'KBRH_zSR'] #4
]
arElNamesAll = arElNamesAllOpt[int(round(_v.op_BL - 1))]
'''
#Treat beamline sub-cases / alternative configurations
if(len(_v.op_fin) > 0):
if(_v.op_fin not in arElNamesAll): raise Exception('Optical element with the name specified in the "op_fin" option is not present in this beamline')
#Could be made more general
'''
arElNames = [];
for i in range(len(arElNamesAll)):
arElNames.append(arElNamesAll[i])
if(len(_v.op_fin) > 0):
if(arElNamesAll[i] == _v.op_fin): break
el = []; pp = [] #lists of SRW optical element objects and their corresponding propagation parameters
#S0 (primary slit)
if('S0' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_S0_dx, _v.op_S0_dy)); pp.append(_v.op_S0_pp)
#Drift S0 -> HFM
if('S0_HFM' in arElNames):
el.append(SRWLOptD(zHFM - zS0)); pp.append(_v.op_S0_HFM_pp)
#HFM (Height Profile Error)
if('HFM' in arElNames):
lenHFM = 0.95 #Length [m]
horApHFM = lenHFM*_v.op_HFM_ang #Projected dimensions
verApHFM = 5.e-03 #?
el.append(SRWLOptA('r', 'a', horApHFM, verApHFM)); pp.append(_v.op_HFMA_pp)
if(_v.op_HFM_f != 0.):
el.append(SRWLOptL(_Fx=_v.op_HFM_f)); pp.append(_v.op_HFML_pp)
#To treat Reflectivity (maybe by Planar Mirror?)
#elif(_v.op_HFM_r != 0.):
#Setup Cylindrical Mirror, take into account Reflectivity
#Height Profile Error
ifnHFM = os.path.join(_v.fdir, _v.op_HFM_ifn) if len(_v.op_HFM_ifn) > 0 else ''
if(len(ifnHFM) > 0):
#hProfDataHFM = srwl_uti_read_data_cols(ifnHFM, '\t', 0, 1)
hProfDataHFM = srwl_uti_read_data_cols(ifnHFM, '\t')
opHFM = srwl_opt_setup_surf_height_2d(hProfDataHFM, 'x', _ang=_v.op_HFM_ang, _amp_coef=_v.op_HFM_amp, _nx=1500, _ny=200)
ofnHFM = os.path.join(_v.fdir, _v.op_HFM_ofn) if len(_v.op_HFM_ofn) > 0 else ''
if(len(ofnHFM) > 0):
pathDifHFM = opHFM.get_data(3, 3)
srwl_uti_save_intens_ascii(pathDifHFM, opHFM.mesh, ofnHFM, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm'])
el.append(opHFM); pp.append(_v.op_HFMT_pp)
#Drift HFM -> S1
if('HFM_S1' in arElNames):
el.append(SRWLOptD(zS1 - zHFM + _v.op_S1_dz)); pp.append(_v.op_HFM_S1_pp)
#S1 slit
if('S1' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_S1_dx, _v.op_S1_dy)); pp.append(_v.op_S1_pp)
#Drift S1 -> DCM
if('S1_DCM' in arElNames):
el.append(SRWLOptD(zDCM - zS1 - _v.op_S1_dz)); pp.append(_v.op_S1_DCM_pp)
#Drift S1 -> SSA
if('S1_SSA' in arElNames):
el.append(SRWLOptD(zSSA - zS1 - _v.op_S1_dz + _v.op_SSA_dz)); pp.append(_v.op_S1_SSA_pp)
#Double-Crystal Monochromator
if('DCM' in arElNames):
tc = 1e-02 # [m] crystal thickness
angAs = 0.*pi/180. # [rad] asymmetry angle
hc = [1,1,1]
if(_v.op_DCM_r == '311'): hc = [3,1,1]
dc = srwl_uti_cryst_pl_sp(hc, 'Si')
#print('DCM Interplannar dist.:', dc)
psi = srwl_uti_cryst_pol_f(_v.op_DCM_e, hc, 'Si')
#print('DCM Fourier Components:', psi)
#---------------------- DCM Crystal #1
opCr1 = SRWLOptCryst(_d_sp=dc, _psi0r=psi[0], _psi0i=psi[1], _psi_hr=psi[2], _psi_hi=psi[3], _psi_hbr=psi[2], _psi_hbi=psi[3], _tc=tc, _ang_as=angAs, _ang_roll=1.5707963)
#Find appropriate orientation of the Crystal #1 and the Output Beam Frame (using a member-function in SRWLOptCryst):
orientDataCr1 = opCr1.find_orient(_en=_v.op_DCM_e, _ang_dif_pl=1.5707963) # Horizontally-deflecting
#Crystal #1 Orientation found:
orientCr1 = orientDataCr1[0]
tCr1 = orientCr1[0] #Tangential Vector to Crystal surface
sCr1 = orientCr1[1]
nCr1 = orientCr1[2] #Normal Vector to Crystal surface
# print('DCM Crystal #1 Orientation (original):')
# print(' t =', tCr1, 's =', orientCr1[1], 'n =', nCr1)
import uti_math
if(_v.op_DCM_ac1 != 0): #Small rotation of DCM Crystal #1:
rot = uti_math.trf_rotation([0,1,0], _v.op_DCM_ac1, [0,0,0])
tCr1 = uti_math.matr_prod(rot[0], tCr1)
sCr1 = uti_math.matr_prod(rot[0], sCr1)
nCr1 = uti_math.matr_prod(rot[0], nCr1)
#Set the Crystal #1 orientation:
opCr1.set_orient(nCr1[0], nCr1[1], nCr1[2], tCr1[0], tCr1[1])
#Orientation of the Outgoing Beam Frame being found:
orientCr1OutFr = orientDataCr1[1]
rxCr1 = orientCr1OutFr[0] #Horizontal Base Vector of the Output Beam Frame
ryCr1 = orientCr1OutFr[1] #Vertical Base Vector of the Output Beam Frame
rzCr1 = orientCr1OutFr[2] #Longitudinal Base Vector of the Output Beam Frame
# print('DCM Crystal #1 Outgoing Beam Frame:')
# print(' ex =', rxCr1, 'ey =', ryCr1, 'ez =', rzCr1)
#Incoming/Outgoing beam frame transformation matrix for the DCM Crystal #1
TCr1 = [rxCr1, ryCr1, rzCr1]
# print('Total transformation matrix after DCM Crystal #1:')
# uti_math.matr_print(TCr1)
#print(' ')
el.append(opCr1); pp.append(_v.op_DCMC1_pp)
#---------------------- DCM Crystal #2
opCr2 = SRWLOptCryst(_d_sp=dc, _psi0r=psi[0], _psi0i=psi[1], _psi_hr=psi[2], _psi_hi=psi[3], _psi_hbr=psi[2], _psi_hbi=psi[3], _tc=tc, _ang_as=angAs, _ang_roll=-1.5707963)
#Find appropriate orientation of the Crystal #2 and the Output Beam Frame
orientDataCr2 = opCr2.find_orient(_en=_v.op_DCM_e, _ang_dif_pl=-1.5707963)
#Crystal #2 Orientation found:
orientCr2 = orientDataCr2[0]
tCr2 = orientCr2[0] #Tangential Vector to Crystal surface
sCr2 = orientCr2[1]
nCr2 = orientCr2[2] #Normal Vector to Crystal surface
# print('Crystal #2 Orientation (original):')
# print(' t =', tCr2, 's =', sCr2, 'n =', nCr2)
if(_v.op_DCM_ac2 != 0): #Small rotation of DCM Crystal #2:
rot = uti_math.trf_rotation([0,1,0], _v.op_DCM_ac2, [0,0,0])
tCr2 = uti_math.matr_prod(rot[0], tCr2)
sCr2 = uti_math.matr_prod(rot[0], sCr2)
nCr2 = uti_math.matr_prod(rot[0], nCr2)
#Set the Crystal #2 orientation
opCr2.set_orient(nCr2[0], nCr2[1], nCr2[2], tCr2[0], tCr2[1])
#Orientation of the Outgoing Beam Frame being found:
orientCr2OutFr = orientDataCr2[1]
rxCr2 = orientCr2OutFr[0] #Horizontal Base Vector of the Output Beam Frame
ryCr2 = orientCr2OutFr[1] #Vertical Base Vector of the Output Beam Frame
rzCr2 = orientCr2OutFr[2] #Longitudinal Base Vector of the Output Beam Frame
# print('DCM Crystal #2 Outgoing Beam Frame:')
# print(' ex =', rxCr2, 'ey =', ryCr2, 'ez =',rzCr2)
#Incoming/Outgoing beam transformation matrix for the DCM Crystal #2
TCr2 = [rxCr2, ryCr2, rzCr2]
Ttot = uti_math.matr_prod(TCr2, TCr1)
# print('Total transformation matrix after DCM Crystal #2:')
uti_math.matr_print(Ttot)
#print(' ')
el.append(opCr2); pp.append(_v.op_DCMC2_pp)
#Drift DCM -> SSA
if('DCM_SSA' in arElNames):
el.append(SRWLOptD(zSSA - zDCM + _v.op_SSA_dz)); pp.append(_v.op_DCM_SSA_pp)
#SSA slit
if('SSA' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_SSA_dx, _v.op_SSA_dy)); pp.append(_v.op_SSA_pp)
#Drift SSA -> DBPM2
if('SSA_DBPM2' in arElNames):
el.append(SRWLOptD(zDBPM2 - zSSA - _v.op_SSA_dz + _v.op_DBPM2_dz)); pp.append(_v.op_SSA_DBPM2_pp)
###############To continue
## #Sample
## if('SMP' in arElNames):
## ifnSMP = os.path.join(v.fdir, v.op_SMP_ifn) if len(v.op_SMP_ifn) > 0 else ''
## if(len(ifnSMP) > 0):
## ifSMP = open(ifnSMP, 'rb')
## opSMP = pickle.load(ifSMP)
## ofnSMP = os.path.join(v.fdir, v.op_SMP_ofn) if len(v.op_SMP_ofn) > 0 else ''
## if(len(ofnSMP) > 0):
## pathDifSMP = opSMP.get_data(3, 3)
## srwl_uti_save_intens_ascii(pathDifSMP, opSMP.mesh, ofnSMP, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm'])
## el.append(opSMP); pp.append(v.op_SMP_pp)
## ifSMP.close()
## #Drift Sample -> Detector
## if('SMP_D' in arElNames):
## el.append(SRWLOptD(zD - zSample + v.op_D_dz)); pp.append(v.op_SMP_D_pp)
pp.append(_v.op_fin_pp)
return SRWLOptC(el, pp)
#*********************************List of Parameters allowed to be varied
#---List of supported options / commands / parameters allowed to be varied for this Beamline (comment-out unnecessary):
varParam = [
#---Data Folder
['fdir', 's', os.path.join(os.getcwd(), 'data_AMX'), 'folder (directory) name for reading-in input and saving output data files'],
#---Electron Beam
['ebm_nm', 's', 'NSLS-II Low Beta ', 'standard electron beam name'],
['ebm_nms', 's', 'Day1', 'standard electron beam name suffix: e.g. can be Day1, Final'],
['ebm_i', 'f', 0.5, 'electron beam current [A]'],
#['ebeam_e', 'f', 3., 'electron beam avarage energy [GeV]'],
['ebm_de', 'f', 0., 'electron beam average energy deviation [GeV]'],
['ebm_x', 'f', 0., 'electron beam initial average horizontal position [m]'],
['ebm_y', 'f', 0., 'electron beam initial average vertical position [m]'],
['ebm_xp', 'f', 0., 'electron beam initial average horizontal angle [rad]'],
['ebm_yp', 'f', 0., 'electron beam initial average vertical angle [rad]'],
['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'],
['ebm_dr', 'f', 0., 'electron beam longitudinal drift [m] to be performed before a required calculation'],
['ebm_ens', 'f', -1, 'electron beam relative energy spread'],
['ebm_emx', 'f', -1, 'electron beam horizontal emittance [m]'],
['ebm_emy', 'f', -1, 'electron beam vertical emittance [m]'],
#---Undulator
['und_per', 'f', 0.021, 'undulator period [m]'],
['und_len', 'f', 1.5, 'undulator length [m]'],
['und_b', 'f', 0.88770981, 'undulator vertical peak magnetic field [T]'],
#['und_bx', 'f', 0., 'undulator horizontal peak magnetic field [T]'],
#['und_by', 'f', 1., 'undulator vertical peak magnetic field [T]'],
#['und_phx', 'f', 1.5708, 'undulator horizontal magnetic field phase [rad]'],
#['und_phy', 'f', 0., 'undulator vertical magnetic field phase [rad]'],
['und_sx', 'i', 1.0, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
#['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
['und_zc', 'f', 1.305, 'undulator center longitudinal position [m]'],
['und_mdir', 's', 'magn_meas', 'name of magnetic measurements sub-folder'],
['und_mfs', 's', 'ivu21_amx_sum.txt', 'name of magnetic measurements for different gaps summary file'],
#['und_g', 'f', 0., 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'],
#---Calculation Types
#Electron Trajectory
['tr', '', '', 'calculate electron trajectory', 'store_true'],
['tr_cti', 'f', 0., 'initial time moment (c*t) for electron trajectory calculation [m]'],
['tr_ctf', 'f', 0., 'final time moment (c*t) for electron trajectory calculation [m]'],
['tr_np', 'f', 50000, 'number of points for trajectory calculation'],
['tr_mag', 'i', 1, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'],
['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'],
    ['tr_pl', 's', 'xxpyypz', 'plot the resulting trajectory in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'],
#Single-Electron Spectrum vs Photon Energy
['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
['ss_ei', 'f', 100., 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ef', 'f', 20000., 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
['ss_x', 'f', 0., 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
['ss_y', 'f', 0., 'vertical position [m] for single-e spectrum vs photon energy calculation'],
['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['ss_mag', 'i', 1, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
['ss_pl', 's', 'e', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
['sm_ei', 'f', 100., 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ef', 'f', 20000., 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
['sm_x', 'f', 0., 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'],
['sm_y', 'f', 0., 'vertical center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'],
['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum spectrum or intensity distribution: 1- approximate, 2- accurate'],
    ['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
    ['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_prl', 'f', 1., 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_pra', 'f', 1., 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'],
['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
    ['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated multi-e spectrum vs photon energy'],
    ['sm_pl', 's', 'e', 'plot the resulting multi-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#to add options for the multi-e calculation from "accurate" magnetic field
#Power Density Distribution vs horizontal and vertical position
['pw', '', '', 'calculate SR power density distribution', 'store_true'],
['pw_x', 'f', 0., 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_rx', 'f', 0.015, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'],
['pw_y', 'f', 0., 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
['pw_pr', 'f', 1., 'precision factor for calculation of power density distribution'],
['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
['pw_zi', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_zf', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_mag', 'i', 1, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
['pw_pl', 's', 'xy', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
#Single-Electron Intensity distribution vs horizontal and vertical position
['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
#Single-Electron Wavefront Propagation
['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
#Multi-Electron (partially-coherent) Wavefront Propagation
['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],
['w_e', 'f', 9000., 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ef', 'f', -1., 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
['w_x', 'f', 0., 'central horizontal position [m] for calculation of intensity distribution'],
['w_rx', 'f', 2.4e-03, 'range of horizontal position [m] for calculation of intensity distribution'],
['w_nx', 'i', 100, 'number of points vs horizontal position for calculation of intensity distribution'],
['w_y', 'f', 0., 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ry', 'f', 2.0e-03, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ny', 'i', 100, 'number of points vs vertical position for calculation of intensity distribution'],
['w_smpf', 'f', 1., 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
['w_meth', 'i', 1, 'method to use for calculation of intensity distribution vs horizontal and vertical position'],
['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
['w_mag', 'i', 1, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],
['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
    ['ws_pl', 's', 'xy', 'plot the propagated radiation intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['ws_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters (/ Intensity distribution): coordinate (0) or angular (1)'],
['si_pl', 's', 'xy', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['wm_nm', 'i', 100000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node at parallel (MPI-based) calculation of multi-electron wavefront propagation'],
['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y'],
['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
['wm_x0', 'f', 0, 'horizontal center position for mutual intensity cut calculation'],
['wm_y0', 'f', 0, 'vertical center position for mutual intensity cut calculation'],
['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
#['ws_fn', 's', '', 'file name for saving single-e (/ fully coherent) wavefront data'],
#['wm_fn', 's', '', 'file name for saving multi-e (/ partially coherent) wavefront data'],
#to add options
['op_r', 'f', 33.1798, 'longitudinal position of the first optical element [m]'],
['op_fin', 's', 'S3_SMP', 'name of the final optical element wavefront has to be propagated through'],
#NOTE: the above option/variable names (fdir, ebm*, und*, ss*, sm*, pw*, is*, ws*, wm*) should be the same in all beamline scripts
    #on the other hand, the beamline optics related options below (op*) are specific to a particular beamline (and can differ from beamline to beamline).
#However, the default values of all the options/variables (above and below) can differ from beamline to beamline.
#---Beamline Optics
['op_BL', 'f', 1, 'beamline version/option number'],
['op_S0_dx', 'f', 2.375e-03, 'slit S0: horizontal size [m]'],
['op_S0_dy', 'f', 2.0e-03, 'slit S0: vertical size [m]'],
['op_HFM_f', 'f', 11.0893, 'mirror HFM: focal length [m] (effective if op_HFM_f != 0)'],
['op_HFM_r', 'f', 8.924e+03, 'mirror HFM: radius of curvature [m] (effective if op_HFM_r != 0 and op_HFM_f == 0)'],
['op_HFM_ang', 'f', 2.5e-03, 'mirror HFM: angle of incidence [rad]'],
['op_HFM_mat', 's', '', 'mirror HFM: coating material; possible options: Si, Cr, Rh, Pt'],
['op_HFM_ifn', 's', 'mir_metro/SRX_HFM_height_prof.dat', 'mirror HFM: input file name of height profile data'],
#['op_HFM_ifn', 's', '', 'mirror HFM: input file name of height profile data'],
['op_HFM_amp', 'f', 1., 'mirror HFM: amplification coefficient for height profile data'],
    ['op_HFM_ofn', 's', 'res_SRX_HFM_opt_path_dif.dat', 'mirror HFM: output file name of optical path difference data'],
['op_S1_dz', 'f', 0., 'S1: offset of longitudinal position [m]'],
['op_S1_dx', 'f', 2.375e-03, 'slit S1: horizontal size [m]'],
['op_S1_dy', 'f', 10.0e-03, 'slit S1: vertical size [m]'],
['op_DCM_e', 'f', 9000., 'DCM: central photon energy DCM is tuned to [eV]'],
['op_DCM_r', 's', '111', 'DCM: reflection type (can be either "111" or "311")'],
['op_DCM_ac1', 'f', 0., 'DCM: angular deviation of 1st crystal from exact Bragg angle [rad]'],
['op_DCM_ac2', 'f', 0., 'DCM: angular deviation of 2nd crystal from exact Bragg angle [rad]'],
['op_SSA_dz', 'f', 0., 'slit SSA: offset of longitudinal position [m]'],
['op_SSA_dx', 'f', 3.0e-03, 'slit SSA: horizontal size [m]'],
['op_SSA_dy', 'f', 3.0e-03, 'slit SSA: vertical size [m]'],
['op_DBPM2_dz', 'f', 0., 'slit DBPM2: offset of longitudinal position [m]'],
###############To continue
## ['op_SMP_dz', 'f', 0., 'sample: offset of longitudinal position [m]'],
## ['op_SMP_ifn', 's', 'CHX_SMP_CDI_001.pickle', 'sample: model file name (binary "dumped" SRW transmission object)'],
## ['op_SMP_ofn', 's', 'res_CHX_SMP_opt_path_dif.dat', 'sample: output file name of optical path difference data'],
## ['op_D_dz', 'f', 0., 'detector: offset of longitudinal position [m]'],
#to add options for different beamline cases, etc.
#Propagation Param.: [0][1][2][3][4] [5] [6] [7] [8] [9][10][11]
#['op_S0_pp', 'f', [0, 0, 1, 0, 0, 4.5, 5.0, 1.5, 2.5, 0, 0, 0], 'slit S0: propagation parameters'],
#['op_S0_pp', 'f', [0, 0, 1, 0, 0, 2.2, 6.0, 3.0, 15.0, 0, 0, 0], 'slit S0: propagation parameters'],
#['op_S0_pp', 'f', [0, 0, 1, 0, 0, 2.0, 15.0,1.5, 15.0,0, 0, 0], 'slit S0: propagation parameters'],
['op_S0_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S0: propagation parameters'],
['op_S0_HFM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S0 -> HFM: propagation parameters'],
    ['op_HFMA_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'mirror HFM: Aperture propagation parameters'],
    ['op_HFML_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'mirror HFM: Lens propagation parameters'],
    ['op_HFMT_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'mirror HFM: Transmission propagation parameters'],
    ['op_HFM_S1_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift HFM -> S1: propagation parameters'],
['op_S1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S1: propagation parameters'],
['op_S1_SSA_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S1 -> SSA: propagation parameters'],
['op_S1_DCM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S1 -> DCM: propagation parameters'],
['op_DCMC1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM C1: propagation parameters'],
['op_DCMC2_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM C2: propagation parameters'],
['op_DCM_SSA_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift DCM -> SSA: propagation parameters'],
['op_SSA_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit SSA: propagation parameters'],
['op_SSA_DBPM2_pp', 'f',[0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift SSA -> DBPM2: propagation parameters'],
###############To continue
## ['op_S3_SMP_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S3 -> sample: propagation parameters'],
## ['op_SMP_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'sample: propagation parameters'],
## ['op_SMP_D_pp', 'f', [0, 0, 1, 3, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'sample -> detector: propagation parameters'],
#['op_fin_pp', 'f', [0, 0, 1, 0, 1, 0.1, 5.0, 1.0, 1.5, 0, 0, 0], 'final post-propagation (resize) parameters'],
['op_fin_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) or not (0) After propagation
#[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
#[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[ 6]: Horizontal Resolution modification factor at Resizing
#[ 7]: Vertical Range modification factor at Resizing
#[ 8]: Vertical Resolution modification factor at Resizing
#[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
#[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
#[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
#[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
#[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
]
varParam = srwl_uti_ext_options(varParam) #Adding other default options
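#---Hedged illustration (not part of the original beamline file): a small helper with a
#   hypothetical name that labels the first nine entries of one of the propagation-parameter
#   lists above, using the index meanings documented in the comments preceding the list.
def _describe_propagation_params(pp):
    labels = ['auto-resize before', 'auto-resize after', 'relative precision',
              'semi-analytical quadratic phase', 'resize on Fourier side',
              'horizontal range factor', 'horizontal resolution factor',
              'vertical range factor', 'vertical resolution factor']
    return dict(zip(labels, pp))
#   Example: _describe_propagation_params([0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0])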
#*********************************Entry
if __name__ == "__main__":
#---Parse options, defining Beamline elements and running calculations
v = srwl_uti_parse_options(varParam)
#---Add some constant "parameters" (not allowed to be varied) for the beamline
v.und_per = 0.021 #['und_per', 'f', 0.021, 'undulator period [m]'],
v.und_len = 1.5 #['und_len', 'f', 1.5, 'undulator length [m]'],
v.und_zc = 1.305 #['und_zc', 'f', 1.305, 'undulator center longitudinal position [m]'],
v.und_sy = -1 #['und_sy', 'i', -1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
v.und_sx = 1 #['und_sx', 'i', 1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
#---Setup optics only if Wavefront Propagation is required:
v.ws = True
op = set_optics(v) if(v.ws or v.wm) else None
#---Run all requested calculations
SRWLBeamline('AMX/FMX beamline').calc_all(v, op)
|
radiasoft/sirepo
|
tests/template/srw_import_data/amx.py
|
Python
|
apache-2.0
| 34,767
|
[
"CRYSTAL"
] |
d06308a3a5a491cb72a94e5c57bf910544b7cbc38a26de5c7107958593b58cf3
|
# Test BUG #0015025: Toggling annotation text visibility in Catalyst/batch doesn't work.
from paraview.simple import *
from paraview import smtesting
smtesting.ProcessCommandLineArguments()
v = CreateRenderView()
v.OrientationAxesVisibility = 0
HeadingText = Text(Text="Hello World")
HeadingRep = Show()
HeadingRep.WindowLocation = 'UpperCenter'
HeadingRep.FontSize = 18
HeadingRep.TextScaleMode = 'Viewport'
Show()
Render()
# raw_input("Visible: %d: " % HeadingRep.Visibility)
Hide()
Render()
#raw_input("Visible: %d: " % HeadingRep.Visibility)
Show()
Render()
#raw_input("Visible: %d: " % HeadingRep.Visibility)
Hide()
Render()
#raw_input("Visible: %d: " % HeadingRep.Visibility)
Show()
Render()
#raw_input("Visible: %d: " % HeadingRep.Visibility)
if not smtesting.DoRegressionTesting(v.SMProxy):
# This will lead to VTK object leaks.
import sys
sys.exit(1)
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/ParaViewCore/ServerManager/Default/Testing/Python/AnnotationVisibility.py
|
Python
|
gpl-3.0
| 875
|
[
"ParaView",
"VTK"
] |
003cf9c46a85670dad775f3b13964018df2be8b6eb38f45b648d92ff3a51ac86
|
# -*- coding: utf-8 -*-
import logging
from cStringIO import StringIO
from math import exp
from lxml import etree
from path import path # NOTE (THK): Only used for detecting presence of syllabus
import requests
from datetime import datetime
import dateutil.parser
from lazy import lazy
from xmodule.modulestore import Location
from xmodule.seq_module import SequenceDescriptor, SequenceModule
from xmodule.graders import grader_from_conf
import json
from xblock.fields import Scope, List, String, Dict, Boolean
from .fields import Date
from xmodule.modulestore.locator import CourseLocator
from django.utils.timezone import UTC
log = logging.getLogger(__name__)
class StringOrDate(Date):
def from_json(self, value):
"""
Parse an optional metadata key containing a time or a string:
        if the value does not parse as a date, return it unchanged as a string.
"""
try:
result = super(StringOrDate, self).from_json(value)
except ValueError:
return value
if result is None:
return value
else:
return result
def to_json(self, value):
"""
Convert a time struct or string to a string.
"""
try:
result = super(StringOrDate, self).to_json(value)
except:
return value
if result is None:
return value
else:
return result
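# Hedged usage note (not part of the original module): StringOrDate behaves like the Date
# field used elsewhere in this file, except that values which do not parse as dates are
# passed through unchanged, e.g. (hypothetical values):
#   StringOrDate().from_json("2013-01-01T00:00")  -> a datetime object
#   StringOrDate().from_json("Spring 2013")       -> "Spring 2013"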
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
remove_comments=True, remove_blank_text=True)
_cached_toc = {}
class Textbook(object):
def __init__(self, title, book_url):
self.title = title
self.book_url = book_url
@lazy
def start_page(self):
return int(self.table_of_contents[0].attrib['page'])
@lazy
def end_page(self):
# The last page should be the last element in the table of contents,
# but it may be nested. So recurse all the way down the last element
last_el = self.table_of_contents[-1]
while last_el.getchildren():
last_el = last_el[-1]
return int(last_el.attrib['page'])
@lazy
def table_of_contents(self):
"""
Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url
Returns XML tree representation of the table of contents
"""
toc_url = self.book_url + 'toc.xml'
# cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores)
# course modules have a very short lifespan and are constantly being created and torn down.
# Since this module in the __init__() method does a synchronous call to AWS to get the TOC
# this is causing a big performance problem. So let's be a bit smarter about this and cache
# each fetch and store in-mem for 10 minutes.
# NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and
# rewrite to use the traditional Django in-memory cache.
try:
# see if we already fetched this
if toc_url in _cached_toc:
(table_of_contents, timestamp) = _cached_toc[toc_url]
age = datetime.now(UTC) - timestamp
# expire every 10 minutes
if age.seconds < 600:
return table_of_contents
except Exception as err:
pass
# Get the table of contents from S3
log.info("Retrieving textbook table of contents from %s" % toc_url)
try:
r = requests.get(toc_url)
except Exception as err:
msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
# TOC is XML. Parse it
try:
table_of_contents = etree.fromstring(r.text)
except Exception as err:
msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
return table_of_contents
def __eq__(self, other):
return (self.title == other.title and
self.book_url == other.book_url)
def __ne__(self, other):
return not self == other
class TextbookList(List):
def from_json(self, values):
textbooks = []
for title, book_url in values:
try:
textbooks.append(Textbook(title, book_url))
except:
# If we can't get to S3 (e.g. on a train with no internet), don't break
# the rest of the courseware.
log.exception("Couldn't load textbook ({0}, {1})".format(title, book_url))
continue
return textbooks
def to_json(self, values):
json_data = []
for val in values:
if isinstance(val, Textbook):
json_data.append((val.title, val.book_url))
elif isinstance(val, tuple):
json_data.append(val)
else:
continue
return json_data
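# Hedged example (values are hypothetical, not taken from any real course): a TextbookList
# serialises to and from a list of (title, book_url) pairs, e.g.
#   [["Course Reader", "https://example.com/static/book/"]]
# Entries whose data cannot be loaded are skipped by from_json above rather than breaking
# the rest of the courseware.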
class CourseFields(object):
lti_passports = List(help="LTI tools passports as id:client_key:client_secret", scope=Scope.settings)
textbooks = TextbookList(help="List of pairs of (title, url) for textbooks used in this course",
default=[], scope=Scope.content)
wiki_slug = String(help="Slug that points to the wiki for this course", scope=Scope.content)
enrollment_start = Date(help="Date that enrollment for this class is opened", scope=Scope.settings)
enrollment_end = Date(help="Date that enrollment for this class is closed", scope=Scope.settings)
start = Date(help="Start time when this module is visible",
default=datetime(2030, 1, 1, tzinfo=UTC()),
scope=Scope.settings)
end = Date(help="Date that this class ends", scope=Scope.settings)
advertised_start = String(help="Date that this course is advertised to start", scope=Scope.settings)
tags = String(help="", default = "", scope=Scope.settings)
grading_policy = Dict(help="Grading policy definition for this class",
default={"GRADER": [
{
"type": "Homework",
"min_count": 12,
"drop_count": 2,
"short_label": "HW",
"weight": 0.15
},
{
"type": "Lab",
"min_count": 12,
"drop_count": 2,
"weight": 0.15
},
{
"type": "Midterm Exam",
"short_label": "Midterm",
"min_count": 1,
"drop_count": 0,
"weight": 0.3
},
{
"type": "Final Exam",
"short_label": "Final",
"min_count": 1,
"drop_count": 0,
"weight": 0.4
}
],
"GRADE_CUTOFFS": {
"Pass": 0.5
}},
scope=Scope.content)
    available_for_demo = Boolean(help="Is this course available for demo users", default=False, scope=Scope.settings)
    show_in_lms = Boolean(help="Whether to show this course in LMS", default=True, scope=Scope.settings)
    has_dynamic_graph = Boolean(help="Whether this course has a dynamic graph", default=False, scope=Scope.settings)
    enrollment_is_closed_msg = String(help="Message shown when enrollment for the course is closed", default="Enrollment is closed", scope=Scope.settings)
    locked_subsections = Boolean(help="Whether subsections can be locked in this course", default=True, scope=Scope.settings)
show_calculator = Boolean(help="Whether to show the calculator in this course", default=False, scope=Scope.settings)
display_name = String(help="Display name for this module", default="Empty", display_name="Display Name", scope=Scope.settings)
subject = String(help="Subject for this course", default="Empty", scope=Scope.settings)
show_chat = Boolean(help="Whether to show the chat widget in this course", default=False, scope=Scope.settings)
tabs = List(help="List of tabs to enable in this course", scope=Scope.settings)
end_of_course_survey_url = String(help="Url for the end-of-course survey", scope=Scope.settings)
discussion_blackouts = List(help="List of pairs of start/end dates for discussion blackouts", scope=Scope.settings)
discussion_topics = Dict(help="Map of topics names to ids", scope=Scope.settings)
discussion_sort_alpha = Boolean(scope=Scope.settings, default=False, help="Sort forum categories and subcategories alphabetically.")
announcement = Date(help="Date this course is announced", scope=Scope.settings)
cohort_config = Dict(help="Dictionary defining cohort configuration", scope=Scope.settings)
is_new = Boolean(help="Whether this course should be flagged as new", scope=Scope.settings)
no_grade = Boolean(help="True if this course isn't graded", default=False, scope=Scope.settings)
disable_progress_graph = Boolean(help="True if this course shouldn't display the progress graph", default=False, scope=Scope.settings)
pdf_textbooks = List(help="List of dictionaries containing pdf_textbook configuration", scope=Scope.settings)
html_textbooks = List(help="List of dictionaries containing html_textbook configuration", scope=Scope.settings)
remote_gradebook = Dict(scope=Scope.settings)
allow_anonymous = Boolean(scope=Scope.settings, default=True)
allow_anonymous_to_peers = Boolean(scope=Scope.settings, default=False)
advanced_modules = List(help="Beta modules used in your course", scope=Scope.settings)
has_children = True
checklists = List(scope=Scope.settings,
default=[
{"short_description": "Getting Started With Studio",
"items": [{"short_description": "Add Course Team Members",
"long_description": "Grant your collaborators permission to edit your course so you can work together.",
"is_checked": False,
"action_url": "ManageUsers",
"action_text": "Edit Course Team",
"action_external": False},
{"short_description": "Set Important Dates for Your Course",
"long_description": "Establish your course's student enrollment and launch dates on the Schedule and Details page.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Details & Schedule",
"action_external": False},
{"short_description": "Draft Your Course's Grading Policy",
"long_description": "Set up your assignment types and grading policy even if you haven't created all your assignments.",
"is_checked": False,
"action_url": "SettingsGrading",
"action_text": "Edit Grading Settings",
"action_external": False},
{"short_description": "Explore the Other Studio Checklists",
"long_description": "Discover other available course authoring tools, and find help when you need it.",
"is_checked": False,
"action_url": "",
"action_text": "",
"action_external": False}]},
{"short_description": "Draft a Rough Course Outline",
"items": [{"short_description": "Create Your First Section and Subsection",
"long_description": "Use your course outline to build your first Section and Subsection.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Set Section Release Dates",
"long_description": "Specify the release dates for each Section in your course. Sections become visible to students on their release dates.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Designate a Subsection as Graded",
"long_description": "Set a Subsection to be graded as a specific assignment type. Assignments within graded Subsections count toward a student's final grade.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Reordering Course Content",
"long_description": "Use drag and drop to reorder the content in your course.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Renaming Sections",
"long_description": "Rename Sections by clicking the Section name from the Course Outline.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Deleting Course Content",
"long_description": "Delete Sections, Subsections, or Units you don't need anymore. Be careful, as there is no Undo function.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Add an Instructor-Only Section to Your Outline",
"long_description": "Some course authors find using a section for unsorted, in-progress work useful. To do this, create a section and set the release date to the distant future.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False}]},
{"short_description": "Explore edX's Support Tools",
"items": [{"short_description": "Explore the Studio Help Forum",
"long_description": "Access the Studio Help forum from the menu that appears when you click your user name in the top right corner of Studio.",
"is_checked": False,
"action_url": "http://help.edge.edx.org/",
"action_text": "Visit Studio Help",
"action_external": True},
{"short_description": "Enroll in edX 101",
"long_description": "Register for edX 101, edX's primer for course creation.",
"is_checked": False,
"action_url": "https://edge.edx.org/courses/edX/edX101/How_to_Create_an_edX_Course/about",
"action_text": "Register for edX 101",
"action_external": True},
{"short_description": "Download the Studio Documentation",
"long_description": "Download the searchable Studio reference documentation in PDF form.",
"is_checked": False,
"action_url": "http://files.edx.org/Getting_Started_with_Studio.pdf",
"action_text": "Download Documentation",
"action_external": True}]},
{"short_description": "Draft Your Course About Page",
"items": [{"short_description": "Draft a Course Description",
"long_description": "Courses on edX have an About page that includes a course video, description, and more. Draft the text students will read before deciding to enroll in your course.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Schedule & Details",
"action_external": False},
{"short_description": "Add Staff Bios",
"long_description": "Showing prospective students who their instructor will be is helpful. Include staff bios on the course About page.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Schedule & Details",
"action_external": False},
{"short_description": "Add Course FAQs",
"long_description": "Include a short list of frequently asked questions about your course.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Schedule & Details",
"action_external": False},
{"short_description": "Add Course Prerequisites",
"long_description": "Let students know what knowledge and/or skills they should have before they enroll in your course.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Schedule & Details",
"action_external": False}]}
])
info_sidebar_name = String(scope=Scope.settings, default='Course Handouts')
show_timezone = Boolean(
help="True if timezones should be shown on dates in the courseware. Deprecated in favor of due_date_display_format.",
scope=Scope.settings, default=True
)
due_date_display_format = String(
help="Format supported by strftime for displaying due dates. Takes precedence over show_timezone.",
scope=Scope.settings, default=None
)
enrollment_domain = String(help="External login method associated with user accounts allowed to register in course",
scope=Scope.settings)
course_image = String(
help="Filename of the course image",
scope=Scope.settings,
# Ensure that courses imported from XML keep their image
default="images_course_image.jpg"
)
# An extra property is used rather than the wiki_slug/number because
# there are courses that change the number for different runs. This allows
# courses to share the same css_class across runs even if they have
# different numbers.
#
# TODO get rid of this as soon as possible or potentially build in a robust
# way to add in course-specific styling. There needs to be a discussion
# about the right way to do this, but arjun will address this ASAP. Also
# note that the courseware template needs to change when this is removed.
css_class = String(help="DO NOT USE THIS", scope=Scope.settings)
# TODO: This is a quick kludge to allow CS50 (and other courses) to
# specify their own discussion forums as external links by specifying a
# "discussion_link" in their policy JSON file. This should later get
# folded in with Syllabus, Course Info, and additional Custom tabs in a
# more sensible framework later.
discussion_link = String(help="DO NOT USE THIS", scope=Scope.settings)
# TODO: same as above, intended to let internal CS50 hide the progress tab
# until we get grade integration set up.
# Explicit comparison to True because we always want to return a bool.
hide_progress_tab = Boolean(help="DO NOT USE THIS", scope=Scope.settings)
display_organization = String(help="An optional display string for the course organization that will get rendered in the LMS",
scope=Scope.settings)
display_coursenumber = String(help="An optional display string for the course number that will get rendered in the LMS",
scope=Scope.settings)
class CourseDescriptor(CourseFields, SequenceDescriptor):
module_class = SequenceModule
def __init__(self, *args, **kwargs):
"""
Expects the same arguments as XModuleDescriptor.__init__
"""
super(CourseDescriptor, self).__init__(*args, **kwargs)
_ = self.runtime.service(self, "i18n").ugettext
if self.wiki_slug is None:
if isinstance(self.location, Location):
self.wiki_slug = self.location.course
elif isinstance(self.location, CourseLocator):
self.wiki_slug = self.location.package_id or self.display_name
if self.due_date_display_format is None and self.show_timezone is False:
# For existing courses with show_timezone set to False (and no due_date_display_format specified),
# set the due_date_display_format to what would have been shown previously (with no timezone).
# Then remove show_timezone so that if the user clears out the due_date_display_format,
# they get the default date display.
self.due_date_display_format = u"%d.%m.%Y at %H:%M"
delattr(self, 'show_timezone')
# NOTE: relies on the modulestore to call set_grading_policy() right after
# init. (Modulestore is in charge of figuring out where to load the policy from)
# NOTE (THK): This is a last-minute addition for Fall 2012 launch to dynamically
# disable the syllabus content for courses that do not provide a syllabus
if self.system.resources_fs is None:
self.syllabus_present = False
else:
self.syllabus_present = self.system.resources_fs.exists(path('syllabus'))
self._grading_policy = {}
self.set_grading_policy(self.grading_policy)
if self.discussion_topics == {}:
self.discussion_topics = {_('General'): {'id': self.location.html_id()}}
# TODO check that this is still needed here and can't be by defaults.
if not self.tabs:
# When calling the various _tab methods, can omit the 'type':'blah' from the
# first arg, since that's only used for dispatch
tabs = []
tabs.append({'type': 'courseware'})
# Translators: "Course Info" is the name of the course's information and updates page
tabs.append({'type': 'course_info', 'name': _('Course Info')})
if self.syllabus_present:
tabs.append({'type': 'syllabus'})
tabs.append({'type': 'textbooks'})
# # If they have a discussion link specified, use that even if we feature
# # flag discussions off. Disabling that is mostly a server safety feature
# # at this point, and we don't need to worry about external sites.
if self.discussion_link:
tabs.append({'type': 'external_discussion', 'link': self.discussion_link})
else:
# Translators: "Discussion" is the title of the course forum page
tabs.append({'type': 'discussion', 'name': _('Discussion')})
# Translators: "Wiki" is the title of the course's wiki page
tabs.append({'type': 'wiki', 'name': _('Wiki')})
if not self.hide_progress_tab:
# Translators: "Progress" is the title of the student's grade information page
tabs.append({'type': 'progress', 'name': _('Progress')})
self.tabs = tabs
def set_grading_policy(self, course_policy):
"""
The JSON object can have the keys GRADER and GRADE_CUTOFFS. If either is
missing, it reverts to the default.
"""
if course_policy is None:
course_policy = {}
# Load the global settings as a dictionary
grading_policy = self.grading_policy
# BOY DO I HATE THIS grading_policy CODE ACROBATICS YET HERE I ADD MORE (dhm)--this fixes things persisted w/
# defective grading policy values (but not None)
if 'GRADER' not in grading_policy:
grading_policy['GRADER'] = CourseFields.grading_policy.default['GRADER']
if 'GRADE_CUTOFFS' not in grading_policy:
grading_policy['GRADE_CUTOFFS'] = CourseFields.grading_policy.default['GRADE_CUTOFFS']
# Override any global settings with the course settings
grading_policy.update(course_policy)
# Here is where we should parse any configurations, so that we can fail early
# Use setters so that side effecting to .definitions works
self.raw_grader = grading_policy['GRADER'] # used for cms access
self.grade_cutoffs = grading_policy['GRADE_CUTOFFS']
@classmethod
def read_grading_policy(cls, paths, system):
"""Load a grading policy from the specified paths, in order, if it exists."""
# Default to a blank policy dict
policy_str = '{}'
for policy_path in paths:
if not system.resources_fs.exists(policy_path):
continue
log.debug("Loading grading policy from {0}".format(policy_path))
try:
with system.resources_fs.open(policy_path) as grading_policy_file:
policy_str = grading_policy_file.read()
# if we successfully read the file, stop looking at backups
break
except (IOError):
msg = "Unable to load course settings file from '{0}'".format(policy_path)
log.warning(msg)
return policy_str
@classmethod
def from_xml(cls, xml_data, system, id_generator):
instance = super(CourseDescriptor, cls).from_xml(xml_data, system, id_generator)
# bleh, have to parse the XML here to just pull out the url_name attribute
# I don't think it's stored anywhere in the instance.
course_file = StringIO(xml_data.encode('ascii', 'ignore'))
xml_obj = etree.parse(course_file, parser=edx_xml_parser).getroot()
policy_dir = None
url_name = xml_obj.get('url_name', xml_obj.get('slug'))
if url_name:
policy_dir = 'policies/' + url_name
# Try to load grading policy
paths = ['grading_policy.json']
if policy_dir:
paths = [policy_dir + '/grading_policy.json'] + paths
try:
policy = json.loads(cls.read_grading_policy(paths, system))
except ValueError:
system.error_tracker("Unable to decode grading policy as json")
policy = {}
# now set the current instance. set_grading_policy() will apply some inheritance rules
instance.set_grading_policy(policy)
return instance
@classmethod
def definition_from_xml(cls, xml_object, system):
textbooks = []
for textbook in xml_object.findall("textbook"):
textbooks.append((textbook.get('title'), textbook.get('book_url')))
xml_object.remove(textbook)
# Load the wiki tag if it exists
wiki_slug = None
wiki_tag = xml_object.find("wiki")
if wiki_tag is not None:
wiki_slug = wiki_tag.attrib.get("slug", default=None)
xml_object.remove(wiki_tag)
definition, children = super(CourseDescriptor, cls).definition_from_xml(xml_object, system)
definition['textbooks'] = textbooks
definition['wiki_slug'] = wiki_slug
return definition, children
def definition_to_xml(self, resource_fs):
xml_object = super(CourseDescriptor, self).definition_to_xml(resource_fs)
if len(self.textbooks) > 0:
textbook_xml_object = etree.Element('textbook')
for textbook in self.textbooks:
textbook_xml_object.set('title', textbook.title)
textbook_xml_object.set('book_url', textbook.book_url)
xml_object.append(textbook_xml_object)
return xml_object
def has_ended(self):
"""
Returns True if the current time is after the specified course end date.
Returns False if there is no end date specified.
"""
if self.end is None:
return False
return datetime.now(UTC()) > self.end
def has_started(self):
return datetime.now(UTC()) > self.start
@property
def grader(self):
return grader_from_conf(self.raw_grader)
@property
def raw_grader(self):
# force the caching of the xblock value so that it can detect the change
# pylint: disable=pointless-statement
self.grading_policy['GRADER']
return self._grading_policy['RAW_GRADER']
@raw_grader.setter
def raw_grader(self, value):
# NOTE WELL: this change will not update the processed graders. If we need that, this needs to call grader_from_conf
self._grading_policy['RAW_GRADER'] = value
self.grading_policy['GRADER'] = value
@property
def grade_cutoffs(self):
return self._grading_policy['GRADE_CUTOFFS']
@grade_cutoffs.setter
def grade_cutoffs(self, value):
self._grading_policy['GRADE_CUTOFFS'] = value
# XBlock fields don't update after mutation
policy = self.grading_policy
policy['GRADE_CUTOFFS'] = value
self.grading_policy = policy
@property
def lowest_passing_grade(self):
return min(self._grading_policy['GRADE_CUTOFFS'].values())
@property
def is_cohorted(self):
"""
Return whether the course is cohorted.
"""
config = self.cohort_config
if config is None:
return False
return bool(config.get("cohorted"))
@property
def auto_cohort(self):
"""
Return whether the course is auto-cohorted.
"""
if not self.is_cohorted:
return False
return bool(self.cohort_config.get(
"auto_cohort", False))
@property
def auto_cohort_groups(self):
"""
Return the list of groups to put students into. Returns [] if not
specified. Returns specified list even if is_cohorted and/or auto_cohort are
false.
"""
if self.cohort_config is None:
return []
else:
return self.cohort_config.get("auto_cohort_groups", [])
@property
def top_level_discussion_topic_ids(self):
"""
Return list of topic ids defined in course policy.
"""
topics = self.discussion_topics
return [d["id"] for d in topics.values()]
@property
def cohorted_discussions(self):
"""
Return the set of discussions that is explicitly cohorted. It may be
the empty set. Note that all inline discussions are automatically
cohorted based on the course's is_cohorted setting.
"""
config = self.cohort_config
if config is None:
return set()
return set(config.get("cohorted_discussions", []))
@property
def is_newish(self):
"""
        Returns whether the course has been flagged as new. If
        there is no flag, returns a heuristic value based on the
        announcement and start dates.
"""
flag = self.is_new
if flag is None:
# Use a heuristic if the course has not been flagged
announcement, start, now = self._sorting_dates()
if announcement and (now - announcement).days < 30:
                # The course has been announced for less than a month
return True
elif (now - start).days < 1:
# The course has not started yet
return True
else:
return False
elif isinstance(flag, basestring):
return flag.lower() in ['true', 'yes', 'y']
else:
return bool(flag)
@property
def sorting_score(self):
"""
        Returns a score that can be used to sort the courses according
        to how "new" they are. The "newness" score is computed using a
        heuristic that takes into account the announcement and
        (advertised) start dates of the course if available.
        The lower the number, the "newer" the course.
"""
        # Make courses that have an announcement date have a lower
        # score than courses that don't; older courses should have a
        # higher score.
announcement, start, now = self._sorting_dates()
scale = 300.0 # about a year
if announcement:
days = (now - announcement).days
score = -exp(-days / scale)
else:
days = (now - start).days
score = exp(days / scale)
return score
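    # Hedged worked example (not part of the original module), using scale = 300 days:
    #   announced 30 days ago                -> score = -exp(-30 / 300.) ~ -0.905
    #   no announcement, started 30 days ago -> score =  exp( 30 / 300.) ~  1.105
    # so the announced course sorts as "newer" (lower score).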
def _sorting_dates(self):
# utility function to get datetime objects for dates used to
# compute the is_new flag and the sorting_score
        announcement = self.announcement
try:
start = dateutil.parser.parse(self.advertised_start)
if start.tzinfo is None:
start = start.replace(tzinfo=UTC())
except (ValueError, AttributeError):
start = self.start
now = datetime.now(UTC())
return announcement, start, now
@lazy
def grading_context(self):
"""
This returns a dictionary with keys necessary for quickly grading
a student. They are used by grades.grade()
The grading context has two keys:
graded_sections - This contains the sections that are graded, as
well as all possible children modules that can affect the
grading. This allows some sections to be skipped if the student
hasn't seen any part of it.
The format is a dictionary keyed by section-type. The values are
arrays of dictionaries containing
"section_descriptor" : The section descriptor
"xmoduledescriptors" : An array of xmoduledescriptors that
could possibly be in the section, for any student
all_descriptors - This contains a list of all xmodules that can
            affect a student's grade. This is used to efficiently fetch
all the xmodule state for a FieldDataCache without walking
the descriptor tree again.
"""
all_descriptors = []
graded_sections = {}
def yield_descriptor_descendents(module_descriptor):
for child in module_descriptor.get_children():
yield child
for module_descriptor in yield_descriptor_descendents(child):
yield module_descriptor
for c in self.get_children():
for s in c.get_children():
if s.graded:
xmoduledescriptors = list(yield_descriptor_descendents(s))
xmoduledescriptors.append(s)
# The xmoduledescriptors included here are only the ones that have scores.
section_description = {
'section_descriptor': s,
'xmoduledescriptors': filter(lambda child: child.has_score, xmoduledescriptors)
}
section_format = s.format if s.format is not None else ''
graded_sections[section_format] = graded_sections.get(section_format, []) + [section_description]
all_descriptors.extend(xmoduledescriptors)
all_descriptors.append(s)
return {'graded_sections': graded_sections,
'all_descriptors': all_descriptors, }
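    # Hedged illustration (not part of the original module) of the shape returned above:
    #   {'graded_sections': {'Homework': [{'section_descriptor': <descriptor>,
    #                                      'xmoduledescriptors': [...]}, ...], ...},
    #    'all_descriptors': [...]}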
@staticmethod
def make_id(org, course, url_name):
return '/'.join([org, course, url_name])
@staticmethod
def id_to_location(course_id):
'''Convert the given course_id (org/course/name) to a location object.
Throws ValueError if course_id is of the wrong format.
'''
org, course, name = course_id.split('/')
return Location('i4x', org, course, 'course', name)
@staticmethod
def location_to_id(location):
'''Convert a location of a course to a course_id. If location category
is not "course", raise a ValueError.
location: something that can be passed to Location
'''
loc = Location(location)
if loc.category != "course":
raise ValueError("{0} is not a course location".format(loc))
return "/".join([loc.org, loc.course, loc.name])
@property
def id(self):
"""Return the course_id for this course"""
return self.location_to_id(self.location)
@property
def start_date_text(self):
def try_parse_iso_8601(text):
try:
result = Date().from_json(text)
if result is None:
result = text.title()
else:
result = result.strftime("%d.%m.%Y")
except ValueError:
result = text.title()
return result
if isinstance(self.advertised_start, basestring):
return try_parse_iso_8601(self.advertised_start)
elif self.advertised_start is None and self.start is None:
# TODO this is an impossible state since the init function forces start to have a value
return 'TBD'
else:
return (self.advertised_start or self.start).strftime("%d.%m.%Y")
@property
def end_date_text(self):
"""
Returns the end date for the course formatted as a string.
If the course does not have an end date set (course.end is None), an empty string will be returned.
"""
return '' if self.end is None else self.end.strftime("%d.%m.%Y")
@property
def forum_posts_allowed(self):
date_proxy = Date()
try:
blackout_periods = [(date_proxy.from_json(start),
date_proxy.from_json(end))
for start, end
in self.discussion_blackouts]
now = datetime.now(UTC())
for start, end in blackout_periods:
if start <= now <= end:
return False
except:
log.exception("Error parsing discussion_blackouts for course {0}".format(self.id))
return True
@property
def number(self):
return self.location.course
@property
def display_number_with_default(self):
"""
Return a display course number if it has been specified, otherwise return the 'course' that is in the location
"""
if self.display_coursenumber:
return self.display_coursenumber
return self.number
@property
def org(self):
return self.location.org
@property
def display_org_with_default(self):
"""
Return a display organization if it has been specified, otherwise return the 'org' that is in the location
"""
if self.display_organization:
return self.display_organization
return self.org
|
pelikanchik/edx-platform
|
common/lib/xmodule/xmodule/course_module.py
|
Python
|
agpl-3.0
| 42,126
|
[
"VisIt"
] |
7b91c9406a47ce0d36ebb83dcbfea8c2e7d17ebda0071b6c02284d86a8b7bd84
|
""" Utilities to evaluate pairwise distances or affinity of sets of samples.
This module contains both distance metrics and kernels. A brief summary is
given on the two here.
Distance metrics are functions d(a, b) such that d(a, b) < d(a, c) if objects
a and b are considered "more similar" than objects a and c. Two objects exactly
alike would have a distance of zero.
One of the most popular examples is Euclidean distance.
To be a 'true' metric, it must obey the following four conditions:
1. d(a, b) >= 0, for all a and b
2. d(a, b) == 0, if and only if a = b, positive definiteness
3. d(a, b) == d(b, a), symmetry
4. d(a, c) <= d(a, b) + d(b, c), the triangle inequality
Kernels are measures of similarity, i.e. s(a, b) > s(a, c) if objects a and b
are considered "more similar" to objects a and c. A kernel must also be
positive semi-definite.
There are a number of ways to convert between a distance metric and a similarity
measure, such as a kernel. Let D be the distance, and S be the kernel:
1. S = np.exp(-D * gamma), where one heuristic for choosing
gamma is 1 / num_features
2. S = 1. / (D / np.max(D))
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD Style.
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix, issparse
from ..utils import safe_asanyarray, atleast2d_or_csr, deprecated
from ..utils.extmath import safe_sparse_dot
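# Hedged sketch (not part of the original module): heuristic 1 from the module docstring
# for turning a distance matrix into a similarity (kernel) matrix, S = exp(-D * gamma),
# where one common choice is gamma = 1 / num_features. The helper name is hypothetical
# and is only meant as an illustration.
def _distance_to_similarity(D, gamma):
    D = np.asarray(D, dtype=float)
    return np.exp(-D * gamma)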
# Utility Functions
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
    then checks that they are at least two dimensional. Finally, the function
checks that the size of the second dimension of the two arrays is equal.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples_a, n_features]
Y: {array-like, sparse matrix}, shape = [n_samples_b, n_features]
Returns
-------
safe_X: {array-like, sparse matrix}, shape = [n_samples_a, n_features]
        An array equal to X, guaranteed to be a numpy array.
safe_Y: {array-like, sparse matrix}, shape = [n_samples_b, n_features]
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
if Y is X or Y is None:
X = Y = safe_asanyarray(X)
else:
X = safe_asanyarray(X)
Y = safe_asanyarray(Y)
X = atleast2d_or_csr(X)
Y = atleast2d_or_csr(Y)
if len(X.shape) < 2:
raise ValueError("X is required to be at least two dimensional.")
if len(Y.shape) < 2:
raise ValueError("Y is required to be at least two dimensional.")
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices")
return X, Y
# Distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
    vectors x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two main advantages. First, it is computationally
efficient when dealing with sparse data. Second, if x varies but y
remains unchanged, then the right-most dot-product `dot(y, y)` can be
pre-computed.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples_1, n_features]
Y: {array-like, sparse matrix}, shape = [n_samples_2, n_features]
Y_norm_squared: array-like, shape = [n_samples_2], optional
Pre-computed dot-products of vectors in Y (e.g., `(Y**2).sum(axis=1)`)
squared: boolean, optional
Return squared Euclidean distances.
Returns
-------
distances: {array, sparse matrix}, shape = [n_samples_1, n_samples_2]
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if issparse(X):
XX = X.multiply(X).sum(axis=1)
else:
XX = np.sum(X * X, axis=1)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is None:
if issparse(Y):
# scipy.sparse matrices don't have element-wise scalar
# exponentiation, and tocsr has a copy kwarg only on CSR matrices.
YY = Y.copy() if isinstance(Y, csr_matrix) else Y.tocsr()
YY.data **= 2
YY = np.asarray(YY.sum(axis=1)).T
else:
YY = np.sum(Y ** 2, axis=1)[np.newaxis, :]
else:
YY = atleast2d_or_csr(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
# TODO: a faster Cython implementation would do the clipping of negative
# values in a single pass over the output matrix.
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
distances = np.maximum(distances, 0)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances)
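# Hedged cross-check (not part of the original module): for dense 1-D vectors the expansion
# used above reduces to the familiar identity (up to floating-point rounding)
#   np.sqrt(np.dot(x, x) - 2 * np.dot(x, y) + np.dot(y, y)) == np.linalg.norm(x - y)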
@deprecated("use euclidean_distances instead")
def euclidian_distances(*args, **kwargs):
return euclidean_distances(*args, **kwargs)
def manhattan_distances(X, Y=None, sum_over_features=True):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X: array_like
An array with shape (n_samples_X, n_features).
Y: array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features: bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Returns
-------
D: array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise l1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)
array([[0]])
>>> manhattan_distances(3, 2)
array([[1]])
>>> manhattan_distances(2, 3)
array([[1]])
>>> manhattan_distances([[1, 2], [3, 4]], [[1, 2], [0, 3]])
array([[0, 2],
[4, 4]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)
array([[ 1., 1.],
[ 1., 1.]])
"""
X, Y = check_pairwise_arrays(X, Y)
n_samples_X, n_features_X = X.shape
n_samples_Y, n_features_Y = Y.shape
if n_features_X != n_features_Y:
raise Exception("X and Y should have the same number of features!")
D = np.abs(X[:, np.newaxis, :] - Y[np.newaxis, :, :])
if sum_over_features:
D = np.sum(D, axis=2)
else:
D = D.reshape((n_samples_X * n_samples_Y, n_features_X))
return D
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X: array of shape (n_samples_1, n_features)
Y: array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=0, coef0=1):
"""
Compute the polynomial kernel between X and Y.
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X: array of shape (n_samples_1, n_features)
Y: array of shape (n_samples_2, n_features)
    degree: int, default 3
    gamma: float, default 0 (interpreted as 1 / n_features)
    coef0: float, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma == 0:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
K **= degree
return K
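# Hedged reference sketch (not part of the original module): on dense inputs the kernel
# above should agree with a direct NumPy evaluation of (gamma * <X, Y> + coef0) ** degree.
# The helper name is hypothetical.
def _polynomial_kernel_reference(X, Y, degree=3, gamma=None, coef0=1):
    X = np.asarray(X, dtype=float)
    Y = np.asarray(Y, dtype=float)
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    return (gamma * np.dot(X, Y.T) + coef0) ** degree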
def sigmoid_kernel(X, Y=None, gamma=0, coef0=1):
"""
Compute the sigmoid kernel between X and Y.
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X: array of shape (n_samples_1, n_features)
Y: array of shape (n_samples_2, n_features)
    gamma: float, default 0 (interpreted as 1 / n_features)
    coef0: float, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma == 0:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=0):
"""
Compute the rbf (gaussian) kernel between X and Y.
K(X, Y) = exp(-gamma ||X-Y||^2)
Parameters
----------
X: array of shape (n_samples_1, n_features)
Y: array of shape (n_samples_2, n_features)
gamma: float
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma == 0:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
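# Hedged cross-check (not part of the original module): on dense inputs the RBF kernel
# above is equivalent to exponentiating squared Euclidean distances, e.g. with the
# scipy.spatial.distance module already imported above:
#   K = np.exp(-gamma * distance.cdist(X, Y, 'sqeuclidean'))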
# Helper functions - distance
pairwise_distance_functions = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'cityblock': manhattan_distances
}
def distance_metrics():
""" Valid metrics for pairwise_distances
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=========== ====================================
metric Function
=========== ====================================
'cityblock' sklearn.pairwise.manhattan_distances
'euclidean' sklearn.pairwise.euclidean_distances
'l1' sklearn.pairwise.manhattan_distances
'l2' sklearn.pairwise.euclidean_distances
'manhattan' sklearn.pairwise.manhattan_distances
=========== ====================================
"""
return pairwise_distance_functions
def pairwise_distances(X, Y=None, metric="euclidean", **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distance matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Please note that support for sparse matrices is currently limited to those
metrics listed in pairwise.pairwise_distance_functions.
Valid values for metric are:
- from scikits.learn: ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
      'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
Note in the case of 'euclidean' and 'cityblock' (which are valid
scipy.spatial.distance metrics), the values will use the scikits.learn
implementation, which is faster and has support for sparse matrices.
For a verbose description of the metrics from scikits.learn, see the
__doc__ of the sklearn.pairwise.distance_metrics function.
Parameters
----------
X: array [n_samples_a, n_samples_a] if metric == "precomputed", or,
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y: array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric: string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.pairwise_distance_functions.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
**kwds: optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D: array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if metric == "precomputed":
if X.shape[0] != X.shape[1]:
raise ValueError("X is not square!")
return X
elif metric in pairwise_distance_functions:
return pairwise_distance_functions[metric](X, Y, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate distance for each element in X and Y.
D = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# Kernel assumed to be symmetric.
D[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
D[j][i] = D[i][j]
return D
else:
# Note: the distance module doesn't support sparse matrices!
if type(X) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
if Y is None:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
else:
if type(Y) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
return distance.cdist(X, Y, metric=metric, **kwds)
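# Hedged usage sketch (not part of the original module): pairwise_distances also accepts a
# callable metric, e.g.
#   D = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).max())
# which evaluates the callable on every pair of rows of X (a Chebyshev-type distance here).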
# Helper functions - kernels
pairwise_kernel_functions = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'linear': linear_kernel
}
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
============ ==================================
metric Function
============ ==================================
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
============ ==================================
"""
return pairwise_kernel_functions
def pairwise_kernels(X, Y=None, metric="linear", **kwds):
""" Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
    preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are:
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear']
Parameters
----------
X: array [n_samples_a, n_samples_a] if metric == "precomputed", or,
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y: array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric: string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.pairwise_kernel_functions.
If metric is "precomputed", X is assumed to be a kernel matrix and
must be square.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
**kwds: optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K: array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
"""
if metric == "precomputed":
if X.shape[0] != X.shape[1]:
raise ValueError("X is not square!")
return X
elif metric in pairwise_kernel_functions:
return pairwise_kernel_functions[metric](X, Y, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate kernel for each element in X and Y.
K = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# Kernel assumed to be symmetric.
K[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
K[j][i] = K[i][j]
return K
else:
raise AttributeError("Unknown metric %s" % metric)
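# Hedged usage sketch (not part of the original module):
#   K = pairwise_kernels(X, metric='rbf', gamma=0.5)
# returns the (n_samples, n_samples) RBF Gram matrix of X, while metric='precomputed'
# simply validates that X is square and returns it unchanged.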
|
ominux/scikit-learn
|
sklearn/metrics/pairwise.py
|
Python
|
bsd-3-clause
| 20,713
|
[
"Gaussian"
] |
b14fb26d857f5ae3f25422aac9ce0cc3ad1d7474aa762a9ea03f7f0da61bb069
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
# with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
# long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='osio-api-tests', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.2.dev1', # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='A pytest based test suite that tests APIs provided by OpenShift.io or OSIO', # Required # noqa
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description='A test suite that tests APIs provided by OpenShift.io or OSIO using pytest, requests and jmespath', # Optional # noqa
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/fabric8io/fabric8-test/tree/master/EE_API_automation/pytest', # Optional # noqa
# This should be your name or the name of the organization which owns the
# project.
author='Ruchir Garg', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='ruchirg@gmail.com', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7'
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='pytest tests for REST API for openshift.io', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
# package_dir={'':'src'}, # tell distutils packages are under src
packages=find_packages(), # Required
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['pytest',
'requests2',
'jmespath'], # Optional
# Python version requirement
python_requires='>=2.7, <3',
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
# extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
# package_data={ # Optional
# # If any package contains *.json files, include them:
# '': ['*.json'],
# },
    # Although 'package_data' is the preferred approach, in some cases you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
    #
    # In this case, 'data_files' will install 'run_me.sh' into '<sys.prefix>/scripts'
data_files=[('scripts', ['run_me.sh'])], # Optional
include_package_data=True,
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
scripts=['run_me.sh'],
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
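# A minimal sketch of the assumed install/run workflow (not documented in this
# repository; the package and script names are taken from the arguments above):
#
#   pip install .   # installs osio-api-tests together with pytest, requests2, jmespath
#   pytest          # runs the API test suite
#
# run_me.sh is shipped both as a script and via the data_files entry above.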
|
ldimaggi/fabric8-test
|
EE_API_automation/pytest/setup.py
|
Python
|
apache-2.0
| 6,875
|
[
"VisIt"
] |
b72599d22a75318a322c3672761273adaacf00d17cb056cf12cd895d57348197
|
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
import math
from collections import defaultdict
import itertools
from itertools import combinations
from itertools import product
from typing import Dict, Any
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from scipy.special import comb
import pytest
import joblib
from numpy.testing import assert_allclose
from sklearn.dummy import DummyRegressor
from sklearn.metrics import mean_poisson_deviance
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import _convert_container
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import skip_if_no_parallel
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.metrics import mean_squared_error
from sklearn.tree._classes import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# Larger classification sample used for testing feature importances
X_large, y_large = datasets.make_classification(
n_samples=500,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0,
)
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# Make regression dataset
X_reg, y_reg = datasets.make_regression(n_samples=500, n_features=10, random_state=1)
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
# Get the default backend in joblib to test parallelism and interaction with
# different backends
DEFAULT_JOBLIB_BACKEND = joblib.parallel.get_active_backend()[0].__class__
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS: Dict[str, Any] = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
FOREST_CLASSIFIERS_REGRESSORS: Dict[str, Any] = FOREST_CLASSIFIERS.copy()
FOREST_CLASSIFIERS_REGRESSORS.update(FOREST_REGRESSORS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert 10 == len(clf)
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert 10 == len(clf)
# also test apply
leaf_indices = clf.apply(X)
assert leaf_indices.shape == (len(X), clf.n_estimators)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_classification_toy(name):
check_classification_toy(name)
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with criterion %s and score = %f" % (criterion, score)
clf = ForestClassifier(
n_estimators=10, criterion=criterion, max_features=2, random_state=1
)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.5, "Failed with criterion %s and score = %f" % (criterion, score)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
@pytest.mark.parametrize("criterion", ("gini", "entropy"))
def test_iris(name, criterion):
check_iris_criterion(name, criterion)
def check_regression_criterion(name, criterion):
# Check consistency on regression dataset.
ForestRegressor = FOREST_REGRESSORS[name]
reg = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
reg.fit(X_reg, y_reg)
score = reg.score(X_reg, y_reg)
assert (
score > 0.93
), "Failed with max_features=None, criterion %s and score = %f" % (
criterion,
score,
)
reg = ForestRegressor(
n_estimators=5, criterion=criterion, max_features=6, random_state=1
)
reg.fit(X_reg, y_reg)
score = reg.score(X_reg, y_reg)
assert score > 0.92, "Failed with max_features=6, criterion %s and score = %f" % (
criterion,
score,
)
@pytest.mark.parametrize("name", FOREST_REGRESSORS)
@pytest.mark.parametrize(
"criterion", ("squared_error", "absolute_error", "friedman_mse")
)
def test_regression(name, criterion):
check_regression_criterion(name, criterion)
def test_poisson_vs_mse():
"""Test that random forest with poisson criterion performs better than
mse for a poisson target.
There is a similar test for DecisionTreeRegressor.
"""
rng = np.random.RandomState(42)
n_train, n_test, n_features = 500, 500, 10
X = datasets.make_low_rank_matrix(
n_samples=n_train + n_test, n_features=n_features, random_state=rng
)
# We create a log-linear Poisson model and downscale coef as it will get
# exponentiated.
coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
y = rng.poisson(lam=np.exp(X @ coef))
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=n_test, random_state=rng
)
# We prevent some overfitting by setting min_samples_split=10.
forest_poi = RandomForestRegressor(
criterion="poisson", min_samples_leaf=10, max_features="sqrt", random_state=rng
)
forest_mse = RandomForestRegressor(
criterion="squared_error",
min_samples_leaf=10,
max_features="sqrt",
random_state=rng,
)
forest_poi.fit(X_train, y_train)
forest_mse.fit(X_train, y_train)
dummy = DummyRegressor(strategy="mean").fit(X_train, y_train)
for X, y, data_name in [(X_train, y_train, "train"), (X_test, y_test, "test")]:
metric_poi = mean_poisson_deviance(y, forest_poi.predict(X))
# squared_error forest might produce non-positive predictions => clip
# If y = 0 for those, the poisson deviance gets too good.
# If we drew more samples, we would eventually get y > 0 and the
# poisson deviance would explode, i.e. be undefined. Therefore, we do
# not clip to a tiny value like 1e-15, but to 1e-6. This acts like a
# small penalty to the non-positive predictions.
metric_mse = mean_poisson_deviance(
y, np.clip(forest_mse.predict(X), 1e-6, None)
)
metric_dummy = mean_poisson_deviance(y, dummy.predict(X))
# As squared_error might correctly predict 0 in train set, its train
# score can be better than Poisson. This is no longer the case for the
# test set. But keep the above comment for clipping in mind.
if data_name == "test":
assert metric_poi < metric_mse
assert metric_poi < 0.8 * metric_dummy
@pytest.mark.parametrize("criterion", ("poisson", "squared_error"))
def test_balance_property_random_forest(criterion):
""" "Test that sum(y_pred)==sum(y_true) on the training set."""
rng = np.random.RandomState(42)
n_train, n_test, n_features = 500, 500, 10
X = datasets.make_low_rank_matrix(
n_samples=n_train + n_test, n_features=n_features, random_state=rng
)
coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
y = rng.poisson(lam=np.exp(X @ coef))
reg = RandomForestRegressor(
criterion=criterion, n_estimators=10, bootstrap=False, random_state=rng
)
reg.fit(X, y)
assert np.sum(reg.predict(X)) == pytest.approx(np.sum(y))
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert not hasattr(r, "classes_")
assert not hasattr(r, "n_classes_")
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert not hasattr(r, "classes_")
assert not hasattr(r, "n_classes_")
@pytest.mark.parametrize("name", FOREST_REGRESSORS)
def test_regressor_attributes(name):
check_regressor_attributes(name)
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(
n_estimators=10, random_state=1, max_features=1, max_depth=1
)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(
np.sum(clf.predict_proba(iris.data), axis=1), np.ones(iris.data.shape[0])
)
assert_array_almost_equal(
clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data))
)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_probability(name):
check_probability(name)
def check_importances(name, criterion, dtype, tolerance):
    # cast as dtype
X = X_large.astype(dtype, copy=False)
y = y_large.astype(dtype, copy=False)
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=10, criterion=criterion, random_state=0)
est.fit(X, y)
importances = est.feature_importances_
# The forest estimator can detect that only the first 3 features of the
# dataset are informative:
n_important = np.sum(importances > 0.1)
assert importances.shape[0] == 10
assert n_important == 3
assert np.all(importances[:3] > 0.1)
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
importances_parallel = est.feature_importances_
assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=10, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert np.all(importances >= 0.0)
for scale in [0.5, 100]:
est = ForestEstimator(n_estimators=10, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert np.abs(importances - importances_bis).mean() < tolerance
@pytest.mark.parametrize("dtype", (np.float64, np.float32))
@pytest.mark.parametrize(
"name, criterion",
itertools.chain(
product(FOREST_CLASSIFIERS, ["gini", "entropy"]),
product(FOREST_REGRESSORS, ["squared_error", "friedman_mse", "absolute_error"]),
),
)
def test_importances(dtype, name, criterion):
tolerance = 0.01
if name in FOREST_REGRESSORS and criterion == "absolute_error":
tolerance = 0.05
check_importances(name, criterion, dtype, tolerance)
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
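    # Reading aid (not part of the original test): the mdi_importance helper
    # below follows the closed form from Louppe et al. (2013) for totally
    # randomized trees,
    #
    #   Imp(X_m) = sum_{k=0}^{p-1} 1 / (C(p, k) * (p - k))
    #              * sum_{B subset of V^{-m}, |B| = k} I(X_m; Y | B)
    #
    # where I(X_m; Y | B) is the conditional mutual information, expanded in
    # the code as sum_b P(B=b) * (H(Y|B=b) - sum_i P(X_m=x_i|B=b) * H(Y|B=b, X_m=x_i)).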
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.0
for count in np.bincount(samples):
p = 1.0 * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.0
for k in range(n_features):
# Weight of each B of size k
coef = 1.0 / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (
coef
* (1.0 * n_samples_b / n_samples) # P(B=b)
* (
entropy(y_)
- sum(
[
entropy(c) * len(c) / n_samples_b
for c in children
]
)
)
)
return imp
data = np.array(
[
[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0],
]
)
X, y = np.array(data[:, :7], dtype=bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(
n_estimators=500, max_features=1, criterion="entropy", random_state=0
).fit(X, y)
importances = (
sum(
tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_
)
/ clf.n_estimators
)
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert np.abs(true_importances - importances).mean() < 0.01
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_unfitted_feature_importances(name):
err_msg = (
"This {} instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator.".format(name)
)
with pytest.raises(NotFittedError, match=err_msg):
getattr(FOREST_ESTIMATORS[name](), "feature_importances_")
@pytest.mark.parametrize("ForestClassifier", FOREST_CLASSIFIERS.values())
@pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"])
@pytest.mark.parametrize(
"X, y, lower_bound_accuracy",
[
(
*datasets.make_classification(n_samples=300, n_classes=2, random_state=0),
0.9,
),
(
*datasets.make_classification(
n_samples=1000, n_classes=3, n_informative=6, random_state=0
),
0.65,
),
(
iris.data,
iris.target * 2 + 1,
0.65,
),
(
*datasets.make_multilabel_classification(n_samples=300, random_state=0),
0.18,
),
],
)
def test_forest_classifier_oob(ForestClassifier, X, y, X_type, lower_bound_accuracy):
"""Check that OOB score is close to score on a test set."""
X = _convert_container(X, constructor_name=X_type)
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
test_size=0.5,
random_state=0,
)
classifier = ForestClassifier(
n_estimators=40,
bootstrap=True,
oob_score=True,
random_state=0,
)
assert not hasattr(classifier, "oob_score_")
assert not hasattr(classifier, "oob_decision_function_")
classifier.fit(X_train, y_train)
test_score = classifier.score(X_test, y_test)
assert abs(test_score - classifier.oob_score_) <= 0.1
assert classifier.oob_score_ >= lower_bound_accuracy
assert hasattr(classifier, "oob_score_")
assert not hasattr(classifier, "oob_prediction_")
assert hasattr(classifier, "oob_decision_function_")
if y.ndim == 1:
expected_shape = (X_train.shape[0], len(set(y)))
else:
expected_shape = (X_train.shape[0], len(set(y[:, 0])), y.shape[1])
assert classifier.oob_decision_function_.shape == expected_shape
@pytest.mark.parametrize("ForestRegressor", FOREST_REGRESSORS.values())
@pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"])
@pytest.mark.parametrize(
"X, y, lower_bound_r2",
[
(
*datasets.make_regression(
n_samples=500, n_features=10, n_targets=1, random_state=0
),
0.7,
),
(
*datasets.make_regression(
n_samples=500, n_features=10, n_targets=2, random_state=0
),
0.55,
),
],
)
def test_forest_regressor_oob(ForestRegressor, X, y, X_type, lower_bound_r2):
"""Check that forest-based regressor provide an OOB score close to the
score on a test set."""
X = _convert_container(X, constructor_name=X_type)
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
test_size=0.5,
random_state=0,
)
regressor = ForestRegressor(
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=0,
)
assert not hasattr(regressor, "oob_score_")
assert not hasattr(regressor, "oob_prediction_")
regressor.fit(X_train, y_train)
test_score = regressor.score(X_test, y_test)
assert abs(test_score - regressor.oob_score_) <= 0.1
assert regressor.oob_score_ >= lower_bound_r2
assert hasattr(regressor, "oob_score_")
assert hasattr(regressor, "oob_prediction_")
assert not hasattr(regressor, "oob_decision_function_")
if y.ndim == 1:
expected_shape = (X_train.shape[0],)
else:
expected_shape = (X_train.shape[0], y.ndim)
assert regressor.oob_prediction_.shape == expected_shape
@pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values())
def test_forest_oob_warning(ForestEstimator):
"""Check that a warning is raised when not enough estimator and the OOB
estimates will be inaccurate."""
estimator = ForestEstimator(
n_estimators=1,
oob_score=True,
bootstrap=True,
random_state=0,
)
with pytest.warns(UserWarning, match="Some inputs do not have OOB scores"):
estimator.fit(iris.data, iris.target)
@pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values())
@pytest.mark.parametrize(
"X, y, params, err_msg",
[
(
iris.data,
iris.target,
{"oob_score": True, "bootstrap": False},
"Out of bag estimation only available if bootstrap=True",
),
(
iris.data,
rng.randint(low=0, high=5, size=(iris.data.shape[0], 2)),
{"oob_score": True, "bootstrap": True},
"The type of target cannot be used to compute OOB estimates",
),
],
)
def test_forest_oob_error(ForestEstimator, X, y, params, err_msg):
estimator = ForestEstimator(**params)
with pytest.raises(ValueError, match=err_msg):
estimator.fit(X, y)
@pytest.mark.parametrize("oob_score", [True, False])
def test_random_trees_embedding_raise_error_oob(oob_score):
with pytest.raises(TypeError, match="got an unexpected keyword argument"):
RandomTreesEmbedding(oob_score=oob_score)
with pytest.raises(NotImplementedError, match="OOB score not supported"):
RandomTreesEmbedding()._set_oob_score_and_attributes(X, y)
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {"n_estimators": (1, 2), "max_depth": (1, 2)})
clf.fit(iris.data, iris.target)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_gridsearch(name):
# Check that base trees can be grid-searched.
check_gridsearch(name)
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert len(forest) == 10
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_parallel(name):
if name in FOREST_CLASSIFIERS:
X = iris.data
y = iris.target
elif name in FOREST_REGRESSORS:
X = X_reg
y = y_reg
check_parallel(name, X, y)
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert type(obj2) == obj.__class__
score2 = obj2.score(X, y)
assert score == score2
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_pickle(name):
if name in FOREST_CLASSIFIERS:
X = iris.data
y = iris.target
elif name in FOREST_REGRESSORS:
X = X_reg
y = y_reg
check_pickle(name, X[::2], y[::2])
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [
[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2],
]
y_train = [
[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3],
]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert len(proba) == 2
assert proba[0].shape == (4, 2)
assert proba[1].shape == (4, 4)
log_proba = est.predict_log_proba(X_test)
assert len(log_proba) == 2
assert log_proba[0].shape == (4, 2)
assert log_proba[1].shape == (4, 4)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_multioutput(name):
check_multioutput(name)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_multioutput_string(name):
# Check estimators on multi-output problems with string outputs.
X_train = [
[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2],
]
y_train = [
["red", "blue"],
["red", "blue"],
["red", "blue"],
["green", "green"],
["green", "green"],
["green", "green"],
["red", "purple"],
["red", "purple"],
["red", "purple"],
["green", "yellow"],
["green", "yellow"],
["green", "yellow"],
]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [
["red", "blue"],
["green", "green"],
["red", "purple"],
["green", "yellow"],
]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_equal(y_pred, y_test)
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert len(proba) == 2
assert proba[0].shape == (4, 2)
assert proba[1].shape == (4, 4)
log_proba = est.predict_log_proba(X_test)
assert len(log_proba) == 2
assert log_proba[0].shape == (4, 2)
assert log_proba[1].shape == (4, 4)
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert clf.n_classes_ == 2
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_classes_shape(name):
check_classes_shape(name)
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert type(X_transformed) == np.ndarray
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(
n_estimators=10, sparse_output=False, random_state=0
)
hasher_sparse = RandomTreesEmbedding(
n_estimators=10, sparse_output=True, random_state=0
)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(), X_transformed.toarray())
# one leaf active per data point per forest
assert X_transformed.shape[0] == X.shape[0]
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert linear_clf.score(X_reduced, y) == 1.0
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs, random_state=12345).fit(
X_train, y_train
)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
reg = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in reg.estimators_:
tree = "".join(
("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature, tree.tree_.threshold)
)
uniques[tree] += 1
uniques = sorted([(1.0 * count / n_trees, tree) for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert len(uniques) == 5
assert 0.20 > uniques[0][0] # Rough approximation of 1/6.
assert 0.20 > uniques[1][0]
assert 0.20 > uniques[2][0]
assert 0.20 > uniques[3][0]
assert uniques[4][0] > 0.3
assert uniques[4][1] == "0,1/0,0/--0,2/--"
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
reg = ExtraTreesRegressor(max_features=1, random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in reg.estimators_:
tree = "".join(
("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature, tree.tree_.threshold)
)
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert len(uniques) == 8
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(
max_depth=1, max_leaf_nodes=4, n_estimators=1, random_state=0
).fit(X, y)
assert est.estimators_[0].get_depth() == 1
est = ForestEstimator(max_depth=1, n_estimators=1, random_state=0).fit(X, y)
assert est.estimators_[0].get_depth() == 1
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_max_leaf_nodes_max_depth(name):
check_max_leaf_nodes_max_depth(name)
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
with pytest.raises(ValueError):
ForestEstimator(min_samples_split=-1).fit(X, y)
with pytest.raises(ValueError):
ForestEstimator(min_samples_split=0).fit(X, y)
with pytest.raises(ValueError):
ForestEstimator(min_samples_split=1.1).fit(X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert np.min(node_samples) > len(X) * 0.5 - 1, "Failed with {0}".format(name)
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert np.min(node_samples) > len(X) * 0.5 - 1, "Failed with {0}".format(name)
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_min_samples_split(name):
check_min_samples_split(name)
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
with pytest.raises(ValueError):
ForestEstimator(min_samples_leaf=-1).fit(X, y)
with pytest.raises(ValueError):
ForestEstimator(min_samples_leaf=0).fit(X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert np.min(leaf_count) > 4, "Failed with {0}".format(name)
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert np.min(leaf_count) > len(X) * 0.25 - 1, "Failed with {0}".format(name)
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_min_samples_leaf(name):
check_min_samples_leaf(name)
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(
min_weight_fraction_leaf=frac, n_estimators=1, random_state=0
)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert (
np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf
), "Failed with {0} min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf
)
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_min_weight_fraction_leaf(name):
check_min_weight_fraction_leaf(name)
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(
sparse.feature_importances_, dense.feature_importances_
)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X))
assert_array_almost_equal(
sparse.predict_log_proba(X), dense.predict_log_proba(X)
)
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(
sparse.transform(X).toarray(), dense.transform(X).toarray()
)
assert_array_almost_equal(
sparse.fit_transform(X).toarray(), dense.fit_transform(X).toarray()
)
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
@pytest.mark.parametrize("sparse_matrix", (csr_matrix, csc_matrix, coo_matrix))
def test_sparse_input(name, sparse_matrix):
X, y = datasets.make_multilabel_classification(random_state=0, n_samples=50)
check_sparse_input(name, X, sparse_matrix(X), y)
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_almost_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_almost_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_almost_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_almost_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_almost_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_almost_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_almost_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_almost_equal(est.fit(X, y).predict(X), y)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
@pytest.mark.parametrize("dtype", (np.float64, np.float32))
def test_memory_layout(name, dtype):
check_memory_layout(name, dtype)
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
with pytest.raises(ValueError):
ForestEstimator(n_estimators=1, random_state=0).fit(X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
with pytest.raises(ValueError):
est.predict(X)
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_1d_input(name):
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
with ignore_warnings():
check_1d_input(name, X, X_2d, y)
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight="balanced", random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(
class_weight=[
{0: 2.0, 1: 2.0, 2: 1.0},
{0: 2.0, 1: 1.0, 2: 2.0},
{0: 1.0, 1: 2.0, 2: 2.0},
],
random_state=0,
)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight="balanced", random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1.0, 1: 100.0, 2: 1.0}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight**2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_class_weights(name):
check_class_weights(name)
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight="balanced", random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(
class_weight=[{-1: 0.5, 1: 1.0}, {-2: 1.0, 2: 1.0}], random_state=0
)
clf.fit(X, _y)
# smoke test for balanced subsample
clf = ForestClassifier(class_weight="balanced_subsample", random_state=0)
clf.fit(X, _y)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_class_weight_balanced_and_bootstrap_multi_output(name):
check_class_weight_balanced_and_bootstrap_multi_output(name)
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight="the larch", random_state=0)
with pytest.raises(ValueError):
clf.fit(X, y)
with pytest.raises(ValueError):
clf.fit(X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight="balanced", warm_start=True, random_state=0)
clf.fit(X, y)
warn_msg = (
"Warm-start fitting without increasing n_estimators does not fit new trees."
)
with pytest.warns(UserWarning, match=warn_msg):
clf.fit(X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
with pytest.raises(ValueError):
clf.fit(X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.0}], random_state=0)
with pytest.raises(ValueError):
clf.fit(X, _y)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_class_weight_errors(name):
check_class_weight_errors(name)
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
est_ws = None
for n_estimators in [5, 10]:
if est_ws is None:
est_ws = ForestEstimator(
n_estimators=n_estimators, random_state=random_state, warm_start=True
)
else:
est_ws.set_params(n_estimators=n_estimators)
est_ws.fit(X, y)
assert len(est_ws) == n_estimators
est_no_ws = ForestEstimator(
n_estimators=10, random_state=random_state, warm_start=False
)
est_no_ws.fit(X, y)
assert set([tree.random_state for tree in est_ws]) == set(
[tree.random_state for tree in est_no_ws]
)
assert_array_equal(
est_ws.apply(X), est_no_ws.apply(X), err_msg="Failed with {0}".format(name)
)
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_warm_start(name):
check_warm_start(name)
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1)
est.fit(X, y)
est_2 = ForestEstimator(
n_estimators=5, max_depth=1, warm_start=True, random_state=2
)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False, random_state=1)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.apply(X), est.apply(X))
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_warm_start_clear(name):
check_warm_start_clear(name)
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=4)
with pytest.raises(ValueError):
est.fit(X, y)
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_warm_start_smaller_n_estimators(name):
check_warm_start_smaller_n_estimators(name)
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1)
est.fit(X, y)
est_2 = ForestEstimator(
n_estimators=5, max_depth=3, warm_start=True, random_state=1
)
est_2.fit(X, y)
# Now est_2 equals est.
est_2.set_params(random_state=2)
warn_msg = (
"Warm-start fitting without increasing n_estimators does not fit new trees."
)
with pytest.warns(UserWarning, match=warn_msg):
est_2.fit(X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(est.apply(X), est_2.apply(X))
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_warm_start_equal_n_estimators(name):
check_warm_start_equal_n_estimators(name)
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
est = ForestEstimator(
n_estimators=15,
max_depth=3,
warm_start=False,
random_state=1,
bootstrap=True,
oob_score=True,
)
est.fit(X, y)
est_2 = ForestEstimator(
n_estimators=5,
max_depth=3,
warm_start=False,
random_state=1,
bootstrap=True,
oob_score=False,
)
est_2.fit(X, y)
est_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
est_2.fit(X, y)
assert hasattr(est_2, "oob_score_")
assert est.oob_score_ == est_2.oob_score_
# Test that oob_score is computed even if we don't need to train
# additional trees.
est_3 = ForestEstimator(
n_estimators=15,
max_depth=3,
warm_start=True,
random_state=1,
bootstrap=True,
oob_score=False,
)
est_3.fit(X, y)
assert not hasattr(est_3, "oob_score_")
est_3.set_params(oob_score=True)
ignore_warnings(est_3.fit)(X, y)
assert est.oob_score_ == est_3.oob_score_
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_warm_start_oob(name):
check_warm_start_oob(name)
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in "ABCDEFGHIJKLMNOPQRSTU"[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert indicator.shape[1] == n_nodes_ptr[-1]
assert indicator.shape[0] == n_samples
assert_array_equal(
np.diff(n_nodes_ptr), [e.tree_.node_count for e in est.estimators_]
)
# Assert that leaves index are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [
indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])
]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_decision_path(name):
check_decision_path(name)
def test_min_impurity_decrease():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [
RandomForestClassifier,
RandomForestRegressor,
ExtraTreesClassifier,
ExtraTreesRegressor,
]
for Estimator in all_estimators:
est = Estimator(min_impurity_decrease=0.1)
est.fit(X, y)
for tree in est.estimators_:
# Simply check if the parameter is passed on correctly. Tree tests
# will suffice for the actual working of this param
assert tree.min_impurity_decrease == 0.1
def test_poisson_y_positive_check():
est = RandomForestRegressor(criterion="poisson")
X = np.zeros((3, 3))
y = [-1, 1, 3]
err_msg = (
r"Some value\(s\) of y are negative which is "
r"not allowed for Poisson regression."
)
with pytest.raises(ValueError, match=err_msg):
est.fit(X, y)
y = [0, 0, 0]
err_msg = (
r"Sum of y is not strictly positive which "
r"is necessary for Poisson regression."
)
with pytest.raises(ValueError, match=err_msg):
est.fit(X, y)
# mypy error: Variable "DEFAULT_JOBLIB_BACKEND" is not valid type
class MyBackend(DEFAULT_JOBLIB_BACKEND): # type: ignore
def __init__(self, *args, **kwargs):
self.count = 0
super().__init__(*args, **kwargs)
def start_call(self):
self.count += 1
return super().start_call()
joblib.register_parallel_backend("testing", MyBackend)
@skip_if_no_parallel
def test_backend_respected():
clf = RandomForestClassifier(n_estimators=10, n_jobs=2)
with joblib.parallel_backend("testing") as (ba, n_jobs):
clf.fit(X, y)
assert ba.count > 0
# predict_proba requires shared memory. Ensure that's honored.
with joblib.parallel_backend("testing") as (ba, _):
clf.predict_proba(X)
assert ba.count == 0
def test_forest_feature_importances_sum():
X, y = make_classification(
n_samples=15, n_informative=3, random_state=1, n_classes=3
)
clf = RandomForestClassifier(
min_samples_leaf=5, random_state=42, n_estimators=200
).fit(X, y)
assert math.isclose(1, clf.feature_importances_.sum(), abs_tol=1e-7)
def test_forest_degenerate_feature_importances():
# build a forest of single node trees. See #13636
X = np.zeros((10, 10))
y = np.ones((10,))
gbr = RandomForestRegressor(n_estimators=10).fit(X, y)
assert_array_equal(gbr.feature_importances_, np.zeros(10, dtype=np.float64))
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_max_samples_bootstrap(name):
# Check invalid `max_samples` values
est = FOREST_CLASSIFIERS_REGRESSORS[name](bootstrap=False, max_samples=0.5)
err_msg = (
r"`max_sample` cannot be set if `bootstrap=False`. "
r"Either switch to `bootstrap=True` or set "
r"`max_sample=None`."
)
with pytest.raises(ValueError, match=err_msg):
est.fit(X, y)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
@pytest.mark.parametrize(
"max_samples, exc_type, exc_msg",
[
(
int(1e9),
ValueError,
"`max_samples` must be in range 1 to 6 but got value 1000000000",
),
(
2.0,
ValueError,
r"`max_samples` must be in range \(0.0, 1.0\] but got value 2.0",
),
(
0.0,
ValueError,
r"`max_samples` must be in range \(0.0, 1.0\] but got value 0.0",
),
(
np.nan,
ValueError,
r"`max_samples` must be in range \(0.0, 1.0\] but got value nan",
),
(
np.inf,
ValueError,
r"`max_samples` must be in range \(0.0, 1.0\] but got value inf",
),
(
"str max_samples?!",
TypeError,
r"`max_samples` should be int or float, but got " r"type '\<class 'str'\>'",
),
(
np.ones(2),
TypeError,
r"`max_samples` should be int or float, but got type "
r"'\<class 'numpy.ndarray'\>'",
),
],
# Avoid long error messages in test names:
# https://github.com/scikit-learn/scikit-learn/issues/21362
ids=lambda x: x[:10].replace("]", "") if isinstance(x, str) else x,
)
def test_max_samples_exceptions(name, max_samples, exc_type, exc_msg):
# Check invalid `max_samples` values
est = FOREST_CLASSIFIERS_REGRESSORS[name](bootstrap=True, max_samples=max_samples)
with pytest.raises(exc_type, match=exc_msg):
est.fit(X, y)
@pytest.mark.parametrize("name", FOREST_REGRESSORS)
def test_max_samples_boundary_regressors(name):
X_train, X_test, y_train, y_test = train_test_split(
X_reg, y_reg, train_size=0.7, test_size=0.3, random_state=0
)
ms_1_model = FOREST_REGRESSORS[name](
bootstrap=True, max_samples=1.0, random_state=0
)
ms_1_predict = ms_1_model.fit(X_train, y_train).predict(X_test)
ms_None_model = FOREST_REGRESSORS[name](
bootstrap=True, max_samples=None, random_state=0
)
ms_None_predict = ms_None_model.fit(X_train, y_train).predict(X_test)
ms_1_ms = mean_squared_error(ms_1_predict, y_test)
ms_None_ms = mean_squared_error(ms_None_predict, y_test)
assert ms_1_ms == pytest.approx(ms_None_ms)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_max_samples_boundary_classifiers(name):
X_train, X_test, y_train, _ = train_test_split(
X_large, y_large, random_state=0, stratify=y_large
)
ms_1_model = FOREST_CLASSIFIERS[name](
bootstrap=True, max_samples=1.0, random_state=0
)
ms_1_proba = ms_1_model.fit(X_train, y_train).predict_proba(X_test)
ms_None_model = FOREST_CLASSIFIERS[name](
bootstrap=True, max_samples=None, random_state=0
)
ms_None_proba = ms_None_model.fit(X_train, y_train).predict_proba(X_test)
np.testing.assert_allclose(ms_1_proba, ms_None_proba)
def test_forest_y_sparse():
X = [[1, 2, 3]]
y = csr_matrix([4, 5, 6])
est = RandomForestClassifier()
msg = "sparse multilabel-indicator for y is not supported."
with pytest.raises(ValueError, match=msg):
est.fit(X, y)
@pytest.mark.parametrize("ForestClass", [RandomForestClassifier, RandomForestRegressor])
def test_little_tree_with_small_max_samples(ForestClass):
rng = np.random.RandomState(1)
X = rng.randn(10000, 2)
y = rng.randn(10000) > 0
# First fit with no restriction on max samples
est1 = ForestClass(
n_estimators=1,
random_state=rng,
max_samples=None,
)
# Second fit with max samples restricted to just 2
est2 = ForestClass(
n_estimators=1,
random_state=rng,
max_samples=2,
)
est1.fit(X, y)
est2.fit(X, y)
tree1 = est1.estimators_[0].tree_
tree2 = est2.estimators_[0].tree_
msg = "Tree without `max_samples` restriction should have more nodes"
assert tree1.node_count > tree2.node_count, msg
# FIXME: remove in 1.2
@pytest.mark.parametrize(
"Estimator",
[
ExtraTreesClassifier,
ExtraTreesRegressor,
RandomForestClassifier,
RandomForestRegressor,
RandomTreesEmbedding,
],
)
def test_n_features_deprecation(Estimator):
# Check that we raise the proper deprecation warning if accessing
# `n_features_`.
X = np.array([[1, 2], [3, 4]])
y = np.array([1, 0])
est = Estimator().fit(X, y)
with pytest.warns(FutureWarning, match="`n_features_` was deprecated"):
est.n_features_
# TODO: Remove in v1.3
@pytest.mark.parametrize(
"Estimator",
[
ExtraTreesClassifier,
ExtraTreesRegressor,
RandomForestClassifier,
RandomForestRegressor,
],
)
def test_max_features_deprecation(Estimator):
"""Check warning raised for max_features="auto" deprecation."""
X = np.array([[1, 2], [3, 4]])
y = np.array([1, 0])
est = Estimator(max_features="auto")
err_msg = (
r"`max_features='auto'` has been deprecated in 1.1 "
r"and will be removed in 1.3. To keep the past behaviour, "
r"explicitly set `max_features=(1.0|'sqrt')` or remove this "
r"parameter as it is also the default value for RandomForest"
r"(Regressors|Classifiers) and ExtraTrees(Regressors|"
r"Classifiers)\."
)
with pytest.warns(FutureWarning, match=err_msg):
est.fit(X, y)
# TODO: Remove in v1.2
@pytest.mark.parametrize(
"old_criterion, new_criterion",
[
("mse", "squared_error"),
("mae", "absolute_error"),
],
)
def test_criterion_deprecated(old_criterion, new_criterion):
est1 = RandomForestRegressor(criterion=old_criterion, random_state=0)
with pytest.warns(
FutureWarning, match=f"Criterion '{old_criterion}' was deprecated"
):
est1.fit(X, y)
est2 = RandomForestRegressor(criterion=new_criterion, random_state=0)
est2.fit(X, y)
assert_allclose(est1.predict(X), est2.predict(X))
@pytest.mark.parametrize("Forest", FOREST_REGRESSORS)
def test_mse_criterion_object_segfault_smoke_test(Forest):
# This is a smoke test to ensure that passing a mutable criterion
# does not cause a segfault when fitting with concurrent threads.
# Non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/12623
from sklearn.tree._criterion import MSE
y = y_reg.reshape(-1, 1)
n_samples, n_outputs = y.shape
mse_criterion = MSE(n_outputs, n_samples)
est = FOREST_REGRESSORS[Forest](n_estimators=2, n_jobs=2, criterion=mse_criterion)
est.fit(X_reg, y)
def test_random_trees_embedding_feature_names_out():
"""Check feature names out for Random Trees Embedding."""
random_state = np.random.RandomState(0)
X = np.abs(random_state.randn(100, 4))
hasher = RandomTreesEmbedding(
n_estimators=2, max_depth=2, sparse_output=False, random_state=0
).fit(X)
names = hasher.get_feature_names_out()
expected_names = [
f"randomtreesembedding_{tree}_{leaf}"
# Note: nodes with indices 0, 1 and 4 are internal split nodes and
# therefore do not appear in the expected output feature names.
for tree, leaf in [
(0, 2),
(0, 3),
(0, 5),
(0, 6),
(1, 2),
(1, 3),
(1, 5),
(1, 6),
]
]
assert_array_equal(expected_names, names)
|
scikit-learn/scikit-learn
|
sklearn/ensemble/tests/test_forest.py
|
Python
|
bsd-3-clause
| 59,844
|
[
"Brian"
] |
72115c71c860356060bc16f378e9c2533c18d7d02546903736a5880e5c896a21
|
# Copyright 2012 IKT Leibniz Universitaet Hannover
# GPL2
# Zdravko Bozakov (zb@ikt.uni-hannover.de)
import os
import sys
import pprint
import logging
import threading
import time
import subprocess
from collections import deque
try:
# import numpy as np #FIXME
from numpy import *
import types # for cython binding
except ImportError:
print __name__ + ": please make sure the following packages are installed:"
print "\tpython-numpy"
exit(1)
import hphelper
from hphelper import WARN, ERROR
import hplotting
try:
import setproctitle
setproctitle.setproctitle('h-probe')
except:
pass # Ignore errors, since this is only cosmetic
try:
# import cython functions if available
import hpfast
except (ImportError, ValueError) as e:
pass
options = hphelper.options
DEBUG = hphelper.DEBUG
if not options.DEBUG:
    # avoid warnings when using log10 with negative values
#np.seterr(invalid='ignore')
pass
class XcovEst(object):
def __init__(self, max_lag):
self.L = max_lag # max covariance lag
self._xc = zeros(self.L, dtype=int)
self.win = zeros(self.L, dtype=bool)
self.probe_count = 0
self.slot_count = 0
#self.av_coeff = self.aggvar_coeff(self.L)
def append(self, x, zero_count = 0):
""" Appends the last received probe to the sliding window win
containing the last L values (lags) and adds the contents of win
to the biased covariance vector xc.
Args:
x: Contains the probe value which must be 0 or 1
zero_count: Specifies the number of empty time slots to
append before the current probe.
"""
        self.slot_count += zero_count+1 # keep track of total number of counted slots (should be all)
        self.probe_count += x # increment counter for each received probe (value is zero or one)
zero_count = min(self.L-1, zero_count)
#if zero_count<0: return # ERROR: negative slot increment - should not happen!
# shift window to the left (concatenate is faster than roll for shifting)
self.win = concatenate( (self.win[zero_count+1:], zeros(zero_count, dtype=bool), [bool(x)]) )
# increment autocovariance (just for non-zero values of win and only if x=1)
if x == 1:
self._xc[nonzero(self.win)] += x
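    # Reading aid (not part of the original module): with L = 3 and the probe
    # stream 1, 1, 0, 1 (no empty slots) the window evolves as
    #   [0,0,1] -> [0,1,1] -> [1,1,0] -> [1,0,1]
    # and every received probe (x == 1) increments _xc at each lag whose
    # window slot is set, so _xc accumulates the lagged sums
    # S(l) = sum_t x(t)*x(t-l) that the xcov property scales below.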
def test(self, data=None):
if data == None:
data = [1,1,0,1,1,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,1,0,1]
for x in data:
self.append(x)
print self.mean
print self.xcov
#print self
@property
def xc(self):
""" return the unscaled autocorrelation """
# flip array so that lag zero is on the left hand side
return self._xc[::-1]
@property
def mean(self):
""" return the mean of the observation vector """
try:
return self.probe_count*1.0/self.slot_count
except ZeroDivisionError:
return nan
def var(self, mean_w=0.0):
"""Returns the variance of the observation process """
if not mean_w:
mean_w = self.mean
# the covariance at lag 0 is the variance of the
# observation process
try:
return self._xc[-1]*1.0/self.slot_count - mean_w**2
except:
return nan
@property
def xcov(self):
"""calculates the autocovariance estimate from the sliding window sums
xc. Returns lags 1 to L."""
N_unbiased = self.slot_count - arange(self.L, dtype=float)
N_unbiased[N_unbiased < 1] = nan
return self.xc*1.0/N_unbiased - self.mean**2
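    # In formula form (a sketch of what xcov computes): with S(l) = sum_t
    # x(t)*x(t-l) accumulated over the N observed slots, the estimate is
    #   gamma(l) ~= S(l) / (N - l) - mean**2
    # i.e. the autocovariance of the 0/1 slot process with a per-lag
    # denominator of N - l.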
def aggvar_coeff(self, L):
try:
av_coeff = diag(ones(L))
except MemoryError:
ERROR('L is too large for aggregated variance estimator (L=%d)' % L)
raise SystemExit(-1)
for i in xrange(L):
av_coeff[i, :i+1] = arange(i+1, 0, -1)
av_coeff *= 2
av_coeff[:,0] /=2
return av_coeff
@property
def aggvar___(self):
""" """
xc = self.xcov
av = empty(self.L)
for m in xrange(1,self.L):
t = arange(1,m)
av[m] = xc[0]/m + sum((m-t)*xc[t])*2/m**2
av[0] = nan
return av
@property
def aggvar(self):
""" """
try:
av = dot(self.av_coeff,self.xcov)/arange(1,self.L+1)**2
except AttributeError:
print "generating coefficients for aggregated variance estimate..."
self.av_coeff = self.aggvar_coeff(self.L)
print 'done.'
av = dot(self.av_coeff,self.xcov)/arange(1,self.L+1)**2
return av
def __str__(self):
s = '\t'.join(['%.6f' % i for i in self.xcov])
return s
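# --- Editor's sketch (not part of the original h-probe tool) ----------------
# A minimal usage example for XcovEst above, assuming only the numpy star
# import at the top of this module. A short synthetic 0/1 probe stream is
# appended one slot at a time; afterwards the busy fraction and the
# autocovariance estimate can be read back.
def _xcov_est_demo():
    est = XcovEst(max_lag=8)
    for probe in [1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0]:
        est.append(probe)          # zero_count defaults to 0: one probe per slot
    return est.mean, est.xcov      # mean busy fraction, autocovariance vs. lag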
class XcovEstimator(threading.Thread):
"""An online estimator for the covariance of a point process.
The object is fed samples using the append() function. It
maintains a sliding window self.win and adds its values to self.xc
at each time-step.
"""
def __init__(self, buf, slots):
self.stats = hphelper.stats_stats()
self.xc = XcovEst(options.L)
self.L = self.xc.L # max covariance lag
self.buf = buf
self.slots = slots
self.min_win_seq = 1
self.max_win_seq = self.L
self.terminated = False
self.mean_a = options.rate
self.var_a = self.mean_a - self.mean_a**2
# start progress bar thread
hphelper.bar_init(options, self.stats)
threading.Thread.__init__(self)
def conf_int(self, xcov=None):
"""Returns the 95 confidence interval level for sampled iid Gaussian process."""
if not xcov:
xcov = self.xc.xcov[1:]
mean_a = self.mean_a
var_a = self.var_a
mean_w = self.xc.mean
var_w = self.xc.var(mean_w)
mean_y_est = mean_w/mean_a
var_y_est = (var_w - mean_y_est**2*var_a)/(var_a + mean_a**2)
A = var_a*mean_y_est**2 + mean_a*var_y_est
return 2*sqrt((A**2 + 4*mean_a**2*mean_y_est**2*A)/self.xc.slot_count)
#return 2*var_a*sqrt(var_a**2 + 4* mean_a**2)/sqrt(self.slot_count) # ca95
def fit(self, thresh=None, lag_range=(None,None)):
"""Performs a linear fit on the covariance estimate.
Performs a regression on the logarithm of the covariance
estimate returned by xc.xcov. Omits all values larger equal
thresh.
Args:
thresh: A float above which the covariance values are
set to NaN.
lag_range: A an integer tuple specifying the range of lags
to use for the fitting.
"""
if all(lag_range):
min_lag, max_lag = lag_range
else:
min_lag, max_lag = (1, self.L)
xc = self.xc.xcov[1:]
if thresh:
xc[xc <= thresh] = nan
logy = log10(xc[min_lag-1:max_lag-1])
logx = log10(arange(min_lag, max_lag))
try:
(d,y0) = polyfit(logx[~isnan(logy)], logy[~isnan(logy)],1)
return (d, 10**y0)
except Exception as e:
return (-1, -1)
def getdata_str(self):
"""Returns a string of covariance values which can be piped
into gnuplot."""
y = self.xc.xcov[1:]
# y = self.xc.aggvar
if any(y):
return '\n'.join([str(a) for a in y])
else:
return None
def hurst(self, d=None, thresh=0):
"""Returns the Hurst parameter estimate."""
if not d:
(d,y0) = self.fit(thresh=thresh)
return (d+2)/2
def pprint(self):
"""print out the biased covariance"""
print 'xc=[',
for i in reversed(self.xc):
print '%.8f' % (i),
print '];'
def run(self):
stats = self.stats
last_seq = -1 # store maximum sequence number received until now
last_slot = 0
if options.min_rtt == -1.0:
min_rtt = inf
else:
min_rtt = options.min_rtt
while 1:
try: # TODO get rid of try block
(seq, slot, rtt) = self.buf.popleft()
except IndexError:
continue # loop until buffer is not empty
if seq == -2: break
stats.update(seq, rtt, slot)
if seq!=last_seq+1:
# unexpected sequence number
seq_delta = seq-last_seq-1
if seq_delta<0:
# discard probe if it was received out of order
# seq_delta == -1 --> duplicate packet
stats.rx_out_of_order += 1
continue
# all intermediate packets were missing
stats.rcv_err += seq_delta
            ## each dropped probe indicates a full queue; append a
            ## 1 to the covariance vector
#while seq!=last_seq+1:
# last_seq += 1
# next_slot = slots[last_seq]
# if next_slot==-1: # slottimes vector might be incomplete
# continue
# slot_delta = next_slot - last_slot
# last_slot = next_slot
# stats.rcv_err += 1 # increment dropped packets counter
# self.append(1, slot_delta)
last_seq = seq
slot_delta = slot - last_slot
last_slot = slot
# check if the probe saw a busy period (True/False)
probe = rtt > min_rtt
self.xc.append(probe, slot_delta-1)
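# --- Editor's sketch of the Hurst estimation step (hypothetical helper) -----
# Not part of the original module; it condenses what XcovEstimator.fit() and
# hurst() do: fit a straight line to log10(autocovariance) versus log10(lag)
# and map the slope d to a Hurst exponent via H = (d + 2) / 2. Non-positive
# covariance values become NaN or -inf under log10 and are simply skipped.
def _hurst_from_xcov(xcov_values):
    lags = arange(1, len(xcov_values) + 1, dtype=float)
    logx = log10(lags)
    logy = log10(xcov_values)
    valid = isfinite(logy)
    d, y0 = polyfit(logx[valid], logy[valid], 1)
    return (d + 2) / 2.0, d, 10 ** y0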
try:
# try to bind cython methods
# http://wiki.cython.org/FAQ#HowdoIimplementasingleclassmethodinaCythonmodule.3F
XcovEstimator.append = types.MethodType(hpfast.xc_append_f, None, XcovEstimator)
XcovEstimator.xcov = types.MethodType(hpfast.xcov2_f, None, XcovEstimator)
min = hpfast.min
max = hpfast.max
    DEBUG('cython methods bound')
except (NameError, AttributeError) as e:
if options.DEBUG:
print e
def xcparser(pipe, ns, slottimes):
if not options.start_time:
options.start_time = time.time()
if not options.savefile:
        # default save name is destination + YYYYMMDD + HHMM
options.savefile = options.DST + time.strftime("_%Y%m%d_%H%M",
time.localtime(options.start_time))
options.savefile += options.tag
options.savefile += '_xc'
timetime = time.time # faster: http://wiki.python.org/moin/PythonSpeed/PerformanceTips
hphelper.set_affinity('parser')
rcv_buf = deque() # TODO rcv_buf = xcfast.fixed_buf(options.pnum)
xc = XcovEstimator(rcv_buf, slottimes)
xc.daemon = True
xc.name='parseloop'
# start xcplotter Thread
xcplotter_thread = threading.Thread(target=xcplotter, args=(xc,))
xcplotter_thread.daemon = True
#block until sender + receiver say they are ready
while not all([ns.RCV_READY,ns.SND_READY]):
time.sleep(0.1)
DEBUG('starting parser: '+__name__)
xc.stats.run_start = timetime()
xcplotter_thread.start()
xc.start()
data = None
try:
while 1: # faster than while True
data = pipe.recv() # get (seq, slot, rtt) from capture process
(seq, slot, rtt) = data
rcv_buf.append((seq, slot, rtt))
except (KeyboardInterrupt):
rcv_buf.append((-2,-2,-2))
print '\n\nparse loop interrupted...'
except (ValueError) as e:
rcv_buf.append((-2,-2,-2))
print '\a', # all packets received
try:
xc.join()
xcplotter_thread.join()
except KeyboardInterrupt:
pass
# display statistics
xc.stats.run_end = timetime()
xc.stats.rx_slots = xc.xc.slot_count
xc.stats.pprint()
(d,y0) = xc.fit()
print
print "\tH=%.2f (slope %.4f y0=%.4f)" % ((d+2)/2, d, y0 )
print
fname = options.savefile + '.dat'
print "saving covariance to " + fname + " ..."
try:
fs = open(fname, mode='w')
fs.write('% ' + options.IPDST + ' ' + str(options))
for j in xc.xc.xcov[1:]:
fs.write("%e\n" % (j))
fs.close()
except KeyboardInterrupt:
print 'canceled saving.'
def xcplotter(xc, gp=None):
"""Initialize gnuplot and periodically update the graph."""
if options.no_plot: return
gp = hplotting.gp_plotter()
if not gp.gp: return
getdata_str = xc.getdata_str
gp_cmd = gp.cmd
xc_conf_int = xc.conf_int
fps = 1.0/options.fps
# use these to plot axis ranges
min_x, max_x = (1,options.L)
min_y, max_y = (1e-6,1e-0)
# set plot options
gp.setup(xlabel='log_{10}(lag) [s]',
ylabel='log_{10}(autocovariance)',
xrange=(min_x, max_x),
yrange=(min_y,max_y),
xtics=[(i*options.delta,i) for i in 10**arange(log10(options.L)+1)],
)
ydata = ''
i=0
while xc.is_alive():
i += 1
if i%10==0: # plot confidence levels every 10 frames
ci_level = xc_conf_int()
#gp.arrow(min_x, ci_level, max_x, ci_level, '3', 8)
gp.level(ci_level, min_x, max_x)
# TODO does not terminate cleanly if X11 display cannot be opened
time.sleep(fps)
ydata = getdata_str()
if ydata:
gp_cmd("plot '-' with points ls 3\n %s\n e" % ydata, flush=True)
# calculate confidence interval
ci_level = xc_conf_int()
# plot confidence interval level
#gp.arrow(min_x, ci_level, max_x, ci_level, '3', 8)
gp.level(ci_level, min_x, max_x)
# perform fitting for values larger than ci_level
(d,y0) = xc.fit() # TODO thresh=ci_level
H = xc.hurst(d)
ydata = getdata_str()
if H:
# plot H linear fit and label it
gp.label('H=%.2f' % H, 2, 1.2*(y0))
gp.arrow(1, y0, xc.L, y0*xc.L**d, '4')
#xh = 10**((log10(ci_level)-log10(y0))/d)
#gp.arrow(1, y0, xh, y0*xh**d, '4')
if ydata:
gp_cmd("plot '-' with points ls 3\n %s\n e" % ydata)
# save plot to EPS
gp.set_term_eps(options.savefile)
# we must replot everything to save it to the file
# plot H linear fit and label it
gp.label('H=%.2f' % H, 2, 1.2*(y0))
#gp.arrow(1, y0, xc.L, y0*xc.L**d, '4')
gp.level(ci_level, min_x, max_x)
if ydata:
gp_cmd("plot '-' with points ls 3\n %s\n e" % ydata)
gp.quit()
|
bozakov/H-probe
|
parser_xcov.py
|
Python
|
gpl-2.0
| 14,995
|
[
"Gaussian"
] |
e4b8ce2560fa7d7b1e6a702ca5da20a0797424b3f18df17df09461307b59d10a
|
# Copyright (C) 2015-2017
# Jakub Krajniak (jkrajniak at gmail.com)
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Some helper classes useful when parsing the GROMACS topology
import espressopp
import math
import os
def convertTable(gro_in_file, esp_out_file, sigma=1.0, epsilon=1.0, c6=1.0, c12=1.0):
"""Convert GROMACS tabulated file into ESPResSo++ tabulated file (new file
is created). First column of input file can be either distance or angle.
For non-bonded files, c6 and c12 can be provided. Default value for sigma, epsilon,
c6 and c12 is 1.0. Electrostatics are not taken into account (f and fd columns).
Args:
gro_in_file: the GROMACS tabulated file name (bonded, nonbonded, angle
or dihedral).
esp_out_file: filename of the ESPResSo++ tabulated file to be written.
sigma: optional, depending on whether you want to convert units or not.
epsilon: optional, depending on whether you want to convert units or not.
c6: optional
c12: optional
"""
# determine file type
bonded, angle, dihedral = False, False, False
if gro_in_file[6] == "b":
bonded = True
if gro_in_file[6] == "a":
angle = True
bonded = True
if gro_in_file[6] == "d":
dihedral = True
bonded = True
fin = open(gro_in_file, 'r')
fout = open(esp_out_file, 'w')
if bonded: # bonded has 3 columns
for line in fin:
if line.startswith('#'):
continue
columns = line.split()
r = float(columns[0])
f = float(columns[1]) # energy
fd = float(columns[2]) # force
# convert units
if angle or dihedral: # degrees to radians
r = math.radians(r)
fd = fd*180/math.pi
else:
r = r / sigma
e = f / epsilon
f = fd*sigma / epsilon
if (not angle and not dihedral and r != 0) or \
(angle and r <= math.pi and r >= 0) or \
(dihedral and r >= -math.pi and r <= math.pi):
fout.write("%15.8g %15.8g %15.8g\n" % (r, e, f))
else: # non-bonded has 7 columns
for line in fin:
if line.startswith('#'): # skip comment lines
continue
            # TODO: Skipped columns 1 and 2; electrostatics is not implemented yet.
columns = line.split()
r = float(columns[0])
g = float(columns[3]) # dispersion
gd = float(columns[4])
h = float(columns[5]) # repulsion
hd = float(columns[6])
e = c6*g + c12*h
f = c6*gd + c12*hd
# convert units
r = r / sigma
e = e / epsilon
f = f*sigma / epsilon
if r != 0: # skip 0
fout.write("%15.8g %15.8g %15.8g\n" % (r, e, f))
fin.close()
fout.close()
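# Editor's sketch (hypothetical file names): a tiny self-contained check of
# convertTable(). A three-column GROMACS-style bonded table is written out
# ("table_b1.xvg" -- the 'b' at position 6 marks it as bonded) and converted
# into the ESPResSo++ format next to it.
def _convert_table_demo():
    with open("table_b1.xvg", "w") as f:
        f.write("# r  energy  force\n")
        f.write("0.10  2.5  -10.0\n")
        f.write("0.20  1.0   -5.0\n")
    convertTable("table_b1.xvg", "table_b1.pot")
    with open("table_b1.pot") as f:
        return f.read()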
class FileBuffer():
def __init__(self):
self.linecount = 0
self.lines = []
self.pos = 0
def appendline(self, line):
self.lines.append(line)
self.linecount = len(self.lines)
def readline(self):
try:
line = self.lines[self.pos]
except:
return ''
self.pos += 1
return line
def readlastline(self):
try:
line = self.lines[self.pos-1]
except:
return ''
return line
def seek(self, p):
self.pos = p
def tell(self):
return self.pos
def eof(self):
return self.pos == len(self.lines)
def FillFileBuffer(fname, filebuffer, cwd=None, defines=None):
if cwd is None:
cwd = '.'
if defines is None:
defines = {}
f = open(os.path.join(cwd, fname), 'r')
for line in f:
if line.startswith(';'):
continue
if "include" in line:
name = line.split()[1].strip('\"')
cwd_name = os.path.dirname(name)
if cwd_name != '':
cwd = cwd_name
FillFileBuffer(name, filebuffer, cwd, defines)
elif 'define' in line:
t = line.strip().split()
if len(t) > 2:
defines[t[1]] = ' '.join(t[2:])
else:
l = line.rstrip('\n')
if l:
filebuffer.appendline(l)
f.close()
return
def PostProcessFileBuffer(filebuffer, defines):
"""Replace all defines with the value from the dictionary."""
ret_fb = FileBuffer()
define_keys = set(defines)
for line in filebuffer.lines:
line = line.strip()
if line:
if not (line.startswith(';') or line.startswith('#define')
or line.startswith('#include') or line.startswith('#ifdef')
or line.startswith('#ifndef')):
def_key = set.intersection(set(map(str.strip, line.split())), define_keys)
if def_key:
def_key = def_key.pop()
ret_fb.appendline(
line.replace(def_key, defines[def_key]))
else:
ret_fb.appendline(line)
else:
ret_fb.appendline(line)
return ret_fb
def FindType(proposedtype, typelist):
list=[typeid for (typeid,atype) in typelist.iteritems() if atype==proposedtype ]
if len(list)>1:
print "Error: duplicate type definitons", proposedtype.parameters
exit()
elif len(list)==0:
return None
return list[0]
def convertc6c12(c6, c12):
if c12 == 0.0:
return 1.0, 0.0
sig = pow(c12/c6, 1.0/6.)
if sig > 0.0:
eps = 0.25*c6*pow(sig, -6.0)
else:
eps = 0.0
return sig, eps
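# Editor's note (hypothetical numbers): convertc6c12() above implements the
# usual Lennard-Jones relations sigma = (C12/C6)**(1/6) and
# epsilon = C6**2 / (4*C12). The round trip below recovers the inputs.
def _c6c12_roundtrip_demo(c6=1.0e-3, c12=1.0e-6):
    sig, eps = convertc6c12(c6, c12)
    c6_back = 4.0 * eps * sig**6      # equals c6
    c12_back = 4.0 * eps * sig**12    # equals c12
    return sig, eps, c6_back, c12_back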
class InteractionType:
def __init__(self, parameters):
self.parameters=parameters
def __eq__(self,other):
# interaction types are defined to be equal if all parameters are equal
for k, v in self.parameters.iteritems():
if k not in other.parameters: return False
if other.parameters[k]!=v: return False
return True
def createEspressoInteraction(self, system, fpl):
print("WARNING: could not set up interaction for {}: Espressopp potential not implemented".format(self.parameters))
def automaticExclusion(self):
return False
class HarmonicBondedInteractionType(InteractionType):
def createEspressoInteraction(self, system, fpl):
# interaction specific stuff here
# spring constant kb is half the gromacs spring constant
pot = espressopp.interaction.Harmonic(self.parameters['kb']/2.0, self.parameters['b0'])
print 'setting harmonic bond k=', self.parameters['kb']/2.0, 'b0=', self.parameters['b0']
return espressopp.interaction.FixedPairListHarmonic(system, fpl, pot)
def automaticExclusion(self):
return True
class MorseBondedInteractionType(InteractionType):
def createEspressoInteraction(self, system, fpl, ftpl=None):
# interaction specific stuff here
if ftpl is not None:
raise RuntimeError('Morse potential is not implemented to support Adress')
pot = espressopp.interaction.Morse(self.parameters['D'], self.parameters['beta'], self.parameters['rmin'])
interb = espressopp.interaction.FixedPairListMorse(system, fpl, pot)
return interb
def automaticExclusion(self):
return True
class FENEBondedInteractionType(InteractionType):
def createEspressoInteraction(self, system, fpl, ftpl=None):
# interaction specific stuff here
# spring constant kb is half the gromacs spring constant
if ftpl is not None:
raise RuntimeError('FENE potential is not implemented to support Adress')
pot = espressopp.interaction.Fene(self.parameters['kb']/2.0, self.parameters['b0'])
interb = espressopp.interaction.FixedPairListFene(system, fpl, pot)
return interb
def automaticExclusion(self):
return True
class HarmonicAngleInteractionType(InteractionType):
def createEspressoInteraction(self, system, fpl, ftpl=None):
# interaction specific stuff here
# spring constant kb is half the gromacs spring constant. Also convert deg to rad
K = self.parameters['k'] / 2.0
theta0 = self.parameters['theta']*math.pi/180.0
print 'setting angular harmonic k=', K, 'theta=', theta0
pot = espressopp.interaction.AngularHarmonic(K=K, theta0=theta0)
if ftpl is not None:
return espressopp.interaction.FixedTripleListAdressAngularHarmonic(system, fpl, pot, ftpl)
else:
return espressopp.interaction.FixedTripleListAngularHarmonic(system, fpl, pot)
class TabulatedBondInteractionType(InteractionType):
def createEspressoInteraction(self, system, fpl, ftpl=None):
spline=1
fg = "table_b"+str(self.parameters['tablenr'])+".xvg"
fe = fg.split(".")[0]+".pot" # name of espressopp file
if not os.path.exists(fe):
convertTable(fg, fe)
print('Tabulated bond: {}'.format(fe))
potTab = espressopp.interaction.Tabulated(itype=spline, filename=fe)
if ftpl is not None:
return espressopp.interaction.FixedPairListAdressTabulated(system, fpl, potTab, ftpl)
else:
return espressopp.interaction.FixedPairListTabulated(system, fpl, potTab)
def automaticExclusion(self):
return self.parameters['excl']
class TabulatedAngleInteractionType(InteractionType):
def createEspressoInteraction(self, system, fpl, ftpl=None):
spline=1
fg = "table_a"+str(self.parameters['tablenr'])+".xvg"
fe = fg.split(".")[0]+".pot" # name of espressopp file
if not os.path.exists(fe):
convertTable(fg, fe)
print('Tabulated angular: {}'.format(fe))
potTab = espressopp.interaction.TabulatedAngular(itype=spline, filename=fe)
if ftpl is not None:
return espressopp.interaction.FixedTripleListAdressTabulatedAngular(system, fpl, potTab, ftpl)
else:
return espressopp.interaction.FixedTripleListTabulatedAngular(system, fpl, potTab)
class TabulatedDihedralInteractionType(InteractionType):
def createEspressoInteraction(self, system, fpl, ftpl=None):
spline = 1
fg = "table_d"+str(self.parameters['tablenr'])+".xvg"
fe = fg.split(".")[0]+".pot" # name of espressopp file
if not os.path.exists(fe):
convertTable(fg, fe)
print('Tabulated dihedral: {}'.format(fe))
potTab = espressopp.interaction.TabulatedDihedral(itype=spline, filename=fe)
if ftpl is not None:
return espressopp.interaction.FixedQuadrupleListAdressTabulatedDihedral(system, fpl, potTab, ftpl)
else:
return espressopp.interaction.FixedQuadrupleListTabulatedDihedral(system, fpl, potTab)
class RyckaertBellemansDihedralInteractionType(InteractionType):
def createEspressoInteraction(self, system, fpl, ftpl=None):
print('RyckaertBellemans: {}'.format(self.parameters))
pot = espressopp.interaction.DihedralRB(**self.parameters)
if ftpl is not None:
return espressopp.interaction.FixedQuadrupleListAdressDihedralRB(system, fpl, pot, ftpl)
else:
return espressopp.interaction.FixedQuadrupleListDihedralRB(system, fpl, pot)
class HarmonicNCosDihedralInteractionType(InteractionType):
def createEspressoInteraction(self, system, fpl, ftpl=None):
theta0 = self.parameters['theta0'] * math.pi/180.0
print('HarmonicNCosDihedral, theta0: {}, k:{}, n: {}'.format(
theta0, self.parameters['k'], self.parameters['n']))
pot = espressopp.interaction.DihedralHarmonicNCos(
K=self.parameters['k'], phi0=theta0, multiplicity=self.parameters['n'])
if ftpl is not None:
return espressopp.interaction.FixedQuadrupleListAdressDihedralHarmonicNCos(system, fpl, pot, ftpl)
else:
return espressopp.interaction.FixedQuadrupleListDihedralHarmonicNCos(system, fpl, pot)
def ParseBondTypeParam(line):
tmp = line.split()
btype= tmp[2]
# TODO: handle exclusions automatically
if btype == "8":
p=TabulatedBondInteractionType({"tablenr":int(float(tmp[3])),"k":float(tmp[4]), 'excl':True})
elif btype == "9":
p=TabulatedBondInteractionType({"tablenr":int(float(tmp[3])), "k":float(tmp[4]), 'excl':False})
elif btype == "1":
p=HarmonicBondedInteractionType({"b0":float(tmp[3]), "kb":float(tmp[4])})
elif btype == "2":
# Converts from GROMOS Harmonic to Harmonic
# k_harm = 2*kb*b0**2
kb = 0.5*2*float(tmp[4])*(float(tmp[3])**2)
p=HarmonicBondedInteractionType({"b0":float(tmp[3]), "kb":kb})
elif btype == "3":
p=MorseBondedInteractionType({"b0":float(tmp[3]), "D":float(tmp[4]), "beta":float(tmp[5])})
elif btype == "7":
p=FENEBondedInteractionType({"b0":float(tmp[3]), "kb":float(tmp[4])})
elif btype == "9":
p=TabulatedBondInteractionType({"tablenr":int(float(tmp[3])), "k":float(tmp[4])})
else:
print "Unsupported bond type", tmp[2], "in line:"
print line
exit()
return p
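# Editor's sketch: parsing a hypothetical GROMACS [ bondtypes ] line with
# function type 1 (harmonic). The two leading fields are atom types, followed
# by the function type number, b0 in nm and kb in kJ/(mol nm^2).
def _parse_bond_type_demo():
    p = ParseBondTypeParam("CA CB 1 0.153 334720.0")
    return p.parameters   # {'b0': 0.153, 'kb': 334720.0}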
def ParseAngleTypeParam(line):
tmp = line.split()
type= float(tmp[3])
if type == 1:
p=HarmonicAngleInteractionType({"theta":float(tmp[4]), "k":float(tmp[5])})
elif type == 2:
theta = float(tmp[4])*math.pi/180.0
k = 0.5*float(tmp[5])*(math.sin(theta)**2)
p=HarmonicAngleInteractionType({"theta":float(tmp[4]), "k":k})
elif type == 8:
p=TabulatedAngleInteractionType({"tablenr":int(float(tmp[4])),"k":float(tmp[5])})
else:
print "Unsupported angle type", type, "in line:"
print line
exit()
return p
def ParseDihedralTypeParam(line):
#
tmp = line.split(';')[0].split()
type = int(tmp[4])
if type == 8:
p=TabulatedDihedralInteractionType({"tablenr":int(float(tmp[5])), "k":float(tmp[6])})
elif type == 3:
tmp[5:11] = map(float, tmp[5:11])
p = RyckaertBellemansDihedralInteractionType({'K0': tmp[5], 'K1': tmp[6], 'K2': tmp[7], 'K3': tmp[8], 'K4': tmp[9], 'K5': tmp[10]})
elif type == 1:
p = HarmonicNCosDihedralInteractionType({'theta0': float(tmp[5]), 'k': float(tmp[6]), 'n': int(tmp[7])})
else:
print "Unsupported dihedral type", type, "in line:"
print line
return False
return p
# Useful code for generating the regular exclusions
class Node():
def __init__(self, id):
self.id=id
self.neighbours=[]
def addNeighbour(self, nb):
self.neighbours.append(nb)
def FindNodeById(id, nodes):
list=[n for n in nodes if n.id==id ]
if len(list)>1:
print "Error: duplicate nodes", id
exit()
elif len(list)==0:
return None
return list[0]
def FindNNextNeighbours(startnode, numberNeighbours, neighbours, forbiddenNodes):
if numberNeighbours==0:
return neighbours
#avoid going back the same path
forbiddenNodes.append(startnode)
# Loop over next neighbours and add them to the neighbours list
# Recursively call the function with numberNeighbours-1
for n in startnode.neighbours:
if not n in forbiddenNodes:
if n not in neighbours: neighbours.append(n) # avoid double counting in rings
FindNNextNeighbours(n, numberNeighbours-1, neighbours, forbiddenNodes)
def GenerateRegularExclusions(bonds, nrexcl, exclusions):
nodes=[]
# make a Node object for each atom involved in bonds
for b in bonds:
bids=b[0:2]
for i in bids:
if FindNodeById(i, nodes)==None:
n=Node(i)
nodes.append(n)
# find the next neighbours for each node and append them
for b in bonds:
permutations=[(b[0], b[1]), (b[1], b[0])]
for p in permutations:
n=FindNodeById(p[0], nodes)
nn=FindNodeById(p[1], nodes)
n.addNeighbour(nn)
# for each atom, call the FindNNextNeighbours function, which recursively
    # searches for nrexcl next neighbours
for n in nodes:
neighbours=[]
FindNNextNeighbours(n, nrexcl, neighbours, forbiddenNodes=[])
for nb in neighbours:
# check if the permutation is already in the exclusion list
# this may be slow, but to do it in every MD step is even slower...
            # TODO: find a clever algorithm which avoids permutations from the start
if not (n.id, nb.id) in exclusions:
if not (nb.id, n.id) in exclusions:
exclusions.append((n.id, nb.id))
return exclusions
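# Editor's sketch: for a hypothetical linear chain bonded as 1-2-3-4 with
# nrexcl=2, the regular exclusions are all atom pairs separated by at most
# two bonds.
def _exclusion_demo():
    bonds = [(1, 2), (2, 3), (3, 4)]
    return GenerateRegularExclusions(bonds, 2, [])
    # -> [(1, 2), (1, 3), (2, 3), (2, 4), (3, 4)]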
|
MrTheodor/AdResSLab
|
adresslab/topology_helper.py
|
Python
|
gpl-3.0
| 17,636
|
[
"ESPResSo",
"GROMOS",
"Gromacs"
] |
bc30b9d91d2c57ae58bf75a6e6c0c2935858ed02f2bc697e1929757cfa2380f9
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Guessing unknown Topology information --- :mod:`MDAnalysis.topology.guessers`
=============================================================================
In general `guess_atom_X` returns the guessed value for a single value,
while `guess_Xs` will work on an array of many atoms.
"""
from six.moves import map
import numpy as np
import warnings
from ..lib import distances
from . import tables
def guess_masses(atom_types):
"""Guess the mass of many atoms based upon their type
Parameters
----------
atom_types
Type of each atom
Returns
-------
atom_masses : np.ndarray dtype float64
"""
masses = np.array(list(map(get_atom_mass, atom_types)), dtype=np.float64)
if np.any(masses == 0.0):
# figure out where the misses were and report
misses = np.unique(np.asarray(atom_types)[np.where(masses == 0.0)])
warnings.warn("Failed to guess the mass for the following atom types: {}"
"".format(', '.join(misses)))
return masses
def guess_types(atom_names):
"""Guess the atom type of many atoms based on atom name
Parameters
----------
atom_names
Name of each atom
Returns
-------
atom_types : np.ndarray dtype object
"""
return np.array(list(map(guess_atom_element, atom_names)),
dtype=object)
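# Editor's sketch (hypothetical atom names, not part of the MDAnalysis API):
# chaining the two helpers above -- names are mapped to guessed elements,
# which are then looked up in MDAnalysis.topology.tables.masses. Any element
# missing from the table comes back with mass 0.0 and triggers the warning
# emitted by guess_masses().
def _guess_masses_example():
    names = ['CA', 'HB1', 'OXT', 'N']
    types = guess_types(names)
    return types, guess_masses(types)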
def guess_atom_type(atomname):
"""Guess atom type from the name.
At the moment, this function simply returns the element, as
guessed by :func:`guess_atom_element`.
.. SeeAlso:: :func:`guess_atom_element` and
:mod:`MDAnalysis.topology.tables`
"""
return guess_atom_element(atomname)
def guess_atom_element(atomname):
"""Guess the element of the atom from the name.
Looks in dict to see if element is found, otherwise it uses the first
character in the atomname. The table comes from CHARMM and AMBER atom
types, where the first character is not sufficient to determine the atom
type. Some GROMOS ions have also been added.
.. Warning: The translation table is incomplete. This will probably result
    in some mistakes, but it is still better than nothing!
.. SeeAlso:: :func:`guess_atom_type` and
:mod:`MDAnalysis.topology.tables` (where the data are stored)
"""
if atomname == '':
return ''
try:
return tables.atomelements[atomname]
except KeyError:
if atomname[0].isdigit():
# catch 1HH etc
try:
return atomname[1]
except IndexError:
pass
return atomname[0]
def guess_bonds(atoms, coords, **kwargs):
r"""Guess if bonds exist between two atoms based on their distance.
Bond between two atoms is created, if the two atoms are within
.. math::
d < f \cdot (R_1 + R_2)
of each other, where :math:`R_1` and :math:`R_2` are the VdW radii
of the atoms and :math:`f` is an ad-hoc *fudge_factor*. This is
the `same algorithm that VMD uses`_.
Parameters
----------
atoms : AtomGroup
atoms for which bonds should be guessed
coords : array
coordinates of the atoms (i.e., `AtomGroup.positions)`)
fudge_factor : float, optional
        The factor by which atoms must overlap each other to be considered a
bond. Larger values will increase the number of bonds found. [0.72]
vdwradii : dict, optional
To supply custom vdwradii for atoms in the algorithm. Must be a dict
of format {type:radii}. The default table of van der Waals radii is
hard-coded as :data:`MDAnalysis.topology.tables.vdwradii`. Any user
        defined vdwradii passed as an argument will supersede the table
values. [``None``]
lower_bound : float, optional
The minimum bond length. All bonds found shorter than this length will
be ignored. This is useful for parsing PDB with altloc records where
        atoms with altloc A and B may be very close together and there should be
no chemical bond between them. [0.1]
box : dimensions, optional
Bonds are found using a distance search, if unit cell information is
given, periodic boundary conditions will be considered in the distance
search. [``None``]
Returns
-------
list
List of tuples suitable for use in Universe topology building.
Warnings
--------
No check is done after the bonds are guessed to see if Lewis
structure is correct. This is wrong and will burn somebody.
Raises
------
:exc:`ValueError` if inputs are malformed or `vdwradii` data is missing.
.. _`same algorithm that VMD uses`:
http://www.ks.uiuc.edu/Research/vmd/vmd-1.9.1/ug/node26.html
.. versionadded:: 0.7.7
.. versionchanged:: 0.9.0
Updated method internally to use more :mod:`numpy`, should work
faster. Should also use less memory, previously scaled as
:math:`O(n^2)`. *vdwradii* argument now augments table list
rather than replacing entirely.
"""
# why not just use atom.positions?
if len(atoms) != len(coords):
raise ValueError("'atoms' and 'coord' must be the same length")
fudge_factor = kwargs.get('fudge_factor', 0.72)
vdwradii = tables.vdwradii.copy() # so I don't permanently change it
user_vdwradii = kwargs.get('vdwradii', None)
if user_vdwradii: # this should make algo use their values over defaults
vdwradii.update(user_vdwradii)
# Try using types, then elements
atomtypes = atoms.types
# check that all types have a defined vdw
if not all(val in vdwradii for val in set(atomtypes)):
raise ValueError(("vdw radii for types: " +
", ".join([t for t in set(atomtypes) if
not t in vdwradii]) +
". These can be defined manually using the" +
" keyword 'vdwradii'"))
lower_bound = kwargs.get('lower_bound', 0.1)
box = kwargs.get('box', None)
    # to speed up checking, calculate the largest possible bond
    # distance that would warrant attention,
# then use this to quickly mask distance results later
max_vdw = max([vdwradii[t] for t in atomtypes])
bonds = []
for i, atom in enumerate(atoms[:-1]):
vdw_i = vdwradii[atomtypes[i]]
max_d = (vdw_i + max_vdw) * fudge_factor
# using self_distance_array scales O(n^2)
# 20,000 atoms = 1.6 Gb memory
dist = distances.distance_array(coords[i][None, :], coords[i + 1:],
box=box)[0]
idx = np.where((dist > lower_bound) & (dist <= max_d))[0]
for a in idx:
j = i + 1 + a
atom_j = atoms[j]
if dist[a] < (vdw_i + vdwradii[atomtypes[j]]) * fudge_factor:
# because of method used, same bond won't be seen twice,
# so don't need to worry about duplicates
bonds.append((atom.index, atom_j.index))
return tuple(bonds)
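def _vdw_bond_criterion(r1, r2, d, fudge_factor=0.72, lower_bound=0.1):
    """Editor's sketch (hypothetical helper, not part of the public API) of
    the acceptance test applied inside guess_bonds above: two atoms with van
    der Waals radii r1 and r2 at distance d are considered bonded when
    lower_bound < d < fudge_factor * (r1 + r2).
    """
    return lower_bound < d < fudge_factor * (r1 + r2)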
def guess_angles(bonds):
"""Given a list of Bonds, find all angles that exist between atoms.
Works by assuming that if atoms 1 & 2 are bonded, and 2 & 3 are bonded,
then (1,2,3) must be an angle.
:Returns:
List of tuples defining the angles.
Suitable for use in u._topology
.. seeAlso:: :meth:`guess_bonds`
.. versionadded 0.9.0
"""
angles_found = set()
for b in bonds:
for atom in b:
other_a = b.partner(atom) # who's my friend currently in Bond
for other_b in atom.bonds:
if other_b != b: # if not the same bond I start as
third_a = other_b.partner(atom)
desc = tuple([other_a.index, atom.index, third_a.index])
if desc[0] > desc[-1]: # first index always less than last
desc = desc[::-1]
angles_found.add(desc)
return tuple(angles_found)
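# Editor's sketch: the same idea as guess_angles, written for plain integer
# bond pairs instead of MDAnalysis Bond objects (hypothetical helper, shown
# only to illustrate the shared-atom rule).
def _angles_from_index_pairs(bond_pairs):
    from collections import defaultdict
    neighbours = defaultdict(set)
    for i, j in bond_pairs:
        neighbours[i].add(j)
        neighbours[j].add(i)
    angles = set()
    for j, partners in neighbours.items():
        for i in partners:
            for k in partners:
                if i < k:                  # first index always less than last
                    angles.add((i, j, k))
    return angles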
def guess_dihedrals(angles):
"""Given a list of Angles, find all dihedrals that exist between atoms.
Works by assuming that if (1,2,3) is an angle, and 3 & 4 are bonded,
then (1,2,3,4) must be a dihedral.
:Returns:
List of tuples defining the dihedrals.
Suitable for use in u._topology
.. versionadded 0.9.0
"""
dihedrals_found = set()
for b in angles:
a_tup = tuple([a.index for a in b]) # angle as tuple of numbers
# if searching with b[0], want tuple of (b[2], b[1], b[0], +new)
# search the first and last atom of each angle
for atom, prefix in zip([b.atoms[0], b.atoms[-1]],
[a_tup[::-1], a_tup]):
for other_b in atom.bonds:
if not other_b.partner(atom) in b:
third_a = other_b.partner(atom)
desc = prefix + (third_a.index,)
if desc[0] > desc[-1]:
desc = desc[::-1]
dihedrals_found.add(desc)
return tuple(dihedrals_found)
def guess_improper_dihedrals(angles):
"""Given a list of Angles, find all improper dihedrals that exist between
atoms.
Works by assuming that if (1,2,3) is an angle, and 2 & 4 are bonded,
then (2, 1, 3, 4) must be an improper dihedral.
ie the improper dihedral is the angle between the planes formed by
(1, 2, 3) and (1, 3, 4)
:Returns:
List of tuples defining the improper dihedrals.
Suitable for use in u._topology
.. versionadded 0.9.0
"""
dihedrals_found = set()
for b in angles:
atom = b[1] # select middle atom in angle
# start of improper tuple
a_tup = tuple([b[a].index for a in [1, 2, 0]])
# if searching with b[1], want tuple of (b[1], b[2], b[0], +new)
# search the first and last atom of each angle
for other_b in atom.bonds:
other_atom = other_b.partner(atom)
# if this atom isn't in the angle I started with
if not other_atom in b:
desc = a_tup + (other_atom.index,)
if desc[0] > desc[-1]:
desc = desc[::-1]
dihedrals_found.add(desc)
return tuple(dihedrals_found)
def get_atom_mass(element):
"""Return the atomic mass in u for *element*.
Masses are looked up in :data:`MDAnalysis.topology.tables.masses`.
.. Warning:: Unknown masses are set to 0.00
"""
try:
return tables.masses[element]
except KeyError:
return 0.000
def guess_atom_mass(atomname):
"""Guess a mass based on the atom name.
:func:`guess_atom_element` is used to determine the kind of atom.
.. warning:: Anything not recognized is simply set to 0; if you rely on the
masses you might want to double check.
"""
return get_atom_mass(guess_atom_element(atomname))
def guess_atom_charge(atomname):
"""Guess atom charge from the name.
.. Warning:: Not implemented; simply returns 0.
"""
# TODO: do something slightly smarter, at least use name/element
return 0.0
|
alejob/mdanalysis
|
package/MDAnalysis/topology/guessers.py
|
Python
|
gpl-2.0
| 12,236
|
[
"Amber",
"CHARMM",
"GROMOS",
"MDAnalysis",
"VMD"
] |
894953d5143d03c035cd1624033190c306c783d3d928fd8145931de3933f636e
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
""" Templates implementation for person editors. """
import gtk
from stoqlib.domain.person import Company, Individual, Person, Supplier
from stoqlib.exceptions import DatabaseInconsistency
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.base.slaves import NoteSlave
from stoqlib.gui.dialogs.addressdialog import AddressAdditionDialog
from stoqlib.gui.dialogs.contactsdialog import ContactInfoListDialog
from stoqlib.gui.editors.baseeditor import BaseEditorSlave, BaseEditor
from stoqlib.gui.search.callsearch import CallsSearch
from stoqlib.gui.search.creditcheckhistorysearch import CreditCheckHistorySearch
from stoqlib.gui.slaves.addressslave import AddressSlave
from stoqlib.gui.templates.companytemplate import CompanyEditorTemplate
from stoqlib.gui.templates.individualtemplate import IndividualEditorTemplate
from stoqlib.gui.utils.databaseform import DatabaseForm
from stoqlib.lib.message import warning
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class _PersonEditorTemplate(BaseEditorSlave):
model_type = Person
gladefile = 'PersonEditorTemplate'
proxy_widgets = ('name',
'phone_number',
'fax_number',
'mobile_number',
'email')
def __init__(self, store, model, visual_mode, ui_form_name, parent):
self._parent = parent
if ui_form_name:
self.db_form = DatabaseForm(ui_form_name)
else:
self.db_form = None
super(_PersonEditorTemplate, self).__init__(store, model,
visual_mode=visual_mode)
#
# BaseEditorSlave hooks
#
def create_model(self, store):
return Person(name=u"", store=store)
def setup_proxies(self):
self._setup_widgets()
self._setup_form_fields()
self.proxy = self.add_proxy(self.model,
_PersonEditorTemplate.proxy_widgets)
def setup_slaves(self):
self.address_slave = AddressSlave(
self.store, self.model, self.model.get_main_address(),
visual_mode=self.visual_mode,
db_form=self.db_form)
self.attach_slave('address_holder', self.address_slave)
self.attach_model_slave('note_holder', NoteSlave, self.model)
def on_confirm(self):
main_address = self.address_slave.model
main_address.person = self.model
#
# Public API
#
def add_extra_tab(self, tab_label, slave, position=None):
"""Adds an extra tab to the editor
        :param tab_label: the label that will be displayed on the tab
:param slave: the slave that will be attached to the new tab
:param position: the position the tab will be attached
"""
event_box = gtk.EventBox()
self.person_notebook.append_page(event_box, gtk.Label(tab_label))
self.attach_slave(tab_label, slave, event_box)
event_box.show()
if position is not None:
self.person_notebook.reorder_child(event_box, position)
self.person_notebook.set_current_page(position)
def attach_role_slave(self, slave):
self.attach_slave('role_holder', slave)
def attach_model_slave(self, name, slave_type, slave_model):
slave = slave_type(self.store, slave_model,
visual_mode=self.visual_mode)
self.attach_slave(name, slave)
return slave
#
# Kiwi handlers
#
def on_name__map(self, entry):
self.name.grab_focus()
def on_address_button__clicked(self, button):
main_address = self.model.get_main_address()
if not main_address.is_valid_model():
msg = _(u"You must define a valid main address before\n"
"adding additional addresses")
warning(msg)
return
result = run_dialog(AddressAdditionDialog, self._parent,
self.store, person=self.model,
reuse_store=not self.visual_mode)
if not result:
return
new_main_address = self.model.get_main_address()
if new_main_address is not main_address:
self.address_slave.set_model(new_main_address)
def on_contact_info_button__clicked(self, button):
run_dialog(ContactInfoListDialog, self._parent, self.store,
person=self.model, reuse_store=not self.visual_mode)
def on_calls_button__clicked(self, button):
run_dialog(CallsSearch, self._parent, self.store,
person=self.model, reuse_store=not self.visual_mode)
def on_credit_check_history_button__clicked(self, button):
run_dialog(CreditCheckHistorySearch, self._parent, self.store,
client=self.model.client, reuse_store=not self.visual_mode)
#
# Private API
#
def _setup_widgets(self):
individual = self.model.individual
company = self.model.company
if not (individual or company):
raise DatabaseInconsistency('A person must have at least a '
'company or an individual set.')
tab_child = self.person_data_tab
if individual and company:
tab_text = _('Individual/Company Data')
self.company_frame.set_label(_('Company Data'))
self.company_frame.show()
self.individual_frame.set_label(_('Individual Data'))
self.individual_frame.show()
elif individual:
tab_text = _('Individual Data')
self.company_frame.hide()
label_widget = self.individual_frame.get_label_widget()
if label_widget is not None:
label_widget.hide()
self.individual_frame.show()
else:
tab_text = _('Company Data')
self.individual_frame.hide()
label_widget = self.company_frame.get_label_widget()
if label_widget is not None:
label_widget.hide()
self.company_frame.show()
self.person_notebook.set_tab_label_text(tab_child, tab_text)
addresses = self.model.get_total_addresses()
if addresses == 2:
self.address_button.set_label(_("1 More Address..."))
elif addresses > 2:
self.address_button.set_label(_("%i More Addresses...")
% (addresses - 1))
if not self.model.client:
self.credit_check_history_button.hide()
def _setup_form_fields(self):
if not self.db_form:
return
self.db_form.update_widget(self.name,
other=self.name_lbl)
self.db_form.update_widget(self.phone_number,
other=self.phone_number_lbl)
self.db_form.update_widget(self.fax_number, u'fax',
other=self.fax_lbl)
self.db_form.update_widget(self.email,
other=self.email_lbl)
self.db_form.update_widget(self.mobile_number,
other=self.mobile_lbl)
class BasePersonRoleEditor(BaseEditor):
"""A base class for person role editors. This class can not be
instantiated directly.
:attribute main_slave:
:attribute individual_slave:
:attribute company_slave:
:cvar help_section: the help button for this wizard,
usually describing how to create a new person
"""
size = (-1, -1)
help_section = None
ui_form_name = None
need_cancel_confirmation = True
def __init__(self, store, model=None, role_type=None, person=None,
visual_mode=False, parent=None, document=None):
""" Creates a new BasePersonRoleEditor object
:param store: a store
:param model:
        :param role_type: None, ROLE_INDIVIDUAL or ROLE_COMPANY
:param person:
:param visual_mode:
"""
if not (model or role_type is not None):
raise ValueError('A role_type attribute is required')
self._parent = parent
self.individual_slave = None
self.company_slave = None
self._person_slave = None
self.main_slave = None
self.role_type = role_type
self.person = person
self.document = document
BaseEditor.__init__(self, store, model, visual_mode=visual_mode)
# FIXME: Implement and use IDescribable on the model
self.set_description(self.model.person.name)
#
# BaseEditor hooks
#
def create_model(self, store):
        # XXX: Waiting for a fix for bug 2163. We should not need to
        # provide empty values for mandatory attributes anymore
if not self.person:
self.person = Person(name=u"", store=store)
if not self.role_type in [Person.ROLE_INDIVIDUAL,
Person.ROLE_COMPANY]:
raise ValueError("Invalid value for role_type attribute, %r" % (
self.role_type, ))
if (self.role_type == Person.ROLE_INDIVIDUAL and
not self.person.individual):
Individual(person=self.person, store=store, cpf=self.document)
elif (self.role_type == Person.ROLE_COMPANY and
not self.person.company):
Company(person=self.person, store=store, cnpj=self.document)
else:
pass
return self.person
def setup_slaves(self):
individual = self.model.person.individual
company = self.model.person.company
assert individual or company
self._person_slave = _PersonEditorTemplate(self.store,
self.model.person,
visual_mode=self.visual_mode,
ui_form_name=self.ui_form_name,
parent=self._parent or self)
if individual:
slave = IndividualEditorTemplate(self.store,
model=individual,
person_slave=self._person_slave,
visual_mode=self.visual_mode)
self.individual_slave = slave
self.main_slave = slave
if company:
slave = CompanyEditorTemplate(self.store,
model=company,
person_slave=self._person_slave,
visual_mode=self.visual_mode)
self.company_slave = slave
self.main_slave = slave
self.attach_slave('main_holder', slave)
self.main_slave.attach_slave('main_holder', self._person_slave)
def on_confirm(self):
if (isinstance(self.model, Supplier) and
not sysparam.has_object('SUGGESTED_SUPPLIER')):
sysparam.set_object(self.store, 'SUGGESTED_SUPPLIER', self.model)
#
# Public API
#
def get_person_slave(self):
return self._person_slave
def set_phone_number(self, phone_number):
slave = self.get_person_slave()
slave.set_phone_number(phone_number)
|
tiagocardosos/stoq
|
stoqlib/gui/templates/persontemplate.py
|
Python
|
gpl-2.0
| 12,295
|
[
"VisIt"
] |
6dcde2c1764ea1cc07742b1a8d7e11666b371e5a4ae29550c7eb8bc3e501581b
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from kademlia import version
setup(
name="kademlia",
version=version,
description="Kademlia is a distributed hash table for decentralized peer-to-peer computer networks.",
author="Brian Muller",
author_email="bamuller@gmail.com",
license="MIT",
url="http://github.com/bmuller/kademlia",
packages=find_packages(),
requires=["twisted", "rpcudp"],
install_requires=['twisted>=14.0', "rpcudp>=1.0"]
)
|
Tha-Robert/kademlia
|
setup.py
|
Python
|
mit
| 504
|
[
"Brian"
] |
70eb5f503059f23fc9a33793a9d1fa8e7763e1b04da57b5aee78709ac2e36e49
|
# text from https://microbit-micropython.readthedocs.io/en/latest/microbit_micropython_api.html
def sleep(ms):
"""sleep for the given number of milliseconds."""
def running_time():
"""returns the number of milliseconds since the micro:bit was last switched on."""
def panic(error_code):
"""makes the micro:bit enter panic mode (this usually happens when the DAL runs out of memory, and causes a sad face to be drawn on the display). The error # code can be any arbitrary integer value."""
def reset():
"""resets the micro:bit."""
class _Button:
def is_pressed():
"""returns True or False to indicate if the button is pressed at the time of the method call."""
def was_pressed():
"""returns True or False to indicate if the button was pressed since the device started or the last time this method was called."""
def get_presses():
"""returns the running total of button presses, and resets this counter to zero"""
button_a = _Button()
button_b = _Button()
class _Display:
def get_pixel(self, x, y):
"""ARTASASD the brightness of the pixel (x,y). Brightness can be from 0 (the pixel is off) to 9 (the pixel is at maximum brightness)."""
def set_pixel(x, y, val):
"""sets the brightness of the pixel (x,y) to val (between 0 [off] and 9 [max brightness], inclusive)."""
def clear():
"""clears the display."""
def show(image, delay=0, wait=True, loop=False, clear=False):
"""shows the image."""
def show(iterable, delay=400, wait=True, loop=False, clear=False):
"""shows each image or letter in the iterable, with delay ms. in between each."""
def scroll(string, delay=400):
"""scrolls a string across the display (more exciting than display.show for written messages)."""
display = _Display()
class _MicroBitPin:
def write_digital(value):
"""value can be 0, 1, False, True"""
def read_digital():
"""returns either 1 or 0"""
def write_analog(value):
"""value is between 0 and 1023"""
def read_analog():
"""returns an integer between 0 and 1023"""
def set_analog_period(int):
"""sets the period of the PWM output of the pin in milliseconds (see https://en.wikipedia.org/wiki/Pulse-width_modulation)"""
def set_analog_period_microseconds(int):
"""sets the period of the PWM output of the pin in microseconds (see https://en.wikipedia.org/wiki/Pulse-width_modulation)"""
def is_touched():
pass
pin1 = _MicroBitPin()
pin2 = _MicroBitPin()
pin3 = _MicroBitPin()
pin4 = _MicroBitPin()
pin5 = _MicroBitPin()
pin6 = _MicroBitPin()
pin7 = _MicroBitPin()
pin8 = _MicroBitPin()
pin9 = _MicroBitPin()
pin10 = _MicroBitPin()
pin11 = _MicroBitPin()
pin12 = _MicroBitPin()
pin13 = _MicroBitPin()
pin14 = _MicroBitPin()
pin15 = _MicroBitPin()
pin16 = _MicroBitPin()
pin19 = _MicroBitPin()
pin20 = _MicroBitPin()
# to do: Images
# to do: The accelerometer
# to do: The compass
# to do: I2C bus
# to do: UART
|
hickford/microbit-dummy-python-api
|
microbit.py
|
Python
|
mit
| 3,004
|
[
"exciting"
] |
2efb6e25c899c4d11d0b4c32888587befb509e30d1a543b2833fb0a703de3128
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Restricted open-shell Kohn-Sham for periodic systems at a single k-point
'''
import time
import numpy
import pyscf.dft
from pyscf import lib
from pyscf.pbc.scf import rohf
from pyscf.pbc.dft import rks
from pyscf.pbc.dft import uks
@lib.with_doc(uks.get_veff.__doc__)
def get_veff(ks, cell=None, dm=None, dm_last=0, vhf_last=0, hermi=1,
kpt=None, kpts_band=None):
if getattr(dm, 'mo_coeff', None) is not None:
mo_coeff = dm.mo_coeff
mo_occ_a = (dm.mo_occ > 0).astype(numpy.double)
mo_occ_b = (dm.mo_occ ==2).astype(numpy.double)
dm = lib.tag_array(dm, mo_coeff=(mo_coeff,mo_coeff),
mo_occ=(mo_occ_a,mo_occ_b))
return uks.get_veff(ks, cell, dm, dm_last, vhf_last, hermi, kpt, kpts_band)
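# Editor's sketch (hypothetical occupations, numpy only): how the ROHF
# occupation vector is split into the alpha/beta pair that the
# spin-unrestricted uks.get_veff above consumes. Doubly occupied orbitals
# contribute to both spins; the singly occupied orbital only to alpha.
def _rohf_occ_split_demo():
    mo_occ = numpy.array([2., 2., 1., 0.])
    mo_occ_a = (mo_occ > 0).astype(numpy.double)   # [1., 1., 1., 0.]
    mo_occ_b = (mo_occ == 2).astype(numpy.double)  # [1., 1., 0., 0.]
    return mo_occ_a, mo_occ_b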
class ROKS(rks.KohnShamDFT, rohf.ROHF):
    '''ROKS class adapted for PBCs.
This is a literal duplication of the molecular UKS class with some `mol`
variables replaced by `cell`.
'''
def __init__(self, cell, kpt=numpy.zeros(3), xc='LDA,VWN'):
rohf.ROHF.__init__(self, cell, kpt)
rks.KohnShamDFT.__init__(self, xc)
def dump_flags(self, verbose=None):
rohf.ROHF.dump_flags(self, verbose)
rks.KohnShamDFT.dump_flags(self, verbose)
return self
get_veff = get_veff
energy_elec = pyscf.dft.uks.energy_elec
get_rho = uks.get_rho
density_fit = rks._patch_df_beckegrids(rohf.ROHF.density_fit)
mix_density_fit = rks._patch_df_beckegrids(rohf.ROHF.mix_density_fit)
if __name__ == '__main__':
from pyscf.pbc import gto
cell = gto.Cell()
cell.unit = 'A'
cell.atom = 'C 0., 0., 0.; C 0.8917, 0.8917, 0.8917'
cell.a = '''0. 1.7834 1.7834
1.7834 0. 1.7834
1.7834 1.7834 0. '''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.verbose = 7
cell.output = '/dev/null'
cell.build()
mf = ROKS(cell)
print(mf.kernel())
|
gkc1000/pyscf
|
pyscf/pbc/dft/roks.py
|
Python
|
apache-2.0
| 2,644
|
[
"PySCF"
] |
2c50112bf0fcf270cefd7d70e574c93973613c027465dfb03d2bd6fc7e036b5b
|
#!/usr/bin/env python3
"""
Copyright (C) 2015 Petr Skovoroda <petrskovoroda@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301 USA
"""
from gi.repository import GdkPixbuf, Gtk, Gdk
import glob
import json
import logging
import os
import re
import requests
import urllib.request
from collections import deque
from datetime import datetime
from datetime import timedelta
try:
from lxml import etree
except ImportError as err:
import xml.etree.ElementTree as etree
import silver.config as config
from silver.globals import ICON
from silver.globals import IMG_DIR
from silver.globals import SCHED_FILE
from silver.gui.common import hex_to_rgba
from silver.msktz import MSK
SCHED_URL = "http://silver.ru/programms/"
SILVER_RAIN_URL = "http://silver.ru"
USER_AGENT = "Mozilla/5.0 (X11; Linux x86_64) " + \
"AppleWebKit/537.36 (KHTML, like Gecko) " + \
"Chrome/41.0.2227.0 Safari/537.36"
# Use this list to operate with schedule
SCHED_WEEKDAY_LIST = ["Monday", "Tuesday", "Wednesday", "Thursday",
"Friday", "Saturday", "Sunday"]
MUSIC = "Музыка"
MUSIC_URL = "http://silver.ru/programms/muzyka/"
def str_time(start, end):
""" Return time in HH:MM-HH:MM """
s_h, s_m = divmod(int(start), 3600)
s_m = int(s_m / 60)
e_h, e_m = divmod(int(end), 3600)
e_m = int(e_m / 60)
return "{0:0=2d}:{1:0=2d} - {2:0=2d}:{3:0=2d}".format(s_h, s_m, e_h, e_m)
def parse_time(str):
""" Return time in seconds """
try:
x = datetime.strptime(str, "%H:%M")
except ValueError:
        # strptime does not accept "24:00";
        # just return the correct value directly
return 86400.0
d = timedelta(hours=x.hour, minutes=x.minute)
return d.total_seconds()
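# Editor's sketch: round-tripping the two helpers above (hypothetical times).
# parse_time("13:30") gives 48600.0 seconds; feeding two such values back
# into str_time() yields "13:30 - 14:30".
def _time_helpers_demo():
    start = parse_time("13:30")
    end = parse_time("14:30")
    return str_time(start, end)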
def parse_weekday(str):
""" Return list from string """
wd_parsed = []
wd_list = str.split(", ")
wd_name_list = {"Вс" : 6, "Пн" : 0, "Вт" : 1, "Ср" : 2,
"Чт" : 3, "Пт" : 4, "Сб" : 5}
for wd in wd_list :
x = wd.split('-')
if len(x) > 1:
wd_parsed += list(range(wd_name_list[x[0]], wd_name_list[x[1]]+1))
elif x == "По будням" :
wd_parsed += list(range(0,5))
elif x == "По выходным" :
wd_parsed += [5, 6]
else :
wd_parsed += [ wd_name_list[x[0].strip()] ]
return wd_parsed
def parse_hosts(hosts):
""" Return formatted string from list """
if len(hosts) > 1 :
str = ", ".join(hosts[:-1])
str += " и " + hosts[-1]
else :
str = ''.join(hosts)
return str
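# Editor's sketch (hypothetical inputs): "Пн-Ср, Сб" expands to the weekday
# indices [0, 1, 2, 5], and a host list is joined with a Russian "and"
# before the last name, e.g. "Alice, Bob и Carol".
def _parse_helpers_demo():
    return parse_weekday("Пн-Ср, Сб"), parse_hosts(["Alice", "Bob", "Carol"])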
class SilverSchedule():
"""
_sched_week - full schedule
_sched_day - daily agenda
_event - currently playing
Schedule list[weekday(0-6)]:
position int
weekday str
is_parent bool
is_merged bool
time (HH:MM-HH:MM) str
title str
url str
host [str]
icon str
start (seconds) float
end (seconds) float
cover str
record bool
play bool
"""
def __init__(self):
self._sched_week = [ [] for x in range(7) ]
self._sched_day = deque()
self._event = {}
self._SCHEDULE_ERROR = False
def get_event_title(self):
""" Return event title """
if not self._SCHEDULE_ERROR:
return self._event["title"]
else:
return "Silver-Rain"
def get_event_time(self):
""" Return event time hh:mm-hh:mm """
if not self._SCHEDULE_ERROR:
return self._event["time"]
else:
return "00:00-24:00"
def get_event_url(self):
""" Return event url """
if not self._SCHEDULE_ERROR:
return self._event["url"]
else:
return "http://silver.ru/"
def get_event_merged_status(self):
""" Return end time in seconds """
if not self._SCHEDULE_ERROR:
return self._event["is_merged"]
else:
return False
def get_event_end(self):
""" Return end time in seconds """
if not self._SCHEDULE_ERROR:
return self._event["end"]
else:
return 86400.0
def get_event_position(self):
""" Return event position """
if not self._SCHEDULE_ERROR:
return self._event["position"]
else:
return 0
def get_event_weekday(self):
""" Return weekday """
if not self._SCHEDULE_ERROR:
return SCHED_WEEKDAY_LIST.index(self._event["weekday"])
else:
return datetime.now(MSK()).weekday()
def get_event_icon(self):
""" Return pixbuf """
if not self._SCHEDULE_ERROR and self._event["icon"]:
pb = GdkPixbuf.Pixbuf.new_from_file_at_size(self._event["icon"],
width=90, height=90)
else:
icontheme = Gtk.IconTheme.get_default()
pb = icontheme.load_icon(ICON, 256, 0)
pb = pb.scale_simple(80, 80, GdkPixbuf.InterpType.BILINEAR)
return pb
def get_event_host(self):
""" Return host """
if not self._SCHEDULE_ERROR:
str = parse_hosts(self._event["host"])
else:
str = ""
return str
def get_event_cover(self):
file = ""
if not self._SCHEDULE_ERROR and config.background_image:
file = self._event["cover"]
return file
def get_record_status(self):
""" Return True if should be recorded """
if not self._SCHEDULE_ERROR:
return self._event["record"]
else:
return False
def get_play_status(self):
""" Return True if should start playing """
if not self._SCHEDULE_ERROR:
return self._event["play"]
else:
return False
def update_event(self):
""" Update current event """
if not len(self._sched_day):
# It's a new day.
# It's so mundane. What exciting things will happen today?
self._sched_gen_daily_agenda()
self._event = self._sched_day.popleft()
def update_schedule(self, force_refresh=False):
""" Retrieve schedule """
self._SCHEDULE_ERROR = True
if not force_refresh and os.path.exists(SCHED_FILE):
# Read from file
self._sched_load_from_file()
if self._SCHEDULE_ERROR:
# Backup
sched_week_bak = self._sched_week
sched_day_bak = self._sched_day
# Clear
self._sched_week = [ [] for x in range(7) ]
self._sched_day = deque()
# Load from website
if not self._sched_load_from_html():
if sched_week_bak[0]:
# Got backup. Reset error status
self._SCHEDULE_ERROR = False
self._sched_week = sched_week_bak
self._sched_day = sched_day_bak
return False
# Generate schedule for today
self._sched_gen_daily_agenda()
# Update current event
self.update_event()
self._SCHEDULE_ERROR = False
return True
def update_covers(self):
""" Retrieve covers """
covers = {}
for wd in range(7):
for item in self._sched_week[wd]:
if not item["is_main"]:
continue
elif item["title"] in covers:
# If already downloaded
item["cover"] = covers[item["title"]]
continue
item["cover"] = self._get_cover(item["url"])
covers[item["title"]] = item["cover"]
self._sched_write_to_file()
def fill_tree_store(self, store):
""" Fill TreeStore object """
it = None
font = config.font
fg_color = config.font_color
icontheme = Gtk.IconTheme.get_default()
bg_dark = False
ch_dark = False
for wd in range(7):
for item in self._sched_week[wd]:
# Get pixbuf
if item["icon"]:
icon = GdkPixbuf.Pixbuf.new_from_file(item["icon"])
else:
# Load default icon instead
icon = icontheme.load_icon(ICON, 256, 0)
# Scale
sz = 80
if not item["is_main"]:
sz = 60
icon = icon.scale_simple(sz, sz, GdkPixbuf.InterpType.BILINEAR)
# Join hosts
host = parse_hosts(item["host"])
# Insert program
if item["is_main"]:
# Main event
bg_color = hex_to_rgba(config.bg_colors[bg_dark])
bg_color.alpha = config.bg_alpha[bg_dark]
# Insert item
it = store.append(None, [item["weekday"], item["is_main"],
item["time"], item["title"],
item["url"], host, icon,
bg_color, fg_color, font,
bg_dark, item["record"],
item["play"], item["is_merged"]])
# Alternate row color
bg_dark = not bg_dark
ch_dark = bg_dark
else:
# Child event
bg_color = hex_to_rgba(config.bg_colors[ch_dark])
bg_color.alpha = config.bg_alpha[ch_dark]
# Insert item
store.append(it, [item["weekday"], item["is_main"],
item["time"], item["title"], item["url"],
host, icon, bg_color, fg_color, font,
ch_dark, False, False, False])
# Alternate row color
ch_dark = not ch_dark
def set_record_status(self, status, wd, time):
""" Set recorder status """
for item in self._sched_week[wd]:
if not item["is_main"]:
continue
if item["time"] == time:
item["record"] = status
break
else:
logging("Program not found")
return
self._sched_write_to_file()
def set_play_status(self, status, wd, time):
""" Set playback flag """
for item in self._sched_week[wd]:
if not item["is_main"]:
continue
if item["time"] == time:
item["play"] = status
break
else:
logging("Program not found")
return
self._sched_write_to_file()
def _sched_gen_daily_agenda(self):
""" Create a list of main events for today """
today = datetime.now(MSK())
now = timedelta(hours=today.hour, minutes=today.minute,
seconds=today.second).total_seconds()
position = 0
self._sched_day = deque()
for it in reversed(self._sched_week[today.weekday() - 1]):
if not it["is_main"]:
continue
if it["is_merged"]:
if it["end"] > now:
self._sched_day.append(it)
self._sched_day[0]["position"] = position
position += 1
break
for item in self._sched_week[today.weekday()]:
if not item["is_main"]:
continue
else:
item["position"] = position
position += 1
if item["end"] <= now and not item["is_merged"]:
# Already ended. Skip
continue
self._sched_day.append(item)
def _sched_load_from_file(self):
""" Load schedule from file """
with open(SCHED_FILE, "r") as f:
self._sched_week = json.load(f)
# Check integrity
self._SCHEDULE_ERROR = False
program = self._sched_week[0][0]
keys = ["weekday", "is_main", "is_merged", "time", "title", "url",
"host", "icon", "cover", "start", "end", "play", "record"]
for key in keys:
if key not in program.keys():
self._SCHEDULE_ERROR = True
break
def _sched_write_to_file(self):
""" Save schedule on disk """
with open(SCHED_FILE, 'w') as f:
json.dump(self._sched_week, f)
def _sched_load_from_html(self):
""" Load schedule from site """
# Create session with fake user-agent
session = requests.Session()
session.headers["User-Agent"] = USER_AGENT
# Default event icon
music_icon_name = ""
# Weekdays parser
wd_name_list = {"Вс" : [6], "Пн" : [0], "Вт" : [1], "Ср" : [2],
"Чт" : [3], "Пт" : [4], "Сб" : [5],
"По будням" : list(range(0,5)),
"По выходным" : [5, 6]}
try:
# Download schedule
resp = session.get(SCHED_URL)
if resp.status_code != 200:
logging.error("Couldn't reach server. Code:", resp.status_code)
return False
# Get table
r = r'^.*<div\ class="program-list.*?(<tbody>.*?<\/tbody>).*$'
xhtml = re.sub(r, r'\1', resp.text)
# Handle unclosed img tags /* xhtml style */
xhtml = re.sub(r'(<img.*?"\s*)>', r'\1/>', xhtml)
xhtml = re.sub(r'<br>', r'<br/>', xhtml)
            xhtml = re.sub(r'&nbsp;', r'', xhtml)
root = etree.fromstring(xhtml)
except requests.exceptions.RequestException as e:
logging.error(str(e))
return False
except ValueError as e:
logging.error("Unexpected response")
logging.error(str(e))
return False
except etree.XMLSyntaxError as e:
logging.error("Syntax error")
logging.error(str(e))
return False
# Parse xhtml text
for obj in root:
# If time not presented
if not len(obj[3]):
# Event happens randomly or never
continue
# Get title
title = obj[1][0][0].text.strip()
# Event type
is_main = False
is_merged = False
# Get icon
icon_src = obj[0][0][0].attrib['src'].split("?")[0]
icon_name = self._get_icon(icon_src)
# Get program url
url = obj[1][0][0].attrib['href']
url = re.sub(r'^.*(/programms/.*?/).*$', r'\1', url)
url = SILVER_RAIN_URL + url
# Don't parse music. Just save icon location
if title == MUSIC:
music_icon_name = icon_name
continue
# Get hosts
host = []
if len(obj[2]):
# If hosts presented
for it in obj[2][0]:
h = it[0][0].text.strip()
h = h.split(' ')
if len(h) == 2 :
# Show name first
h.insert(0, h.pop())
h = ' '.join(h)
host.append(h)
# Get schedule
# Expecting "WD: HH:MM - HH:MM" format
sched = []
sched_list = []
wd_list_prev = []
for it in obj[3][0]:
sched_list.append(it.text)
i = 0
while i < len(it) - 1:
sched_list.append(it[i].tail)
i += 1
for it in sched_list:
if not it:
continue
# Remove extra comma
if it[-1] == ',' :
it = it[:-1]
try:
# Split wd and time
weekday, time = it.split(': ')
wd_list = parse_weekday(weekday)
wd_list_prev = wd_list
except ValueError:
# No weekday, use previous
wd_list = wd_list_prev
time = it
# Parse time
start, end = time.split('-')
if start.strip() == "24:00":
start = "00:00"
if end.strip() == "00:00":
end = "24:00"
# Fix Mixtape
#XXX: This is ridiculous
                # It might be better to retrieve the schedule from the same
                # source the win version uses.
if title == "Mixtape" and wd_list == [4] and \
time.strip() == "04:00 - 05:00":
wd_list = [5,6]
start = "01:00"
end = "05:00"
# Convert into seconds
start = parse_time(start.strip())
end = parse_time(end.strip())
# Calculate length
length = end - start
if length < 0:
length = 86400 + length
is_merged = True
if length >= 3000:
# At least 50 minutes
is_main = True
# Round to hours
#FIXME: That's rude, but I don't have time right now
hrs, mins = divmod(length, 3600)
if mins:
start = start - start % 3600
end = start + (hrs + 1) * 3600
if is_merged:
end -= 86400
# Convert into string
time = str_time(start, end)
# Weekday number,
# HH:MM,
# start in seconds,
# end in seconds
sched.append([ wd_list, time, start, end ])
# Insert
for it in sched:
for weekday in it[0]:
program = {}
program["weekday"] = SCHED_WEEKDAY_LIST[weekday]
program["is_main"] = is_main
program["is_merged"] = is_merged
program["time"] = it[1]
program["title"] = title
program["url"] = url
program["host"] = host
program["icon"] = icon_name
program["cover"] = ""
program["start"] = it[2]
program["end"] = it[3]
program["play"] = False
program["record"] = False
self._sched_week[weekday].append(program)
for wd in range(7):
# Sort schedule by parent/start/end
self._sched_week[wd].sort(key = lambda x : (-x["is_main"], \
x["start"], -x["end"]))
# Fix common errors
i = 1
while i < len(self._sched_week[wd]):
item = self._sched_week[wd][i]
prev = self._sched_week[wd][i-1]
next = {}
# Fix only main items
if not item["is_main"] :
break
# Join duplicates
if (prev["end"] >= item["start"] and
prev["title"] == item["title"]) :
if prev["end"] > item["end"]:
item["end"] = prev["end"]
if prev["start"] < item["start"]:
item["start"] = prev["start"]
item["time"] = str_time(item["start"], item["end"])
self._sched_week[wd][i] = item
self._sched_week[wd].pop(i-1)
continue
# Fix one main program inside another
elif (prev["end"] > item["start"] and
prev["end"] >= item["end"]) :
if (prev["end"] > item["end"]) :
# Divide bigger one in two
next = prev
next["start"] = item["end"]
next["time"] = str_time(next["start"], next["end"])
self._sched_week[wd].insert(i+1, next)
if (prev["start"] < item["start"]) :
prev["end"] = item["start"]
prev["time"] = str_time(prev["start"], prev["end"])
self._sched_week[wd][i-1] = prev
else :
# Delete left one
self._sched_week[wd].pop(i-1)
i -= 1
if len(next) : i += 1
i += 1
# Sort schedule by start/parent
self._sched_week[wd].sort(key = lambda x : \
(x["start"], -x["is_main"]))
time = 0.0
last = {"end" : 0}
# Find first program
for it in reversed(self._sched_week[wd - 1]):
if it["is_main"]:
if it["is_merged"]: time = it["end"]
break
# Fill spaces with music
sched_week_with_music = []
for item in self._sched_week[wd]:
if not item["is_main"]:
sched_week_with_music.append(item)
continue
if item["start"] > time:
# If doesn't start right after the previous one
program = {}
program["is_main"] = True
program["is_merged"] = False
program["title"] = MUSIC
program["url"] = MUSIC_URL
program["host"] = []
program["icon"] = music_icon_name
program["cover"] = ""
program["weekday"] = SCHED_WEEKDAY_LIST[wd]
program["time"] = str_time(time, item["start"])
program["start"] = time
program["end"] = item["start"]
program["play"] = False
program["record"] = False
sched_week_with_music.append(program)
time = item["end"]
sched_week_with_music.append(item)
last = item
# Check if last event doesn't go till 24:00
if last["end"] < 86400.0 and not last["is_merged"]:
program = {}
program["is_main"] = True
program["is_merged"] = False
program["title"] = MUSIC
program["url"] = MUSIC_URL
program["host"] = []
program["icon"] = music_icon_name
program["cover"] = ""
program["weekday"] = SCHED_WEEKDAY_LIST[wd]
program["time"] = str_time(last["end"], 86400.0)
program["start"] = last["end"]
program["end"] = 86400.0
program["play"] = False
program["record"] = False
sched_week_with_music.append(program)
self._sched_week[wd] = sched_week_with_music
# Sort again
self._sched_week[wd].sort(key = lambda x : \
(x["start"], -x["is_main"], x["end"]))
# Save sched to file
self._sched_write_to_file()
return True
def _get_icon(self, src):
""" Download icon from url """
name = ""
if src.split(".")[-1] not in ["jpg", "jpeg", "png"]:
return name
if src[:7] != "http://":
if src[:2] == "//":
# //url/name.png
src = "http:" + src
elif src[0] == "/":
# /name.png
src = SILVER_RAIN_URL + src
else:
# url/name.png
src = "http://" + src
name = IMG_DIR + src.split("/")[-1]
# Download icon if it doesn't exist
if not os.path.exists(name):
try:
urllib.request.urlretrieve(src, name)
except urllib.error.URLError as e:
err = "Couldn't download icon from url: " + src
logging.error(err)
logging.error(str(e))
name = ""
return name
def _get_cover(self, program_page):
""" Download program cover """
name = ""
session = requests.Session()
session.headers["User-Agent"] = USER_AGENT
try:
resp = session.get(program_page)
if resp.status_code != 200:
logging.error("Couldn't reach server. Code:", resp.status_code)
return name
# Get image src
div = r'<div class="program-detail">.*?<div class="title".*?div>'
found = re.findall(div, resp.text)
src = re.sub(r'.*<img src="([^\?"]+)\??.*?".*', r'\1', found[0])
name = self._get_icon(src)
except requests.exceptions.RequestException as e:
logging.error(str(e))
except ValueError as e:
logging.error("Unexpected response")
logging.error(str(e))
except IndexError:
logging.error("Background not found")
return name
|
PetrSkovoroda/silver-rain
|
src/silver/schedule.py
|
Python
|
gpl-2.0
| 26,542
|
[
"exciting"
] |
7f17b44d93238e9d397c20c3c0ce545740d8d0153540427788f23e3ac02e4bba
|
#!/usr/bin/env python
#JSON {"lot": "UKS/6-31G(d)",
#JSON "scf": "EDIIS2SCFSolver",
#JSON "er": "cholesky",
#JSON "difficulty": 5,
#JSON "description": "Basic UKS DFT example with hyrbid GGA exhange-correlation functional (B3LYP)"}
import numpy as np
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
# Load the coordinates from file.
# Use the XYZ file from HORTON's test data directory.
fn_xyz = context.get_fn('test/methyl.xyz')
mol = IOData.from_file(fn_xyz)
# Create a Gaussian basis set
obasis = get_gobasis(mol.coordinates, mol.numbers, '6-31g(d)')
# Compute Gaussian integrals
olp = obasis.compute_overlap()
kin = obasis.compute_kinetic()
na = obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers)
er_vecs = obasis.compute_electron_repulsion_cholesky()
# Define a numerical integration grid needed for the XC functionals
grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers)
# Create alpha orbitals
orb_alpha = Orbitals(obasis.nbasis)
orb_beta = Orbitals(obasis.nbasis)
# Initial guess
guess_core_hamiltonian(olp, kin + na, orb_alpha, orb_beta)
# Construct the unrestricted Kohn-Sham (UKS) effective Hamiltonian
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
libxc_term = ULibXCHybridGGA('xc_b3lyp')
terms = [
UTwoIndexTerm(kin, 'kin'),
UDirectTerm(er_vecs, 'hartree'),
UGridGroup(obasis, grid, [libxc_term]),
UExchangeTerm(er_vecs, 'x_hf', libxc_term.get_exx_fraction()),
UTwoIndexTerm(na, 'ne'),
]
ham = UEffHam(terms, external)
# Decide how to occupy the orbitals (5 alpha electrons, 4 beta electrons)
occ_model = AufbauOccModel(5, 4)
# Converge WFN with CDIIS+EDIIS SCF
# - Construct the initial density matrix (needed for CDIIS+EDIIS).
occ_model.assign(orb_alpha, orb_beta)
dm_alpha = orb_alpha.to_dm()
dm_beta = orb_beta.to_dm()
# - SCF solver
scf_solver = EDIIS2SCFSolver(1e-6)
scf_solver(ham, olp, occ_model, dm_alpha, dm_beta)
# Derive orbitals (coeffs, energies and occupations) from the Fock and density
# matrices. The energy is also computed to store it in the output file below.
fock_alpha = np.zeros(olp.shape)
fock_beta = np.zeros(olp.shape)
ham.reset(dm_alpha, dm_beta)
ham.compute_energy()
ham.compute_fock(fock_alpha, fock_beta)
orb_alpha.from_fock_and_dm(fock_alpha, dm_alpha, olp)
orb_beta.from_fock_and_dm(fock_beta, dm_beta, olp)
# Assign results to the molecule object and write it to a file, e.g. for
# later analysis. Note that the CDIIS+EDIIS algorithm can only really construct
# an optimized density matrix, not the orbitals.
mol.title = 'UKS computation on methyl'
mol.energy = ham.cache['energy']
mol.obasis = obasis
mol.orb_alpha = orb_alpha
mol.orb_beta = orb_beta
mol.dm_alpha = dm_alpha
mol.dm_beta = dm_beta
# useful for post-processing (results stored in double precision):
mol.to_file('methyl.h5')
# CODE BELOW IS FOR horton-regression-test.py ONLY. IT IS NOT PART OF THE EXAMPLE.
rt_results = {
'energy': ham.cache['energy'],
'orb_alpha': orb_alpha.energies,
'orb_beta': orb_beta.energies,
'nn': ham.cache["energy_nn"],
'kin': ham.cache["energy_kin"],
'ne': ham.cache["energy_ne"],
'grid': ham.cache["energy_grid_group"],
'ex': ham.cache["energy_x_hf"],
'hartree': ham.cache["energy_hartree"],
}
# BEGIN AUTOGENERATED CODE. DO NOT CHANGE MANUALLY.
rt_previous = {
'energy': -39.829219635738035,
'ex': -1.2302245921907746,
'orb_alpha': np.array([
-10.202510805419273, -0.66406017496772685, -0.40272214145506163,
-0.40271932405711497, -0.22486640330553606, 0.088865289955265131,
0.16218818378752942, 0.16219513232872484, 0.53321762474455336, 0.5676983657643706,
0.56770082505360386, 0.68335383164916741, 0.86349126731885351,
0.86350325512698756, 0.9207151969432954, 1.6565381606272092, 1.6565783656997477,
1.9537833876302899, 2.1093515670671379, 2.109434637212066
]),
'orb_beta': np.array([
-10.187835552622923, -0.62229231046907707, -0.39382059242599643,
-0.393812529523204, -0.04712861670320255, 0.10635162807276623,
0.16867798588387717, 0.16868833111894113, 0.57659434151316746,
0.57660076741912725, 0.61221503662441623, 0.70335183838284976,
0.86671222734246489, 0.86673750637878644, 0.95059963484638044, 1.7297544001544294,
1.7299218939578214, 2.0444562761782712, 2.1211021738699447, 2.1211432410416582
]),
'grid': -5.2062211181316655,
'hartree': 28.101037319269153,
'kin': 39.348971323946444,
'ne': -109.92256751129483,
'nn': 9.0797849426636361,
}
|
theochem/horton
|
data/examples/hf_dft/uks_methyl_hybgga.py
|
Python
|
gpl-3.0
| 4,578
|
[
"Gaussian"
] |
4cc22780cd92d5cc03e2f00d068e038064419602d5eab732fabc117feb734489
|
# -*- Mode: Python; coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import mock
from stoq.gui.test.baseguitest import BaseGUITest
from stoqlib.lib.dateutils import localdate
from ..opticaldomain import OpticalPatientHistory
from ..opticalhistory import (OpticalPatientHistoryEditor,
OpticalPatientTestEditor,
OpticalPatientMeasuresEditor,
OpticalPatientVisualAcuityEditor,
OpticalPatientDetails)
from .test_optical_domain import OpticalDomainTest
class TestOpticalPatientEditors(BaseGUITest, OpticalDomainTest):
def _generic_test(self, editor_class, uitest, create=None):
client = self.create_client()
model = None
case = 'show'
if create is not None:
model = create(client)
case = 'create'
editor = editor_class(self.store, client, model)
self.check_editor(editor, 'editor-optical-patient-%s-%s' % (uitest, case))
return editor
def test_optical_patient_history(self):
# First we test creating a new model
self._generic_test(OpticalPatientHistoryEditor, 'history')
# Then we test using an existing model
editor = self._generic_test(OpticalPatientHistoryEditor, 'history',
self.create_optical_patient_history)
editor.user_type.select(OpticalPatientHistory.TYPE_SECOND_USER)
editor.user_type.select(OpticalPatientHistory.TYPE_EX_USER)
editor.user_type.select(OpticalPatientHistory.TYPE_FIRST_USER)
def test_optical_patient_test(self):
# First we test creating a new model
self._generic_test(OpticalPatientTestEditor, 'test')
# Then we test using an existing model
self._generic_test(OpticalPatientTestEditor, 'test',
self.create_optical_patient_tes)
def test_optical_patient_measures(self):
# First we test creating a new model
self._generic_test(OpticalPatientMeasuresEditor, 'measures')
# Then we test using an existing model
self._generic_test(OpticalPatientMeasuresEditor, 'measures',
self.create_optical_patient_measures)
def test_optical_patient_visual_acuity(self):
# First we test creating a new model
self._generic_test(OpticalPatientVisualAcuityEditor, 'visual-acuity')
# Then we test using an existing model
self._generic_test(OpticalPatientVisualAcuityEditor, 'visual-acuity',
self.create_optical_patient_visual_acuity)
class TestOpticalPatientDetails(BaseGUITest, OpticalDomainTest):
def test_show(self):
client = self.create_client()
when = localdate(2012, 9, 1)
# Create some data for the editor to display.
history = self.create_optical_patient_history(client)
history.create_date = when
data = self.create_optical_patient_tes(client)
data.create_date = when
data = self.create_optical_patient_measures(client)
data.create_date = when
data = self.create_optical_patient_visual_acuity(client)
data.create_date = when
editor = OpticalPatientDetails(self.store, client)
self.check_editor(editor, 'editor-optical-patient-details')
slave = editor.slaves['history_holder']
with mock.patch.object(slave, 'run_dialog') as run_dialog:
slave.run_editor(self.store, history)
run_dialog.assert_called_once_with(OpticalPatientHistoryEditor,
store=self.store, client=client, model=history)
|
andrebellafronte/stoq
|
plugins/optical/tests/test_optical_history.py
|
Python
|
gpl-2.0
| 4,533
|
[
"VisIt"
] |
2dc4da39d120ee5d4a79e45fa672c9d818905a037e0fc165a501b5e78737816f
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*********************************
espressopp.analysis.StaticStructF
*********************************
.. function:: espressopp.analysis.StaticStructF(system)
:param system:
:type system:
.. function:: espressopp.analysis.StaticStructF.compute(nqx, nqy, nqz, bin_factor, ofile)
:param nqx:
:param nqy:
:param nqz:
:param bin_factor:
:param ofile: (default: None)
:type nqx:
:type nqy:
:type nqz:
:type bin_factor:
:type ofile:
:rtype:
.. function:: espressopp.analysis.StaticStructF.computeSingleChain(nqx, nqy, nqz, bin_factor, chainlength, ofile)
:param nqx:
:param nqy:
:param nqz:
:param bin_factor:
:param chainlength:
:param ofile: (default: None)
:type nqx:
:type nqy:
:type nqz:
:type bin_factor:
:type chainlength:
:type ofile:
:rtype:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.Observable import *
from _espressopp import analysis_StaticStructF
class StaticStructFLocal(ObservableLocal, analysis_StaticStructF):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_StaticStructF, system)
def compute(self, nqx, nqy, nqz, bin_factor, ofile = None):
if ofile is None:
return self.cxxclass.compute(self, nqx, nqy, nqz, bin_factor)
else:
#run compute on each CPU
result = self.cxxclass.compute(self, nqx, nqy, nqz, bin_factor)
#create the outfile only on CPU 0
if pmi.isController:
myofile = 'qsq_' + str(ofile) + '.txt'
outfile = open (myofile, 'w')
for i in range (len(result)):
line = str(result[i][0]) + "\t" + str(result[i][1]) + "\n"
outfile.write(line)
outfile.close()
return result
def computeSingleChain(self, nqx, nqy, nqz, bin_factor, chainlength, ofile = None):
if ofile is None:
return self.cxxclass.computeSingleChain(self, nqx, nqy, nqz, bin_factor, chainlength)
else:
#run computeSingleChain on each CPU
result = self.cxxclass.computeSingleChain(self, nqx, nqy, nqz, bin_factor, chainlength)
print(result) #this line is in case the outfile causes problems
#create the outfile only on CPU 0
if pmi.isController:
myofile = 'qsq_singleChain' + str(ofile) + '.txt'
outfile = open (myofile, 'w')
for i in range (len(result)):
line = str(result[i][0]) + "\t" + str(result[i][1]) + "\n"
outfile.write(line)
outfile.close()
return result
if pmi.isController:
class StaticStructF(Observable, metaclass=pmi.Proxy):
pmiproxydefs = dict(
pmicall = [ "compute", "computeSingleChain" ],
cls = 'espressopp.analysis.StaticStructFLocal'
)
|
espressopp/espressopp
|
src/analysis/StaticStructF.py
|
Python
|
gpl-3.0
| 4,261
|
[
"ESPResSo"
] |
457eb168db381e7ae2ad5403b9f6af36c02375d430ef147eb1a683c477a48e4b
|
"""Code run on the client side for unstaging complete Pulsar jobs."""
from contextlib import contextmanager
from logging import getLogger
from os.path import join, relpath
from json import loads
from ..action_mapper import FileActionMapper
from ..staging import COMMAND_VERSION_FILENAME
log = getLogger(__name__)
def finish_job(client, cleanup_job, job_completed_normally, client_outputs, pulsar_outputs):
"""Process for "un-staging" a complete Pulsar job.
    This function is responsible for downloading results from the remote
    server and cleaning up the Pulsar staging directory (if needed).
"""
collection_failure_exceptions = []
if job_completed_normally:
output_collector = ClientOutputCollector(client)
action_mapper = FileActionMapper(client)
results_stager = ResultsCollector(output_collector, action_mapper, client_outputs, pulsar_outputs)
collection_failure_exceptions = results_stager.collect()
_clean(collection_failure_exceptions, cleanup_job, client)
return collection_failure_exceptions
class ClientOutputCollector(object):
def __init__(self, client):
self.client = client
def collect_output(self, results_collector, output_type, action, name):
# This output should have been handled by the Pulsar.
if not action.staging_action_local:
return False
working_directory = results_collector.client_outputs.working_directory
self.client.fetch_output(
path=action.path,
name=name,
working_directory=working_directory,
output_type=output_type,
action_type=action.action_type
)
return True
class ResultsCollector(object):
def __init__(self, output_collector, action_mapper, client_outputs, pulsar_outputs):
self.output_collector = output_collector
self.action_mapper = action_mapper
self.client_outputs = client_outputs
self.pulsar_outputs = pulsar_outputs
self.downloaded_working_directory_files = []
self.exception_tracker = DownloadExceptionTracker()
self.output_files = client_outputs.output_files
self.working_directory_contents = pulsar_outputs.working_directory_contents or []
self.metadata_directory_contents = pulsar_outputs.metadata_directory_contents or []
self.job_directory_contents = pulsar_outputs.job_directory_contents or []
def collect(self):
self.__collect_working_directory_outputs()
self.__collect_outputs()
self.__collect_version_file()
self.__collect_other_working_directory_files()
self.__collect_metadata_directory_files()
self.__collect_job_directory_files()
return self.exception_tracker.collection_failure_exceptions
def __collect_working_directory_outputs(self):
working_directory = self.client_outputs.working_directory
# Fetch explicit working directory outputs.
for source_file, output_file in self.client_outputs.work_dir_outputs:
name = relpath(source_file, working_directory)
pulsar = self.pulsar_outputs.path_helper.remote_name(name)
if self._attempt_collect_output('output_workdir', path=output_file, name=pulsar):
self.downloaded_working_directory_files.append(pulsar)
# Remove from full output_files list so don't try to download directly.
try:
self.output_files.remove(output_file)
except ValueError:
raise Exception("Failed to remove %s from %s" % (output_file, self.output_files))
def __collect_outputs(self):
# Legacy Pulsar not returning list of files, iterate over the list of
# expected outputs for tool.
for output_file in self.output_files:
# Fetch output directly...
output_generated = self.pulsar_outputs.has_output_file(output_file)
if output_generated:
self._attempt_collect_output('output', output_file)
for galaxy_path, pulsar in self.pulsar_outputs.output_extras(output_file).items():
self._attempt_collect_output('output', path=galaxy_path, name=pulsar)
# else not output generated, do not attempt download.
def __collect_version_file(self):
version_file = self.client_outputs.version_file
pulsar_output_directory_contents = self.pulsar_outputs.output_directory_contents
if version_file and COMMAND_VERSION_FILENAME in pulsar_output_directory_contents:
self._attempt_collect_output('output', version_file, name=COMMAND_VERSION_FILENAME)
def __collect_other_working_directory_files(self):
self.__collect_directory_files(
self.client_outputs.working_directory,
self.working_directory_contents,
'output_workdir',
)
def __collect_metadata_directory_files(self):
self.__collect_directory_files(
self.client_outputs.metadata_directory,
self.metadata_directory_contents,
'output_metadata',
)
def __collect_job_directory_files(self):
self.__collect_directory_files(
self.client_outputs.job_directory,
self.job_directory_contents,
'output_jobdir',
)
def __realized_dynamic_file_source_references(self):
references = []
def record_references(from_dict):
if isinstance(from_dict, list):
for v in from_dict:
record_references(v)
elif isinstance(from_dict, dict):
for k, v in from_dict.items():
if k == "filename":
references.append(v)
if isinstance(v, (list, dict)):
record_references(v)
def parse_and_record_references(json_content):
try:
as_dict = loads(json_content)
record_references(as_dict)
except Exception as e:
log.warning("problem parsing galaxy.json %s" % e)
pass
realized_dynamic_file_sources = (self.pulsar_outputs.realized_dynamic_file_sources or [])
for realized_dynamic_file_source in realized_dynamic_file_sources:
contents = realized_dynamic_file_source["contents"]
source_type = realized_dynamic_file_source["type"]
assert source_type in ["galaxy", "legacy_galaxy"], source_type
if source_type == "galaxy":
parse_and_record_references(contents)
else:
for line in contents.splitlines():
parse_and_record_references(line)
return references
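    # For reference, a hypothetical galaxy.json line that the parser above
    # would pick up (any value stored under a "filename" key, at any nesting
    # depth, is recorded):
    #
    #     {"type": "new_primary_dataset", "filename": "outputs/extra.tsv"}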
def __collect_directory_files(self, directory, contents, output_type):
if directory is None: # e.g. output_metadata_directory
return
dynamic_file_source_references = self.__realized_dynamic_file_source_references()
# Fetch remaining working directory outputs of interest.
for name in contents:
collect = False
if name in self.downloaded_working_directory_files:
continue
if self.client_outputs.dynamic_match(name):
collect = True
elif name in dynamic_file_source_references:
collect = True
if collect:
log.debug("collecting dynamic %s file %s" % (output_type, name))
output_file = join(directory, self.pulsar_outputs.path_helper.local_name(name))
if self._attempt_collect_output(output_type=output_type, path=output_file, name=name):
self.downloaded_working_directory_files.append(name)
def _attempt_collect_output(self, output_type, path, name=None):
# path is final path on galaxy server (client)
        # name is the 'name' of the file on the Pulsar server (possibly a relative
# path.
collected = False
with self.exception_tracker():
action = self.action_mapper.action({"path": path}, output_type)
if self._collect_output(output_type, action, name):
collected = True
return collected
def _collect_output(self, output_type, action, name):
log.info("collecting output %s with action %s" % (name, action))
try:
return self.output_collector.collect_output(self, output_type, action, name)
except Exception as e:
if _allow_collect_failure(output_type):
log.warning(
"Allowed failure in postprocessing, will not force job failure but generally indicates a tool"
f" failure: {e}")
else:
raise
class DownloadExceptionTracker(object):
def __init__(self):
self.collection_failure_exceptions = []
@contextmanager
def __call__(self):
try:
yield
except Exception as e:
self.collection_failure_exceptions.append(e)
def _clean(collection_failure_exceptions, cleanup_job, client):
failed = (len(collection_failure_exceptions) > 0)
do_clean = (not failed and cleanup_job != "never") or cleanup_job == "always"
if do_clean:
message = "Cleaning up job (failed [%s], cleanup_job [%s])"
else:
message = "Skipping job cleanup (failed [%s], cleanup_job [%s])"
log.debug(message % (failed, cleanup_job))
if do_clean:
try:
client.clean()
except Exception:
log.warn("Failed to cleanup remote Pulsar job")
def _allow_collect_failure(output_type):
return output_type in ['output_workdir']
__all__ = ('finish_job',)
|
natefoo/pulsar
|
pulsar/client/staging/down.py
|
Python
|
apache-2.0
| 9,758
|
[
"Galaxy"
] |
912fce42564fbe66c8a4ba928109ca131be40cdf599d7fa569a242b65d28f7a7
|
# -*- coding: utf-8 -*-
"""Various text used throughout the website, e.g. status messages, errors, etc.
"""
from website import settings
# Status Messages
#################
# NOTE: in status messages, newlines are not preserved, so triple-quoted
# strings are ok
# Status message shown at settings page on first login
# (upon clicking primary email confirmation link)
WELCOME_MESSAGE = """
<h1>Welcome to the OSF!</h1>
<p>Visit our <a href="https://openscience.zendesk.com/hc/en-us" target="_blank" rel="noreferrer">Guides</a> to learn about creating a project, or get inspiration from <a href="https://osf.io/explore/activity/#popularPublicProjects">popular public projects</a>.</p>
"""
TERMS_OF_SERVICE = """
<div style="text-align: center">
<div>
<h4>We've updated our <a target="_blank" href="https://github.com/CenterForOpenScience/cos.io/blob/master/TERMS_OF_USE.md">Terms of Use</a> and <a target="_blank" href="https://github.com/CenterForOpenScience/cos.io/blob/master/PRIVACY_POLICY.md">Privacy Policy</a>. Please read them carefully.</h4>
<h5><input type="checkbox" id="accept" style="margin-right: 5px">I have read and agree to these terms.</input></h5>
</div>
<button class="btn btn-primary" data-dismiss="alert" id="continue" disabled>Continue</button>
</div>
<script>
$('#accept').on('change', function() {{
$('#continue').prop('disabled', !$('#accept').prop('checked'));
}});
$('#continue').on('click', function() {{
var accepted = $('#accept').prop('checked');
$.ajax({{
url: '{api_domain}v2/users/me/',
type: 'PATCH',
contentType: 'application/json',
xhrFields: {{
withCredentials: true
}},
headers: {{
'X-CSRFToken': {csrf_token}
}},
data: JSON.stringify({{
'data': {{
'id': '{user_id}',
'type': 'users',
'attributes': {{
'accepted_terms_of_service': accepted
}}
}}
}})
}});
}});
</script>
"""
REGISTRATION_SUCCESS = """Registration successful. Please check {email} to confirm your email address."""
EXTERNAL_LOGIN_EMAIL_CREATE_SUCCESS = """A new OSF account has been created with your {external_id_provider} profile. Please check {email} to confirm your email address."""
EXTERNAL_LOGIN_EMAIL_LINK_SUCCESS = """Your OSF account has been linked with your {external_id_provider}. Please check {email} to confirm this action."""
# Shown if registration is turned off in website.settings
REGISTRATION_UNAVAILABLE = 'Registration currently unavailable.'
ALREADY_REGISTERED = u'The email {email} has already been registered.'
BLACKLISTED_EMAIL = 'Invalid email address. If this should not have occurred, please report this to {}.'.format(settings.OSF_SUPPORT_EMAIL)
# Shown if user tries to login with an email that is not yet confirmed
UNCONFIRMED = ('This login email has been registered but not confirmed. Please check your email (and spam folder).'
' <a href="/resend/">Click here</a> to resend your confirmation email.')
# Shown if the user's account is disabled
DISABLED = """
Log-in failed: Deactivated account.
"""
# Shown on incorrect password attempt
LOGIN_FAILED = """
Log-in failed. Please try again or reset your password.
"""
# Shown at login page if user tries to access a resource that requires auth
MUST_LOGIN = """
You must log in or create a new account to claim the contributorship.
"""
# Shown on logout
LOGOUT = """
You have successfully logged out.
"""
EMAIL_NOT_FOUND = u"""
{email} was not found in our records.
"""
# Shown after an unregistered user claims an account and is redirected to the
# settings page
CLAIMED_CONTRIBUTOR = ('<strong>Welcome to the OSF!</strong> Edit your display name below and then check your '
'<a href="/dashboard/">dashboard</a> to see projects to which you have been added as a '
'contributor by someone else.')
# Error Pages
# ###########
# Search-related errors
SEARCH_QUERY_HELP = ('Please check our help (the question mark beside the search box) for more information '
'on advanced search queries.')
# Shown at the error page if an expired/revoked email confirmation link is clicked
EXPIRED_EMAIL_CONFIRM_TOKEN = 'This confirmation link has expired. Please <a href="/login/">log in</a> to continue.'
INVALID_EMAIL_CONFIRM_TOKEN = 'This confirmation link is invalid. Please <a href="/login/">log in</a> to continue.'
CANNOT_MERGE_ACCOUNTS_SHORT = 'Cannot Merge Accounts'
CANNOT_MERGE_ACCOUNTS_LONG = (
'Accounts cannot be merged due to a possible conflict with add-ons. '
'Before you continue, please <a href="/settings/addons/"> deactivate '
'any add-ons</a> to be merged into your primary account.'
)
MERGE_COMPLETE = 'Accounts successfully merged.'
MERGE_CONFIRMATION_REQUIRED_SHORT = 'Confirmation Required: Merge Accounts'
MERGE_CONFIRMATION_REQUIRED_LONG = (
u'<p>This email is confirmed to another account. '
u'Would you like to merge <em>{src_user}</em> with the account '
u'<em>{dest_user}</em>?<p>'
u'<a class="btn btn-primary" href="?confirm_merge">Confirm merge</a> '
)
# Node Actions
AFTER_REGISTER_ARCHIVING = (
'Files are being copied to the newly created registration, and you will receive an email '
'notification when the copying is finished.'
)
BEFORE_REGISTER_HAS_POINTERS = (
u'This {category} contains links to other projects. These links will be '
u'copied into your registration, but the projects that they link to will '
u'not be registered. If you wish to register the linked projects, they '
u'must be registered separately. Learn more about <a href="http://help.osf.io'
u'/m/links_forks/l/524112-link-to-a-project">links</a>.'
)
BEFORE_FORK_HAS_POINTERS = (
u'This {category} contains links to other projects. Links will be copied '
u'into your fork, but the projects that they link to will not be forked. '
u'If you wish to fork the linked projects, they need to be forked from the '
u'original project.'
)
REGISTRATION_EMBARGO_INFO = """
You can choose whether to make your registration public immediately or
embargo it for up to four years. At the end of the embargo period the registration
is automatically made public. After becoming public, the only way to remove a
registration is to withdraw it. Withdrawn registrations show only the registration title,
contributors, and description to indicate that a registration was made and
later withdrawn.
<br /><br />
If you choose to embargo your registration, a notification will be sent to
all other project contributors. Other administrators will have 48 hours to
approve or cancel creating the registration. If any other administrator rejects the
registration, it will be canceled. If all other administrators approve or do
nothing, the registration will be confirmed and enter its embargo period.
"""
BEFORE_REGISTRATION_INFO = """
Registration cannot be undone, and the archived content and files cannot be
deleted after registration. Please be sure the project is complete and
comprehensive for what you wish to register.
"""
# Nodes: forking, templating, linking
LINK_ACTION = 'Link to this Project'
LINK_DESCRIPTION = """
<p>Linking to this project will reference it in another project, without
creating a copy. The link will always point to the most up-to-date version.</p>
"""
TEMPLATE_ACTION = 'Duplicate template'
TEMPLATE_DESCRIPTION = """
<p>This option will create a new project, using this project as a template.
The new project will be structured in the same way, but contain no data.</p>
"""
FORK_ACTION = 'Fork this Project'
FORK_DESCRIPTION = """
<p>Fork this project if you plan to build upon it in your own work.
The new project will be an exact duplicate of this project's current state,
with you as the only contributor.</p>
"""
TEMPLATE_DROPDOWN_HELP = """Start typing to search. Selecting project as
template will duplicate its structure in the new project without importing the
content of that project."""
TEMPLATED_FROM_PREFIX = 'Templated from '
# MFR Error handling
ERROR_PREFIX = "Unable to render. <a href='?action=download'>Download</a> file to view it."
SUPPORT = u'Contact ' + settings.OSF_SUPPORT_EMAIL + u' for further assistance.'
SUPPORT_LINK = 'please report it to <a href="mailto:' + settings.OSF_SUPPORT_EMAIL + '">' + settings.OSF_SUPPORT_EMAIL + '</a>.'
# Custom Error Messages w/ support # TODO: Where are these used? See [#OSF-6101]
STATA_VERSION_ERROR = u'Version of given Stata file is not 104, 105, 108, 113 (Stata 8/9), 114 (Stata 10/11) or 115 (Stata 12)<p>{0}</p>'.format(SUPPORT)
BLANK_OR_CORRUPT_TABLE_ERROR = u'Is this a valid instance of this file type?<p>{0}</p>'.format(SUPPORT)
#disk saving mode
DISK_SAVING_MODE = 'Forks, registrations, and uploads to OSF Storage are temporarily disabled while we are undergoing a server upgrade. These features will return shortly.'
#log out and revisit the link to confirm emails
CONFIRM_ALTERNATE_EMAIL_ERROR = 'The email address has <b>NOT</b> been added to your account. Please log out and revisit the link in your email. Thank you.'
SWITCH_VALIDATOR_ERROR = 'You do not have the ability to edit this field at this time.'
|
adlius/osf.io
|
website/language.py
|
Python
|
apache-2.0
| 9,431
|
[
"VisIt"
] |
64948582ee92302788ece94c3ed885bfcaec38ba31eea578e34c375bc0191472
|
"""
A collection of utility functions and classes. Originally, many
(but not all) were from the Python Cookbook -- hence the name cbook.
This module is safe to import from anywhere within matplotlib;
it imports matplotlib only at runtime.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import collections
import datetime
import errno
import functools
import glob
import gzip
import io
from itertools import repeat
import locale
import numbers
import os
import re
import sys
import time
import traceback
import types
import warnings
from weakref import ref, WeakKeyDictionary
from .deprecation import deprecated, warn_deprecated
from .deprecation import mplDeprecation, MatplotlibDeprecationWarning
import numpy as np
# On some systems, locale.getpreferredencoding returns None,
# which can break unicode; and the sage project reports that
# some systems have incorrect locale specifications, e.g.,
# an encoding instead of a valid locale name. Another
# pathological case that has been reported is an empty string.
# On some systems, getpreferredencoding sets the locale, which has
# side effects. Passing False eliminates those side effects.
def unicode_safe(s):
import matplotlib
if isinstance(s, bytes):
try:
preferredencoding = locale.getpreferredencoding(
matplotlib.rcParams['axes.formatter.use_locale']).strip()
if not preferredencoding:
preferredencoding = None
except (ValueError, ImportError, AttributeError):
preferredencoding = None
if preferredencoding is None:
return six.text_type(s)
else:
return six.text_type(s, preferredencoding)
return s
@deprecated('2.1')
class converter(object):
"""
Base class for handling string -> python type with support for
missing values
"""
def __init__(self, missing='Null', missingval=None):
self.missing = missing
self.missingval = missingval
def __call__(self, s):
if s == self.missing:
return self.missingval
return s
def is_missing(self, s):
return not s.strip() or s == self.missing
@deprecated('2.1')
class tostr(converter):
"""convert to string or None"""
def __init__(self, missing='Null', missingval=''):
converter.__init__(self, missing=missing, missingval=missingval)
@deprecated('2.1')
class todatetime(converter):
"""convert to a datetime or None"""
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s):
return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.datetime(*tup[:6])
@deprecated('2.1')
class todate(converter):
"""convert to a date or None"""
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
"""use a :func:`time.strptime` format string for conversion"""
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s):
return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.date(*tup[:3])
@deprecated('2.1')
class tofloat(converter):
"""convert to a float or None"""
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
self.missingval = missingval
def __call__(self, s):
if self.is_missing(s):
return self.missingval
return float(s)
@deprecated('2.1')
class toint(converter):
"""convert to an int or None"""
def __init__(self, missing='Null', missingval=None):
        converter.__init__(self, missing, missingval)
def __call__(self, s):
if self.is_missing(s):
return self.missingval
return int(s)
class _BoundMethodProxy(object):
"""
Our own proxy object which enables weak references to bound and unbound
methods and arbitrary callables. Pulls information about the function,
class, and instance out of a bound method. Stores a weak reference to the
instance to support garbage collection.
@organization: IBM Corporation
@copyright: Copyright (c) 2005, 2006 IBM Corporation
@license: The BSD License
Minor bugfixes by Michael Droettboom
"""
def __init__(self, cb):
self._hash = hash(cb)
self._destroy_callbacks = []
try:
try:
if six.PY3:
self.inst = ref(cb.__self__, self._destroy)
else:
self.inst = ref(cb.im_self, self._destroy)
except TypeError:
self.inst = None
if six.PY3:
self.func = cb.__func__
self.klass = cb.__self__.__class__
else:
self.func = cb.im_func
self.klass = cb.im_class
except AttributeError:
self.inst = None
self.func = cb
self.klass = None
def add_destroy_callback(self, callback):
self._destroy_callbacks.append(_BoundMethodProxy(callback))
def _destroy(self, wk):
for callback in self._destroy_callbacks:
try:
callback(self)
except ReferenceError:
pass
def __getstate__(self):
d = self.__dict__.copy()
# de-weak reference inst
inst = d['inst']
if inst is not None:
d['inst'] = inst()
return d
def __setstate__(self, statedict):
self.__dict__ = statedict
inst = statedict['inst']
# turn inst back into a weakref
if inst is not None:
self.inst = ref(inst)
def __call__(self, *args, **kwargs):
"""
Proxy for a call to the weak referenced object. Take
arbitrary params to pass to the callable.
Raises `ReferenceError`: When the weak reference refers to
a dead object
"""
if self.inst is not None and self.inst() is None:
raise ReferenceError
elif self.inst is not None:
# build a new instance method with a strong reference to the
# instance
mtd = types.MethodType(self.func, self.inst())
else:
# not a bound method, just return the func
mtd = self.func
# invoke the callable and return the result
return mtd(*args, **kwargs)
def __eq__(self, other):
"""
Compare the held function and instance with that held by
another proxy.
"""
try:
if self.inst is None:
return self.func == other.func and other.inst is None
else:
return self.func == other.func and self.inst() == other.inst()
except Exception:
return False
def __ne__(self, other):
"""
Inverse of __eq__.
"""
return not self.__eq__(other)
def __hash__(self):
return self._hash
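# Rough sketch of the proxy's behaviour (hypothetical callback, not taken from
# the original source):
#
#     class Listener(object):
#         def on_event(self, x):
#             print(x)
#
#     listener = Listener()
#     proxy = _BoundMethodProxy(listener.on_event)
#     proxy(42)        # calls listener.on_event(42)
#     del listener     # once the instance is garbage collected...
#     proxy(42)        # ...the proxy raises ReferenceError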
def _exception_printer(exc):
traceback.print_exc()
class CallbackRegistry(object):
"""Handle registering and disconnecting for a set of signals and
callbacks:
>>> def oneat(x):
... print('eat', x)
>>> def ondrink(x):
... print('drink', x)
>>> from matplotlib.cbook import CallbackRegistry
>>> callbacks = CallbackRegistry()
>>> id_eat = callbacks.connect('eat', oneat)
>>> id_drink = callbacks.connect('drink', ondrink)
>>> callbacks.process('drink', 123)
drink 123
>>> callbacks.process('eat', 456)
eat 456
>>> callbacks.process('be merry', 456) # nothing will be called
>>> callbacks.disconnect(id_eat)
>>> callbacks.process('eat', 456) # nothing will be called
In practice, one should always disconnect all callbacks when they
are no longer needed to avoid dangling references (and thus memory
leaks). However, real code in matplotlib rarely does so, and due
to its design, it is rather difficult to place this kind of code.
To get around this, and prevent this class of memory leaks, we
instead store weak references to bound methods only, so when the
destination object needs to die, the CallbackRegistry won't keep
it alive. The Python stdlib weakref module can not create weak
references to bound methods directly, so we need to create a proxy
object to handle weak references to bound methods (or regular free
functions). This technique was shared by Peter Parente on his
`"Mindtrove" blog
<http://mindtrove.info/python-weak-references/>`_.
Parameters
----------
exception_handler : callable, optional
If provided must have signature ::
def handler(exc: Exception) -> None:
If not None this function will be called with any `Exception`
subclass raised by the callbacks in `CallbackRegistry.process`.
The handler may either consume the exception or re-raise.
The callable must be pickle-able.
The default handler is ::
def h(exc):
traceback.print_exc()
"""
def __init__(self, exception_handler=_exception_printer):
self.exception_handler = exception_handler
self.callbacks = dict()
self._cid = 0
self._func_cid_map = {}
# In general, callbacks may not be pickled; thus, we simply recreate an
# empty dictionary at unpickling. In order to ensure that `__setstate__`
# (which just defers to `__init__`) is called, `__getstate__` must
# return a truthy value (for pickle protocol>=3, i.e. Py3, the
# *actual* behavior is that `__setstate__` will be called as long as
# `__getstate__` does not return `None`, but this is undocumented -- see
# http://bugs.python.org/issue12290).
def __getstate__(self):
return {'exception_handler': self.exception_handler}
def __setstate__(self, state):
self.__init__(**state)
def connect(self, s, func):
"""
        Register *func* to be called whenever signal *s* is generated;
        returns the connection id, which can later be passed to `disconnect`.
"""
self._func_cid_map.setdefault(s, WeakKeyDictionary())
# Note proxy not needed in python 3.
# TODO rewrite this when support for python2.x gets dropped.
proxy = _BoundMethodProxy(func)
if proxy in self._func_cid_map[s]:
return self._func_cid_map[s][proxy]
proxy.add_destroy_callback(self._remove_proxy)
self._cid += 1
cid = self._cid
self._func_cid_map[s][proxy] = cid
self.callbacks.setdefault(s, dict())
self.callbacks[s][cid] = proxy
return cid
def _remove_proxy(self, proxy):
for signal, proxies in list(six.iteritems(self._func_cid_map)):
try:
del self.callbacks[signal][proxies[proxy]]
except KeyError:
pass
if len(self.callbacks[signal]) == 0:
del self.callbacks[signal]
del self._func_cid_map[signal]
def disconnect(self, cid):
"""
disconnect the callback registered with callback id *cid*
"""
for eventname, callbackd in list(six.iteritems(self.callbacks)):
try:
del callbackd[cid]
except KeyError:
continue
else:
for signal, functions in list(
six.iteritems(self._func_cid_map)):
for function, value in list(six.iteritems(functions)):
if value == cid:
del functions[function]
return
def process(self, s, *args, **kwargs):
"""
process signal `s`. All of the functions registered to receive
        callbacks on `s` will be called with ``*args`` and ``**kwargs``
"""
if s in self.callbacks:
for cid, proxy in list(six.iteritems(self.callbacks[s])):
try:
proxy(*args, **kwargs)
except ReferenceError:
self._remove_proxy(proxy)
# this does not capture KeyboardInterrupt, SystemExit,
# and GeneratorExit
except Exception as exc:
if self.exception_handler is not None:
self.exception_handler(exc)
else:
raise
class silent_list(list):
"""
override repr when returning a list of matplotlib artists to
prevent long, meaningless output. This is meant to be used for a
homogeneous list of a given type
"""
def __init__(self, type, seq=None):
self.type = type
if seq is not None:
self.extend(seq)
def __repr__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def __str__(self):
return repr(self)
def __getstate__(self):
# store a dictionary of this SilentList's state
return {'type': self.type, 'seq': self[:]}
def __setstate__(self, state):
self.type = state['type']
self.extend(state['seq'])
class IgnoredKeywordWarning(UserWarning):
"""
A class for issuing warnings about keyword arguments that will be ignored
by matplotlib
"""
pass
def local_over_kwdict(local_var, kwargs, *keys):
"""
Enforces the priority of a local variable over potentially conflicting
argument(s) from a kwargs dict. The following possible output values are
considered in order of priority:
local_var > kwargs[keys[0]] > ... > kwargs[keys[-1]]
The first of these whose value is not None will be returned. If all are
None then None will be returned. Each key in keys will be removed from the
kwargs dict in place.
Parameters
----------
local_var: any object
The local variable (highest priority)
kwargs: dict
Dictionary of keyword arguments; modified in place
keys: str(s)
Name(s) of keyword arguments to process, in descending order of
priority
Returns
-------
out: any object
Either local_var or one of kwargs[key] for key in keys
Raises
------
IgnoredKeywordWarning
For each key in keys that is removed from kwargs but not used as
the output value
"""
out = local_var
for key in keys:
kwarg_val = kwargs.pop(key, None)
if kwarg_val is not None:
if out is None:
out = kwarg_val
else:
warnings.warn('"%s" keyword argument will be ignored' % key,
IgnoredKeywordWarning)
return out
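# A small illustration of the priority rule (hypothetical keyword names; note
# that the listed keys are popped from the kwargs dict in place):
#
#     local_over_kwdict(None, {'lw': 2, 'linewidth': 3}, 'lw', 'linewidth')
#     # -> 2, and warns that "linewidth" will be ignored
#     local_over_kwdict(5, {'lw': 2}, 'lw')
#     # -> 5, and warns that "lw" will be ignored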
def strip_math(s):
"""remove latex formatting from mathtext"""
remove = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it', '\\', '{', '}')
s = s[1:-1]
for r in remove:
s = s.replace(r, '')
return s
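# For instance, strip_math(r'$\mathdefault{10^3}$') returns '10^3'
# (an illustrative call, not part of the original module).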
class Bunch(object):
"""
Often we want to just collect a bunch of stuff together, naming each
    item of the bunch; a dictionary's OK for that, but a small do-nothing
class is even handier, and prettier to use. Whenever you want to
group a few variables::
>>> point = Bunch(datum=2, squared=4, coord=12)
>>> point.datum
By: Alex Martelli
From: https://code.activestate.com/recipes/121294/
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
return 'Bunch(%s)' % ', '.join(
'%s=%s' % kv for kv in six.iteritems(vars(self)))
@deprecated('2.1')
def unique(x):
"""Return a list of unique elements of *x*"""
return list(set(x))
def iterable(obj):
"""return true if *obj* is iterable"""
try:
iter(obj)
except TypeError:
return False
return True
@deprecated('2.1')
def is_string_like(obj):
"""Return True if *obj* looks like a string"""
# (np.str_ == np.unicode_ on Py3).
return isinstance(obj, (six.string_types, np.str_, np.unicode_))
@deprecated('2.1')
def is_sequence_of_strings(obj):
"""Returns true if *obj* is iterable and contains strings"""
if not iterable(obj):
return False
if is_string_like(obj) and not isinstance(obj, np.ndarray):
try:
obj = obj.values
except AttributeError:
# not pandas
return False
for o in obj:
if not is_string_like(o):
return False
return True
def is_hashable(obj):
"""Returns true if *obj* can be hashed"""
try:
hash(obj)
except TypeError:
return False
return True
def is_writable_file_like(obj):
"""return true if *obj* looks like a file object with a *write* method"""
return callable(getattr(obj, 'write', None))
def file_requires_unicode(x):
"""
Returns `True` if the given writable file-like object requires Unicode
to be written to it.
"""
try:
x.write(b'')
except TypeError:
return True
else:
return False
@deprecated('2.1')
def is_scalar(obj):
"""return true if *obj* is not string like and is not iterable"""
return not isinstance(obj, six.string_types) and not iterable(obj)
def is_numlike(obj):
"""return true if *obj* looks like a number"""
return isinstance(obj, (numbers.Number, np.number))
def to_filehandle(fname, flag='rU', return_opened=False):
"""
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in .gz. *flag* is a
read/write flag for :func:`file`
"""
if isinstance(fname, six.string_types):
if fname.endswith('.gz'):
# get rid of 'U' in flag for gzipped files.
flag = flag.replace('U', '')
fh = gzip.open(fname, flag)
elif fname.endswith('.bz2'):
# get rid of 'U' in flag for bz2 files
flag = flag.replace('U', '')
import bz2
fh = bz2.BZ2File(fname, flag)
else:
fh = open(fname, flag)
opened = True
elif hasattr(fname, 'seek'):
fh = fname
opened = False
else:
raise ValueError('fname must be a string or file handle')
if return_opened:
return fh, opened
return fh
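# Typical calls (file names are hypothetical):
#
#     fh = to_filehandle('data.txt')             # plain open()
#     fh = to_filehandle('data.csv.gz', 'r')     # transparent gzip.open()
#     fh, opened = to_filehandle(sys.stdin, return_opened=True)   # opened is False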
def is_scalar_or_string(val):
"""Return whether the given object is a scalar or string like."""
return isinstance(val, six.string_types) or not iterable(val)
def _string_to_bool(s):
"""Parses the string argument as a boolean"""
if not isinstance(s, six.string_types):
return bool(s)
if s.lower() in ['on', 'true']:
return True
if s.lower() in ['off', 'false']:
return False
raise ValueError('String "%s" must be one of: '
'"on", "off", "true", or "false"' % s)
def get_sample_data(fname, asfileobj=True):
"""
Return a sample data file. *fname* is a path relative to the
`mpl-data/sample_data` directory. If *asfileobj* is `True`
return a file object, otherwise just a file path.
    Set the rc parameter examples.directory to the directory where we should
    look, if sample_data files are stored in a location different than the
    default (which is `mpl-data/sample_data` at the same level as the
    `matplotlib` Python module files).
If the filename ends in .gz, the file is implicitly ungzipped.
"""
import matplotlib
if matplotlib.rcParams['examples.directory']:
root = matplotlib.rcParams['examples.directory']
else:
root = os.path.join(matplotlib._get_data_path(), 'sample_data')
path = os.path.join(root, fname)
if asfileobj:
if (os.path.splitext(fname)[-1].lower() in
('.csv', '.xrc', '.txt')):
mode = 'r'
else:
mode = 'rb'
base, ext = os.path.splitext(fname)
if ext == '.gz':
return gzip.open(path, mode)
else:
return open(path, mode)
else:
return path
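# Typical use (the file name here is hypothetical):
#
#     fh = get_sample_data('some_data.csv')                      # opened file object
#     path = get_sample_data('some_data.csv', asfileobj=False)   # plain path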
def flatten(seq, scalarp=is_scalar_or_string):
"""
Returns a generator of flattened nested containers
For example:
>>> from matplotlib.cbook import flatten
>>> l = (('John', ['Hunter']), (1, 23), [[([42, (5, 23)], )]])
>>> print(list(flatten(l)))
['John', 'Hunter', 1, 23, 42, 5, 23]
By: Composite of Holger Krekel and Luther Blissett
From: https://code.activestate.com/recipes/121294/
and Recipe 1.12 in cookbook
"""
for item in seq:
if scalarp(item) or item is None:
yield item
else:
for subitem in flatten(item, scalarp):
yield subitem
@deprecated('2.1', "sorted(..., key=itemgetter(...))")
class Sorter(object):
"""
Sort by attribute or item
Example usage::
sort = Sorter()
list = [(1, 2), (4, 8), (0, 3)]
dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
{'a': 9, 'b': 9}]
sort(list) # default sort
sort(list, 1) # sort by index 1
sort(dict, 'a') # sort a list of dicts by key 'a'
"""
def _helper(self, data, aux, inplace):
aux.sort()
result = [data[i] for junk, i in aux]
if inplace:
data[:] = result
return result
def byItem(self, data, itemindex=None, inplace=1):
if itemindex is None:
if inplace:
data.sort()
result = data
else:
result = sorted(data)
return result
else:
aux = [(data[i][itemindex], i) for i in range(len(data))]
return self._helper(data, aux, inplace)
def byAttribute(self, data, attributename, inplace=1):
aux = [(getattr(data[i], attributename), i) for i in range(len(data))]
return self._helper(data, aux, inplace)
# a couple of handy synonyms
sort = byItem
__call__ = byItem
@deprecated('2.1')
class Xlator(dict):
"""
All-in-one multiple-string-substitution class
Example usage::
text = "Larry Wall is the creator of Perl"
adict = {
"Larry Wall" : "Guido van Rossum",
"creator" : "Benevolent Dictator for Life",
"Perl" : "Python",
}
print(multiple_replace(adict, text))
xlat = Xlator(adict)
print(xlat.xlat(text))
"""
def _make_regex(self):
""" Build re object based on the keys of the current dictionary """
return re.compile("|".join(map(re.escape, self)))
def __call__(self, match):
""" Handler invoked for each regex *match* """
return self[match.group(0)]
def xlat(self, text):
""" Translate *text*, returns the modified text. """
return self._make_regex().sub(self, text)
@deprecated('2.1')
def soundex(name, len=4):
""" soundex module conforming to Odell-Russell algorithm """
# digits holds the soundex values for the alphabet
soundex_digits = '01230120022455012623010202'
sndx = ''
fc = ''
# Translate letters in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc:
fc = c # Remember first letter
d = soundex_digits[ord(c) - ord('A')]
# Duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# Replace first digit with first letter
sndx = fc + sndx[1:]
# Remove all 0s from the soundex code
sndx = sndx.replace('0', '')
# Return soundex code truncated or 0-padded to len characters
return (sndx + (len * '0'))[:len]
@deprecated('2.1')
class Null(object):
""" Null objects always and reliably "do nothing." """
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __str__(self):
return "Null()"
def __repr__(self):
return "Null()"
if six.PY3:
def __bool__(self):
return False
else:
def __nonzero__(self):
return False
def __getattr__(self, name):
return self
def __setattr__(self, name, value):
return self
def __delattr__(self, name):
return self
def mkdirs(newdir, mode=0o777):
"""
make directory *newdir* recursively, and set *mode*. Equivalent to ::
> mkdir -p NEWDIR
> chmod MODE NEWDIR
"""
# this functionality is now in core python as of 3.2
# LPY DROP
if six.PY3:
os.makedirs(newdir, mode=mode, exist_ok=True)
else:
try:
os.makedirs(newdir, mode=mode)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
class GetRealpathAndStat(object):
def __init__(self):
self._cache = {}
def __call__(self, path):
result = self._cache.get(path)
if result is None:
realpath = os.path.realpath(path)
if sys.platform == 'win32':
stat_key = realpath
else:
stat = os.stat(realpath)
stat_key = (stat.st_ino, stat.st_dev)
result = realpath, stat_key
self._cache[path] = result
return result
get_realpath_and_stat = GetRealpathAndStat()
@deprecated('2.1')
def dict_delall(d, keys):
"""delete all of the *keys* from the :class:`dict` *d*"""
for key in keys:
try:
del d[key]
except KeyError:
pass
@deprecated('2.1')
class RingBuffer(object):
""" class that implements a not-yet-full buffer """
def __init__(self, size_max):
self.max = size_max
self.data = []
class __Full(object):
""" class that implements a full buffer """
def append(self, x):
""" Append an element overwriting the oldest one. """
self.data[self.cur] = x
self.cur = (self.cur + 1) % self.max
def get(self):
""" return list of elements in correct order """
return self.data[self.cur:] + self.data[:self.cur]
def append(self, x):
"""append an element at the end of the buffer"""
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
# Permanently change self's class from non-full to full
self.__class__ = self.__Full
def get(self):
""" Return a list of elements from the oldest to the newest. """
return self.data
def __getitem__(self, i):
return self.data[i % len(self.data)]
@deprecated('2.1')
def get_split_ind(seq, N):
"""
*seq* is a list of words. Return the index into seq such that::
len(' '.join(seq[:ind])) <= N
"""
s_len = 0
# todo: use Alex's xrange pattern from the cbook for efficiency
for (word, ind) in zip(seq, xrange(len(seq))):
s_len += len(word) + 1 # +1 to account for the len(' ')
if s_len >= N:
return ind
return len(seq)
@deprecated('2.1', alternative='textwrap.TextWrapper')
def wrap(prefix, text, cols):
"""wrap *text* with *prefix* at length *cols*"""
pad = ' ' * len(prefix.expandtabs())
available = cols - len(pad)
seq = text.split(' ')
Nseq = len(seq)
ind = 0
lines = []
while ind < Nseq:
lastInd = ind
ind += get_split_ind(seq[ind:], available)
lines.append(seq[lastInd:ind])
# add the prefix to the first line, pad with spaces otherwise
ret = prefix + ' '.join(lines[0]) + '\n'
for line in lines[1:]:
ret += pad + ' '.join(line) + '\n'
return ret
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile(r"(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
"""
Remove excess indentation from docstring *s*.
Discards any leading blank lines, then removes up to n whitespace
characters from each line, where n is the number of leading
whitespace characters in the first line. It differs from
textwrap.dedent in its deletion of leading blank lines and its use
of the first non-blank line to determine the indentation.
It is also faster in most cases.
"""
# This implementation has a somewhat obtuse use of regular
# expressions. However, this function accounted for almost 30% of
# matplotlib startup time, so it is worthy of optimization at all
# costs.
if not s: # includes case of s is None
return ''
match = _find_dedent_regex.match(s)
if match is None:
return s
# This is the number of spaces to remove from the left-hand side.
nshift = match.end(1) - match.start(1)
if nshift == 0:
return s
# Get a regex that will remove *up to* nshift spaces from the
# beginning of each line. If it isn't in the cache, generate it.
unindent = _dedent_regex.get(nshift, None)
if unindent is None:
unindent = re.compile("\n\r? {0,%d}" % nshift)
_dedent_regex[nshift] = unindent
result = unindent.sub("\n", s).strip()
return result
def listFiles(root, patterns='*', recurse=1, return_folders=0):
"""
Recursively list files
from Parmar and Martelli in the Python Cookbook
"""
import os.path
import fnmatch
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
results = []
for dirname, dirs, files in os.walk(root):
# Append to results all relevant files (and perhaps folders)
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
if return_folders or os.path.isfile(fullname):
for pattern in pattern_list:
if fnmatch.fnmatch(name, pattern):
results.append(fullname)
break
# Block recursion if recursion was disallowed
if not recurse:
break
return results
@deprecated('2.1')
def get_recursive_filelist(args):
"""
Recurse all the files and dirs in *args* ignoring symbolic links
and return the files as a list of strings
"""
files = []
for arg in args:
if os.path.isfile(arg):
files.append(arg)
continue
if os.path.isdir(arg):
newfiles = listFiles(arg, recurse=1, return_folders=1)
files.extend(newfiles)
return [f for f in files if not os.path.islink(f)]
@deprecated('2.1')
def pieces(seq, num=2):
"""Break up the *seq* into *num* tuples"""
start = 0
while 1:
item = seq[start:start + num]
if not len(item):
break
yield item
start += num
@deprecated('2.1')
def exception_to_str(s=None):
if six.PY3:
sh = io.StringIO()
else:
sh = io.BytesIO()
if s is not None:
print(s, file=sh)
traceback.print_exc(file=sh)
return sh.getvalue()
@deprecated('2.1')
def allequal(seq):
"""
Return *True* if all elements of *seq* compare equal. If *seq* is
0 or 1 length, return *True*
"""
if len(seq) < 2:
return True
val = seq[0]
for i in xrange(1, len(seq)):
thisval = seq[i]
if thisval != val:
return False
return True
@deprecated('2.1')
def alltrue(seq):
"""
Return *True* if all elements of *seq* evaluate to *True*. If
*seq* is empty, return *False*.
"""
if not len(seq):
return False
for val in seq:
if not val:
return False
return True
@deprecated('2.1')
def onetrue(seq):
"""
Return *True* if one element of *seq* is *True*. If *seq* is
empty, return *False*.
"""
if not len(seq):
return False
for val in seq:
if val:
return True
return False
@deprecated('2.1')
def allpairs(x):
"""
return all possible pairs in sequence *x*
"""
return [(s, f) for i, f in enumerate(x) for s in x[i + 1:]]
class maxdict(dict):
"""
A dictionary with a maximum size; this doesn't override all the
relevant methods to constrain the size, just setitem, so use with
caution
"""
def __init__(self, maxsize):
dict.__init__(self)
self.maxsize = maxsize
self._killkeys = []
def __setitem__(self, k, v):
if k not in self:
if len(self) >= self.maxsize:
del self[self._killkeys[0]]
del self._killkeys[0]
self._killkeys.append(k)
dict.__setitem__(self, k, v)
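# --- Illustrative usage sketch (editorial addition, not part of the original
# module).  `maxdict` evicts the oldest inserted key once `maxsize` distinct
# keys are present; `_demo_maxdict` is a hypothetical helper.
def _demo_maxdict():
    d = maxdict(2)
    d['a'] = 1
    d['b'] = 2
    d['c'] = 3                      # exceeds maxsize: oldest key 'a' is dropped
    assert 'a' not in d and set(d) == {'b', 'c'}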
class Stack(object):
"""
Implement a stack where elements can be pushed on and you can move
back and forth. But no pop. Should mimic home / back / forward
in a browser
"""
def __init__(self, default=None):
self.clear()
self._default = default
def __call__(self):
"""return the current element, or None"""
if not len(self._elements):
return self._default
else:
return self._elements[self._pos]
def __len__(self):
return self._elements.__len__()
def __getitem__(self, ind):
return self._elements.__getitem__(ind)
def forward(self):
"""move the position forward and return the current element"""
n = len(self._elements)
if self._pos < n - 1:
self._pos += 1
return self()
def back(self):
"""move the position back and return the current element"""
if self._pos > 0:
self._pos -= 1
return self()
def push(self, o):
"""
push object onto stack at current position - all elements
occurring later than the current position are discarded
"""
self._elements = self._elements[:self._pos + 1]
self._elements.append(o)
self._pos = len(self._elements) - 1
return self()
def home(self):
"""push the first element onto the top of the stack"""
if not len(self._elements):
return
self.push(self._elements[0])
return self()
def empty(self):
return len(self._elements) == 0
def clear(self):
"""empty the stack"""
self._pos = -1
self._elements = []
def bubble(self, o):
"""
raise *o* to the top of the stack and return *o*. *o* must be
in the stack
"""
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
bubbles = []
for thiso in old:
if thiso == o:
bubbles.append(thiso)
else:
self.push(thiso)
for thiso in bubbles:
self.push(o)
return o
def remove(self, o):
'remove element *o* from the stack'
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
for thiso in old:
if thiso == o:
continue
else:
self.push(thiso)
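# --- Illustrative usage sketch (editorial addition, not part of the original
# module).  It exercises the browser-like home/back/forward navigation of
# `Stack`; `_demo_stack` and its local names are hypothetical.
def _demo_stack():
    s = Stack()
    s.push('home')
    s.push('page1')
    s.push('page2')
    assert s() == 'page2'            # current element is the last pushed
    assert s.back() == 'page1'       # move back one position
    s.push('page3')                  # discards the forward history ('page2')
    assert s.forward() == 'page3'    # already at the newest element
    assert s.home() == 'home'        # re-pushes the first element on top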
@deprecated('2.1')
def finddir(o, match, case=False):
"""
return all attributes of *o* which match string in match. if case
is True require an exact case match.
"""
if case:
names = [(name, name) for name in dir(o)
if isinstance(name, six.string_types)]
else:
names = [(name.lower(), name) for name in dir(o)
if isinstance(name, six.string_types)]
match = match.lower()
return [orig for name, orig in names if name.find(match) >= 0]
@deprecated('2.1')
def reverse_dict(d):
"""reverse the dictionary -- may lose data if values are not unique!"""
return {v: k for k, v in six.iteritems(d)}
@deprecated('2.1')
def restrict_dict(d, keys):
"""
Return a dictionary that contains those keys that appear in both
d and keys, with values from d.
"""
return {k: v for k, v in six.iteritems(d) if k in keys}
def report_memory(i=0): # argument may go away
"""return the memory consumed by process"""
from matplotlib.compat.subprocess import Popen, PIPE
pid = os.getpid()
if sys.platform == 'sunos5':
try:
a2 = Popen(str('ps -p %d -o osz') % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Sun OS only if "
"the 'ps' program is found")
mem = int(a2[-1].strip())
elif sys.platform.startswith('linux'):
try:
a2 = Popen(str('ps -p %d -o rss,sz') % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Linux only if "
"the 'ps' program is found")
mem = int(a2[1].split()[1])
elif sys.platform.startswith('darwin'):
try:
a2 = Popen(str('ps -p %d -o rss,vsz') % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Mac OS only if "
"the 'ps' program is found")
mem = int(a2[1].split()[0])
elif sys.platform.startswith('win'):
try:
a2 = Popen([str("tasklist"), "/nh", "/fi", "pid eq %d" % pid],
stdout=PIPE).stdout.read()
except OSError:
raise NotImplementedError(
"report_memory works on Windows only if "
"the 'tasklist' program is found")
mem = int(a2.strip().split()[-2].replace(',', ''))
else:
raise NotImplementedError(
"We don't have a memory monitor for %s" % sys.platform)
return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
def safezip(*args):
"""make sure *args* are equal len before zipping"""
Nx = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != Nx:
raise ValueError(_safezip_msg % (Nx, i + 1, len(arg)))
return list(zip(*args))
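# --- Illustrative usage sketch (editorial addition, not part of the original
# module).  `safezip` behaves like `zip` but raises if lengths differ;
# `_demo_safezip` is a hypothetical helper.
def _demo_safezip():
    assert safezip([1, 2], [3, 4]) == [(1, 3), (2, 4)]
    try:
        safezip([1, 2], [3])
    except ValueError:
        pass                         # mismatched lengths raise ValueError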
@deprecated('2.1')
def issubclass_safe(x, klass):
"""return issubclass(x, klass) and return False on a TypeError"""
try:
return issubclass(x, klass)
except TypeError:
return False
def safe_masked_invalid(x, copy=False):
x = np.array(x, subok=True, copy=copy)
if not x.dtype.isnative:
# Note that the argument to `byteswap` is 'inplace',
# thus if we have already made a copy, do the byteswap in
# place, else make a copy with the byte order swapped.
# Be explicit that we are swapping the byte order of the dtype
x = x.byteswap(copy).newbyteorder('S')
try:
xm = np.ma.masked_invalid(x, copy=False)
xm.shrink_mask()
except TypeError:
return x
return xm
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
"""
*objects*
A list of objects to find cycles in. It is often useful to
pass in gc.garbage to find the cycles that are preventing some
objects from being garbage collected.
*outstream*
The stream for output.
*show_progress*
If True, print the number of objects reached as they are found.
"""
import gc
from types import FrameType
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % str(type(step)))
if isinstance(step, dict):
for key, val in six.iteritems(step):
if val is next:
outstream.write("[%s]" % repr(key))
break
if key is next:
outstream.write("[key] = %s" % repr(val))
break
elif isinstance(step, list):
outstream.write("[%d]" % step.index(next))
elif isinstance(step, tuple):
outstream.write("( tuple )")
else:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + [obj])
for obj in objects:
outstream.write("Examining: %r\n" % (obj,))
recurse(obj, obj, {}, [])
class Grouper(object):
"""
This class provides a lightweight way to group arbitrary objects
together into disjoint sets when a full-blown graph data structure
would be overkill.
Objects can be joined using :meth:`join`, tested for connectedness
using :meth:`joined`, and all disjoint sets can be retrieved by
using the object as an iterator.
The objects being joined must be hashable and weak-referenceable.
For example:
>>> from matplotlib.cbook import Grouper
>>> class Foo(object):
... def __init__(self, s):
... self.s = s
... def __repr__(self):
... return self.s
...
>>> a, b, c, d, e, f = [Foo(x) for x in 'abcdef']
>>> grp = Grouper()
>>> grp.join(a, b)
>>> grp.join(b, c)
>>> grp.join(d, e)
>>> sorted(map(tuple, grp))
[(a, b, c), (d, e)]
>>> grp.joined(a, b)
True
>>> grp.joined(a, c)
True
>>> grp.joined(a, d)
False
"""
def __init__(self, init=()):
mapping = self._mapping = {}
for x in init:
mapping[ref(x)] = [ref(x)]
def __contains__(self, item):
return ref(item) in self._mapping
def clean(self):
"""
Clean dead weak references from the dictionary
"""
mapping = self._mapping
to_drop = [key for key in mapping if key() is None]
for key in to_drop:
val = mapping.pop(key)
val.remove(key)
def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more
arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(ref(a), [ref(a)])
for arg in args:
set_b = mapping.get(ref(arg))
if set_b is None:
set_a.append(ref(arg))
mapping[ref(arg)] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
self.clean()
def joined(self, a, b):
"""
Returns True if *a* and *b* are members of the same set.
"""
self.clean()
mapping = self._mapping
try:
return mapping[ref(a)] is mapping[ref(b)]
except KeyError:
return False
def remove(self, a):
self.clean()
mapping = self._mapping
seta = mapping.pop(ref(a), None)
if seta is not None:
seta.remove(ref(a))
def __iter__(self):
"""
Iterate over each of the disjoint sets as a list.
The iterator is invalid if interleaved with calls to join().
"""
self.clean()
token = object()
# Mark each group as we come across if by appending a token,
# and don't yield it twice
for group in six.itervalues(self._mapping):
if group[-1] is not token:
yield [x() for x in group]
group.append(token)
# Cleanup the tokens
for group in six.itervalues(self._mapping):
if group[-1] is token:
del group[-1]
def get_siblings(self, a):
"""
Returns all of the items joined with *a*, including itself.
"""
self.clean()
siblings = self._mapping.get(ref(a), [ref(a)])
return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
if steps == 1:
return a
steps = int(np.floor(steps))
new_length = ((len(a) - 1) * steps) + 1
new_shape = list(a.shape)
new_shape[0] = new_length
result = np.zeros(new_shape, a.dtype)
result[0] = a[0]
a0 = a[0:-1]
a1 = a[1:]
delta = ((a1 - a0) / steps)
for i in range(1, steps):
result[i::steps] = delta * i + a0
result[steps::steps] = a1
return result
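# --- Illustrative usage sketch (editorial addition, not part of the original
# module).  With ``steps=2`` one interpolated point is inserted in every gap;
# `_demo_simple_linear_interpolation` is a hypothetical helper.
def _demo_simple_linear_interpolation():
    a = np.array([0., 2., 4.])
    out = simple_linear_interpolation(a, 2)
    assert list(out) == [0., 1., 2., 3., 4.]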
@deprecated('2.1', alternative='shutil.rmtree')
def recursive_remove(path):
if os.path.isdir(path):
for fname in (glob.glob(os.path.join(path, '*')) +
glob.glob(os.path.join(path, '.*'))):
if os.path.isdir(fname):
recursive_remove(fname)
os.removedirs(fname)
else:
os.remove(fname)
# os.removedirs(path)
else:
os.remove(path)
def delete_masked_points(*args):
"""
Find all masked and/or non-finite points in a set of arguments,
and return the arguments with only the unmasked points remaining.
Arguments can be in any of 5 categories:
1) 1-D masked arrays
2) 1-D ndarrays
3) ndarrays with more than one dimension
4) other non-string iterables
5) anything else
The first argument must be in one of the first four categories;
any argument with a length differing from that of the first
argument (and hence anything in category 5) then will be
passed through unchanged.
Masks are obtained from all arguments of the correct length
in categories 1, 2, and 4; a point is bad if masked in a masked
array or if it is a nan or inf. No attempt is made to
extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
does not yield a Boolean array.
All input arguments that are not passed unchanged are returned
as ndarrays after removing the points or rows corresponding to
masks in any of the arguments.
A vastly simpler version of this function was originally
written as a helper for Axes.scatter().
"""
if not len(args):
return ()
if (isinstance(args[0], six.string_types) or not iterable(args[0])):
raise ValueError("First argument must be a sequence")
nrecs = len(args[0])
margs = []
seqlist = [False] * len(args)
for i, x in enumerate(args):
if (not isinstance(x, six.string_types) and iterable(x)
and len(x) == nrecs):
seqlist[i] = True
if isinstance(x, np.ma.MaskedArray):
if x.ndim > 1:
raise ValueError("Masked arrays must be 1-D")
else:
x = np.asarray(x)
margs.append(x)
masks = [] # list of masks that are True where good
for i, x in enumerate(margs):
if seqlist[i]:
if x.ndim > 1:
continue # Don't try to get nan locations unless 1-D.
if isinstance(x, np.ma.MaskedArray):
masks.append(~np.ma.getmaskarray(x)) # invert the mask
xd = x.data
else:
xd = x
try:
mask = np.isfinite(xd)
if isinstance(mask, np.ndarray):
masks.append(mask)
except: # Fixme: put in tuple of possible exceptions?
pass
if len(masks):
mask = np.logical_and.reduce(masks)
igood = mask.nonzero()[0]
if len(igood) < nrecs:
for i, x in enumerate(margs):
if seqlist[i]:
margs[i] = x.take(igood, axis=0)
for i, x in enumerate(margs):
if seqlist[i] and isinstance(x, np.ma.MaskedArray):
margs[i] = x.filled()
return margs
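# --- Illustrative usage sketch (editorial addition, not part of the original
# module).  Rows that are masked or non-finite in *any* argument are dropped
# from *all* arguments; `_demo_delete_masked_points` is a hypothetical helper.
def _demo_delete_masked_points():
    x = np.array([0., 1., np.nan, 3.])
    y = np.ma.array([10., 20., 30., 40.], mask=[False, False, False, True])
    xc, yc = delete_masked_points(x, y)
    assert list(xc) == [0., 1.]      # nan row and masked row are both removed
    assert list(yc) == [10., 20.]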
def boxplot_stats(X, whis=1.5, bootstrap=None, labels=None,
autorange=False):
"""
Returns list of dictionaries of statistics used to draw a series
of box and whisker plots. The `Returns` section enumerates the
required keys of the dictionary. Users can skip this function and
pass a user-defined set of dictionaries to the new `axes.bxp` method
instead of relying on MPL to do the calculations.
Parameters
----------
X : array-like
Data that will be represented in the boxplots. Should have 2 or
fewer dimensions.
whis : float, string, or sequence (default = 1.5)
As a float, determines the reach of the whiskers beyond the first and
third quartiles. In other words, where IQR is the interquartile range
(`Q3-Q1`), the upper whisker will extend to the last datum less than
`Q3 + whis*IQR`. Similarly, the lower whisker will extend to the first
datum greater than `Q1 - whis*IQR`. Beyond the whiskers, data are
considered outliers and are plotted as individual points. This can
also be set to an ascending sequence of percentiles (e.g., [5, 95]) to
set the whiskers at specific percentiles of the data. Finally, `whis`
can be the string ``'range'`` to force the whiskers to the
minimum and maximum of the data. In the edge case that the 25th
and 75th percentiles are equivalent, `whis` can be automatically
set to ``'range'`` via the `autorange` option.
bootstrap : int, optional
Number of times the confidence intervals around the median
should be bootstrapped (percentile method).
labels : array-like, optional
Labels for each dataset. Length must be compatible with
dimensions of `X`.
autorange : bool, optional (False)
When `True` and the data are distributed such that the 25th and
75th percentiles are equal, ``whis`` is set to ``'range'`` such
that the whisker ends are at the minimum and maximum of the
data.
Returns
-------
bxpstats : list of dict
A list of dictionaries containing the results for each column
of data. Keys of each dictionary are the following:
======== ===================================
Key Value Description
======== ===================================
label tick label for the boxplot
mean arithmetic mean value
med 50th percentile
q1 first quartile (25th percentile)
q3 third quartile (75th percentile)
cilo lower notch around the median
cihi upper notch around the median
whislo end of the lower whisker
whishi end of the upper whisker
fliers outliers
======== ===================================
Notes
-----
Non-bootstrapping approach to confidence interval uses Gaussian-
based asymptotic approximation:
.. math::
\\mathrm{med} \\pm 1.57 \\times \\frac{\\mathrm{iqr}}{\\sqrt{N}}
General approach from:
McGill, R., Tukey, J.W., and Larsen, W.A. (1978) "Variations of
Boxplots", The American Statistician, 32:12-16.
"""
def _bootstrap_median(data, N=5000):
# determine 95% confidence intervals of the median
M = len(data)
percentiles = [2.5, 97.5]
bs_index = np.random.randint(M, size=(N, M))
bsData = data[bs_index]
estimate = np.median(bsData, axis=1, overwrite_input=True)
CI = np.percentile(estimate, percentiles)
return CI
def _compute_conf_interval(data, med, iqr, bootstrap):
if bootstrap is not None:
# Do a bootstrap estimate of notch locations.
# get conf. intervals around median
CI = _bootstrap_median(data, N=bootstrap)
notch_min = CI[0]
notch_max = CI[1]
else:
N = len(data)
notch_min = med - 1.57 * iqr / np.sqrt(N)
notch_max = med + 1.57 * iqr / np.sqrt(N)
return notch_min, notch_max
# output is a list of dicts
bxpstats = []
# convert X to a list of lists
X = _reshape_2D(X, "X")
ncols = len(X)
if labels is None:
labels = repeat(None)
elif len(labels) != ncols:
raise ValueError("Dimensions of labels and X must be compatible")
input_whis = whis
for ii, (x, label) in enumerate(zip(X, labels), start=0):
# empty dict
stats = {}
if label is not None:
stats['label'] = label
# restore whis to the input values in case it got changed in the loop
whis = input_whis
# note tricksyness, append up here and then mutate below
bxpstats.append(stats)
# if empty, bail
if len(x) == 0:
stats['fliers'] = np.array([])
stats['mean'] = np.nan
stats['med'] = np.nan
stats['q1'] = np.nan
stats['q3'] = np.nan
stats['cilo'] = np.nan
stats['cihi'] = np.nan
stats['whislo'] = np.nan
stats['whishi'] = np.nan
continue
# up-convert to an array, just to be safe
x = np.asarray(x)
# arithmetic mean
stats['mean'] = np.mean(x)
# medians and quartiles
q1, med, q3 = np.percentile(x, [25, 50, 75])
# interquartile range
stats['iqr'] = q3 - q1
if stats['iqr'] == 0 and autorange:
whis = 'range'
# conf. interval around median
stats['cilo'], stats['cihi'] = _compute_conf_interval(
x, med, stats['iqr'], bootstrap
)
# lowest/highest non-outliers
if np.isscalar(whis):
if np.isreal(whis):
loval = q1 - whis * stats['iqr']
hival = q3 + whis * stats['iqr']
elif whis in ['range', 'limit', 'limits', 'min/max']:
loval = np.min(x)
hival = np.max(x)
else:
whismsg = ('whis must be a float, valid string, or '
'list of percentiles')
raise ValueError(whismsg)
else:
loval = np.percentile(x, whis[0])
hival = np.percentile(x, whis[1])
# get high extreme
wiskhi = np.compress(x <= hival, x)
if len(wiskhi) == 0 or np.max(wiskhi) < q3:
stats['whishi'] = q3
else:
stats['whishi'] = np.max(wiskhi)
# get low extreme
wisklo = np.compress(x >= loval, x)
if len(wisklo) == 0 or np.min(wisklo) > q1:
stats['whislo'] = q1
else:
stats['whislo'] = np.min(wisklo)
# compute a single array of outliers
stats['fliers'] = np.hstack([
np.compress(x < stats['whislo'], x),
np.compress(x > stats['whishi'], x)
])
# add in the remaining stats
stats['q1'], stats['med'], stats['q3'] = q1, med, q3
return bxpstats
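# --- Illustrative usage sketch (editorial addition, not part of the original
# module).  For evenly spread data the whiskers reach the extremes and there
# are no fliers; `_demo_boxplot_stats` is a hypothetical helper.
def _demo_boxplot_stats():
    data = np.arange(1, 101)                 # 1..100
    (bstats,) = boxplot_stats(data)
    assert bstats['med'] == np.percentile(data, 50)
    assert bstats['q1'] == np.percentile(data, 25)
    assert bstats['q3'] == np.percentile(data, 75)
    assert len(bstats['fliers']) == 0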
# FIXME I don't think this is used anywhere
@deprecated('2.1')
def unmasked_index_ranges(mask, compressed=True):
"""
Find index ranges where *mask* is *False*.
*mask* will be flattened if it is not already 1-D.
Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
indices for slices of the compressed :class:`numpy.ndarray`
corresponding to each of *N* uninterrupted runs of unmasked
values. If optional argument *compressed* is *False*, it returns
the start and stop indices into the original :class:`numpy.ndarray`,
not the compressed :class:`numpy.ndarray`. Returns *None* if there
are no unmasked values.
Example::
y = ma.array(np.arange(5), mask = [0,0,1,0,0])
ii = unmasked_index_ranges(ma.getmaskarray(y))
# returns array [[0,2,] [2,4,]]
y.compressed()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
# returns array [[0, 2], [3, 5]]
y.filled()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
Prior to the transforms refactoring, this was used to support
masked arrays in Line2D.
"""
mask = mask.reshape(mask.size)
m = np.concatenate(((1,), mask, (1,)))
indices = np.arange(len(mask) + 1)
mdif = m[1:] - m[:-1]
i0 = np.compress(mdif == -1, indices)
i1 = np.compress(mdif == 1, indices)
assert len(i0) == len(i1)
if len(i1) == 0:
return None # Maybe this should be np.zeros((0,2), dtype=int)
if not compressed:
return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=1)
seglengths = i1 - i0
breakpoints = np.cumsum(seglengths)
ic0 = np.concatenate(((0,), breakpoints[:-1]))
ic1 = breakpoints
return np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=1)
# The ls_mapper maps short codes for line style to their full name used by
# backends; the reverse mapper is for mapping full names to short ones.
ls_mapper = {'-': 'solid', '--': 'dashed', '-.': 'dashdot', ':': 'dotted'}
ls_mapper_r = {v: k for k, v in six.iteritems(ls_mapper)}
def align_iterators(func, *iterables):
"""
This generator takes a bunch of iterables that are ordered by func
It sends out ordered tuples::
(func(row), [rows from all iterators matching func(row)])
It is used by :func:`matplotlib.mlab.recs_join` to join record arrays
"""
class myiter:
def __init__(self, it):
self.it = it
self.key = self.value = None
self.iternext()
def iternext(self):
try:
self.value = next(self.it)
self.key = func(self.value)
except StopIteration:
self.value = self.key = None
def __call__(self, key):
retval = None
if key == self.key:
retval = self.value
self.iternext()
elif self.key and key > self.key:
raise ValueError("Iterator has been left behind")
return retval
# This can be made more efficient by not computing the minimum key for each
# iteration
iters = [myiter(it) for it in iterables]
minvals = minkey = True
while True:
minvals = ([_f for _f in [it.key for it in iters] if _f])
if minvals:
minkey = min(minvals)
yield (minkey, [it(minkey) for it in iters])
else:
break
def is_math_text(s):
# Did we find an even number of non-escaped dollar signs?
# If so, treat it as math text.
try:
s = six.text_type(s)
except UnicodeDecodeError:
raise ValueError(
"matplotlib display text must have all code points < 128 or use "
"Unicode strings")
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
return even_dollars
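# --- Illustrative usage sketch (editorial addition, not part of the original
# module).  Only an even, positive number of unescaped dollar signs counts as
# math text; `_demo_is_math_text` is a hypothetical helper.
def _demo_is_math_text():
    assert is_math_text(r'pi is $\pi$')          # two unescaped dollar signs
    assert not is_math_text(r'costs \$5 or $6')  # only one unescaped dollar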
def _to_unmasked_float_array(x):
"""
Convert a sequence to a float array; if input was a masked array, masked
values are converted to nans.
"""
if hasattr(x, 'mask'):
return np.ma.asarray(x, float).filled(np.nan)
else:
return np.asarray(x, float)
def _check_1d(x):
'''
Converts a sequence of less than 1 dimension, to an array of 1
dimension; leaves everything else untouched.
'''
if not hasattr(x, 'shape') or len(x.shape) < 1:
return np.atleast_1d(x)
else:
try:
x[:, None]
return x
except (IndexError, TypeError):
return np.atleast_1d(x)
def _reshape_2D(X, name):
"""
Use Fortran ordering to convert ndarrays and lists of iterables to lists of
1D arrays.
Lists of iterables are converted by applying `np.asarray` to each of their
elements. 1D ndarrays are returned in a singleton list containing them.
2D ndarrays are converted to the list of their *columns*.
*name* is used to generate the error message for invalid inputs.
"""
# Iterate over columns for ndarrays, over rows otherwise.
X = np.atleast_1d(X.T if isinstance(X, np.ndarray) else np.asarray(X))
if X.ndim == 1 and X.dtype.type != np.object_:
# 1D array of scalars: directly return it.
return [X]
elif X.ndim in [1, 2]:
# 2D array, or 1D array of iterables: flatten them first.
return [np.reshape(x, -1) for x in X]
else:
raise ValueError("{} must have 2 or fewer dimensions".format(name))
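# --- Illustrative usage sketch (editorial addition, not part of the original
# module).  A 2D array is split into the list of its columns;
# `_demo_reshape_2D` is a hypothetical helper.
def _demo_reshape_2D():
    a = np.arange(6).reshape(3, 2)               # 3 rows, 2 columns
    cols = _reshape_2D(a, 'a')
    assert len(cols) == 2
    assert list(cols[0]) == [0, 2, 4] and list(cols[1]) == [1, 3, 5]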
def violin_stats(X, method, points=100):
"""
Returns a list of dictionaries of data which can be used to draw a series
of violin plots. See the `Returns` section below to view the required keys
of the dictionary. Users can skip this function and pass a user-defined set
of dictionaries to the `axes.violin` method instead of using MPL to do the
calculations.
Parameters
----------
X : array-like
Sample data that will be used to produce the gaussian kernel density
estimates. Must have 2 or fewer dimensions.
method : callable
The method used to calculate the kernel density estimate for each
column of data. When called via `method(v, coords)`, it should
return a vector of the values of the KDE evaluated at the values
specified in coords.
points : scalar, default = 100
Defines the number of points to evaluate each of the gaussian kernel
density estimates at.
Returns
-------
A list of dictionaries containing the results for each column of data.
The dictionaries contain at least the following:
- coords: A list of scalars containing the coordinates this particular
kernel density estimate was evaluated at.
- vals: A list of scalars containing the values of the kernel density
estimate at each of the coordinates given in `coords`.
- mean: The mean value for this column of data.
- median: The median value for this column of data.
- min: The minimum value for this column of data.
- max: The maximum value for this column of data.
"""
# List of dictionaries describing each of the violins.
vpstats = []
# Want X to be a list of data sequences
X = _reshape_2D(X, "X")
for x in X:
# Dictionary of results for this distribution
stats = {}
# Calculate basic stats for the distribution
min_val = np.min(x)
max_val = np.max(x)
# Evaluate the kernel density estimate
coords = np.linspace(min_val, max_val, points)
stats['vals'] = method(x, coords)
stats['coords'] = coords
# Store additional statistics for this distribution
stats['mean'] = np.mean(x)
stats['median'] = np.median(x)
stats['min'] = min_val
stats['max'] = max_val
# Append to output
vpstats.append(stats)
return vpstats
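# --- Illustrative usage sketch (editorial addition, not part of the original
# module).  Any callable of the form ``method(values, coords)`` can be used
# as the KDE; the trivial stand-in below just returns zeros.
# `_demo_violin_stats` is a hypothetical helper.
def _demo_violin_stats():
    def fake_kde(values, coords):
        return np.zeros_like(coords)
    data = np.array([1., 2., 3., 4.])
    (vstats,) = violin_stats(data, fake_kde, points=10)
    assert vstats['min'] == 1. and vstats['max'] == 4.
    assert vstats['median'] == 2.5 and len(vstats['coords']) == 10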
class _NestedClassGetter(object):
# recipe from http://stackoverflow.com/a/11493777/741316
"""
When called with the containing class as the first argument,
and the name of the nested class as the second argument,
returns an instance of the nested class.
"""
def __call__(self, containing_class, class_name):
nested_class = getattr(containing_class, class_name)
# make an instance of a simple object (this one will do), for which we
# can change the __class__ later on.
nested_instance = _NestedClassGetter()
# set the class of the instance, the __init__ will never be called on
# the class but the original state will be set later on by pickle.
nested_instance.__class__ = nested_class
return nested_instance
class _InstanceMethodPickler(object):
"""
Pickle cannot handle instancemethod saving. _InstanceMethodPickler
provides a solution to this.
"""
def __init__(self, instancemethod):
"""Takes an instancemethod as its only argument."""
if six.PY3:
self.parent_obj = instancemethod.__self__
self.instancemethod_name = instancemethod.__func__.__name__
else:
self.parent_obj = instancemethod.im_self
self.instancemethod_name = instancemethod.im_func.__name__
def get_instancemethod(self):
return getattr(self.parent_obj, self.instancemethod_name)
def pts_to_prestep(x, *args):
"""
Convert continuous line to pre-steps.
Given a set of ``N`` points, convert to ``2N - 1`` points, which when
connected linearly give a step function which changes values at the
beginning of the intervals.
Parameters
----------
x : array
The x location of the steps.
y1, ..., yp : array
y arrays to be turned into steps; all must be the same length as ``x``.
Returns
-------
out : array
The x and y values converted to steps in the same order as the input;
can be unpacked as ``x_out, y1_out, ..., yp_out``. If the input is
length ``N``, each of these arrays will be length ``2N - 1``.
Examples
--------
>> x_s, y1_s, y2_s = pts_to_prestep(x, y1, y2)
"""
steps = np.zeros((1 + len(args), 2 * len(x) - 1))
# In all `pts_to_*step` functions, only assign *once* using `x` and `args`,
# as converting to an array may be expensive.
steps[0, 0::2] = x
steps[0, 1::2] = steps[0, 0:-2:2]
steps[1:, 0::2] = args
steps[1:, 1::2] = steps[1:, 2::2]
return steps
def pts_to_poststep(x, *args):
"""
Convert continuous line to post-steps.
Given a set of ``N`` points, convert to ``2N - 1`` points, which when
connected linearly give a step function which changes values at the end of
the intervals.
Parameters
----------
x : array
The x location of the steps.
y1, ..., yp : array
y arrays to be turned into steps; all must be the same length as ``x``.
Returns
-------
out : array
The x and y values converted to steps in the same order as the input;
can be unpacked as ``x_out, y1_out, ..., yp_out``. If the input is
length ``N``, each of these arrays will be length ``2N - 1``.
Examples
--------
>> x_s, y1_s, y2_s = pts_to_poststep(x, y1, y2)
"""
steps = np.zeros((1 + len(args), 2 * len(x) - 1))
steps[0, 0::2] = x
steps[0, 1::2] = steps[0, 2::2]
steps[1:, 0::2] = args
steps[1:, 1::2] = steps[1:, 0:-2:2]
return steps
def pts_to_midstep(x, *args):
"""
Convert continuous line to mid-steps.
Given a set of ``N`` points convert to ``2N`` points which when connected
linearly give a step function which changes values at the middle of the
intervals.
Parameters
----------
x : array
The x location of the steps.
y1, ..., yp : array
y arrays to be turned into steps; all must be the same length as ``x``.
Returns
-------
out : array
The x and y values converted to steps in the same order as the input;
can be unpacked as ``x_out, y1_out, ..., yp_out``. If the input is
length ``N``, each of these arrays will be length ``2N``.
Examples
--------
>> x_s, y1_s, y2_s = pts_to_midstep(x, y1, y2)
"""
steps = np.zeros((1 + len(args), 2 * len(x)))
x = np.asanyarray(x)
steps[0, 1:-1:2] = steps[0, 2::2] = (x[:-1] + x[1:]) / 2
steps[0, 0], steps[0, -1] = x[0], x[-1]
steps[1:, 0::2] = args
steps[1:, 1::2] = steps[1:, 0::2]
return steps
STEP_LOOKUP_MAP = {'default': lambda x, y: (x, y),
'steps': pts_to_prestep,
'steps-pre': pts_to_prestep,
'steps-post': pts_to_poststep,
'steps-mid': pts_to_midstep}
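# --- Illustrative usage sketch (editorial addition, not part of the original
# module).  For ``N`` input points `pts_to_prestep` returns ``2N - 1`` points
# whose segments rise at the start of each interval; `_demo_step_conversion`
# is a hypothetical helper.
def _demo_step_conversion():
    x = np.array([0., 1., 2.])
    y = np.array([0., 1., 4.])
    xs, ys = pts_to_prestep(x, y)
    assert list(xs) == [0., 0., 1., 1., 2.]
    assert list(ys) == [0., 1., 1., 4., 4.]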
def index_of(y):
"""
A helper function to get the index of an input to plot
against if x values are not explicitly given.
Tries to get `y.index` (which works if *y* is a pd.Series); if that
fails, returns ``np.arange(y.shape[0])``.
This will be extended in the future to deal with more types of
labeled data.
Parameters
----------
y : scalar or array-like
The proposed y-value
Returns
-------
x, y : ndarray
The x and y values to plot.
"""
try:
return y.index.values, y.values
except AttributeError:
y = _check_1d(y)
return np.arange(y.shape[0], dtype=float), y
def safe_first_element(obj):
if isinstance(obj, collections.Iterator):
# needed to accept `array.flat` as input.
# np.flatiter reports as an instance of collections.Iterator
# but can still be indexed via [].
# This has the side effect of re-setting the iterator, but
# that is acceptable.
try:
return obj[0]
except TypeError:
pass
raise RuntimeError("matplotlib does not support generators "
"as input")
return next(iter(obj))
def sanitize_sequence(data):
"""Converts dictview object to list"""
return list(data) if isinstance(data, collections.MappingView) else data
def normalize_kwargs(kw, alias_mapping=None, required=(), forbidden=(),
allowed=None):
"""Helper function to normalize kwarg inputs
The order they are resolved are:
1. aliasing
2. required
3. forbidden
4. allowed
This order means that only the canonical names need appear in
`allowed`, `forbidden`, `required`
Parameters
----------
alias_mapping : dict, optional
A mapping between a canonical name to a list of
aliases, in order of precedence from lowest to highest.
If the canonical value is not in the list it is assumed to have
the highest priority.
required : iterable, optional
A tuple of fields that must be in kwargs.
forbidden : iterable, optional
A list of keys which may not be in kwargs
allowed : tuple, optional
A tuple of allowed fields. If this is not None, then raise if
`kw` contains any keys not in the union of `required`
and `allowed`. To allow only the required fields pass in
``()`` for `allowed`
Raises
------
TypeError
To match what python raises if invalid args/kwargs are passed to
a callable.
"""
# deal with default value of alias_mapping
if alias_mapping is None:
alias_mapping = dict()
# make a local so we can pop
kw = dict(kw)
# output dictionary
ret = dict()
# hit all alias mappings
for canonical, alias_list in six.iteritems(alias_mapping):
# the alias lists are ordered from lowest to highest priority
# so we know to use the last value in this list
tmp = []
seen = []
for a in alias_list:
try:
tmp.append(kw.pop(a))
seen.append(a)
except KeyError:
pass
# if canonical is not in the alias_list assume highest priority
if canonical not in alias_list:
try:
tmp.append(kw.pop(canonical))
seen.append(canonical)
except KeyError:
pass
# if we found anything in this set of aliases put it in the return
# dict
if tmp:
ret[canonical] = tmp[-1]
if len(tmp) > 1:
warnings.warn("Saw kwargs {seen!r} which are all aliases for "
"{canon!r}. Kept value from {used!r}".format(
seen=seen, canon=canonical, used=seen[-1]))
# at this point we know that all keys which are aliased are removed, update
# the return dictionary from the cleaned local copy of the input
ret.update(kw)
fail_keys = [k for k in required if k not in ret]
if fail_keys:
raise TypeError("The required keys {keys!r} "
"are not in kwargs".format(keys=fail_keys))
fail_keys = [k for k in forbidden if k in ret]
if fail_keys:
raise TypeError("The forbidden keys {keys!r} "
"are in kwargs".format(keys=fail_keys))
if allowed is not None:
allowed_set = set(required) | set(allowed)
fail_keys = [k for k in ret if k not in allowed_set]
if fail_keys:
raise TypeError("kwargs contains {keys!r} which are not in "
"the required {req!r} or "
"allowed {allow!r} keys".format(
keys=fail_keys, req=required,
allow=allowed))
return ret
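# --- Illustrative usage sketch (editorial addition, not part of the original
# module).  Aliases are mapped back to their canonical names;
# `_demo_normalize_kwargs` and its alias table are hypothetical.
def _demo_normalize_kwargs():
    aliases = {'color': ['c'], 'linewidth': ['lw']}
    out = normalize_kwargs({'c': 'red', 'lw': 2}, alias_mapping=aliases)
    assert out == {'color': 'red', 'linewidth': 2}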
def get_label(y, default_name):
try:
return y.name
except AttributeError:
return default_name
_lockstr = """\
LOCKERROR: matplotlib is trying to acquire the lock
{!r}
and has failed. This may be due to another process holding this
lock. If you are sure no other matplotlib process is running, try
removing these folders and trying again.
"""
class Locked(object):
"""
Context manager to handle locks.
Based on code from conda.
(c) 2012-2013 Continuum Analytics, Inc. / https://www.continuum.io/
All Rights Reserved
conda is distributed under the terms of the BSD 3-clause license.
Consult LICENSE_CONDA or https://opensource.org/licenses/BSD-3-Clause.
"""
LOCKFN = '.matplotlib_lock'
class TimeoutError(RuntimeError):
pass
def __init__(self, path):
self.path = path
self.end = "-" + str(os.getpid())
self.lock_path = os.path.join(self.path, self.LOCKFN + self.end)
self.pattern = os.path.join(self.path, self.LOCKFN + '-*')
self.remove = True
def __enter__(self):
retries = 50
sleeptime = 0.1
while retries:
files = glob.glob(self.pattern)
if files and not files[0].endswith(self.end):
time.sleep(sleeptime)
retries -= 1
else:
break
else:
err_str = _lockstr.format(self.pattern)
raise self.TimeoutError(err_str)
if not files:
try:
os.makedirs(self.lock_path)
except OSError:
pass
else: # PID lock already here --- someone else will remove it.
self.remove = False
def __exit__(self, exc_type, exc_value, traceback):
if self.remove:
for path in self.lock_path, self.path:
try:
os.rmdir(path)
except OSError:
pass
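# --- Illustrative usage sketch (editorial addition, not part of the original
# module).  `some_dir` stands for any existing, writable directory;
# `_demo_locked` is a hypothetical helper.
def _demo_locked(some_dir):
    # A '.matplotlib_lock-<pid>' directory is created inside some_dir on
    # entry and removed again on exit (unless another process owns the lock).
    with Locked(some_dir):
        pass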
class _FuncInfo(object):
"""
Class used to store a function.
"""
def __init__(self, function, inverse, bounded_0_1=True, check_params=None):
"""
Parameters
----------
function : callable
A callable implementing the function receiving the variable as
first argument and any additional parameters in a list as second
argument.
inverse : callable
A callable implementing the inverse function receiving the variable
as first argument and any additional parameters in a list as
second argument. It must satisfy 'inverse(function(x, p), p) == x'.
bounded_0_1: bool or callable
A boolean indicating whether the function is bounded in the [0,1]
interval, or a callable taking a list of values for the additional
parameters, and returning a boolean indicating whether the function
is bounded in the [0,1] interval for that combination of
parameters. Default True.
check_params: callable or None
A callable taking a list of values for the additional parameters
and returning a boolean indicating whether that combination of
parameters is valid. It is only required if the function has
additional parameters and some of them are restricted.
Default None.
"""
self.function = function
self.inverse = inverse
if callable(bounded_0_1):
self._bounded_0_1 = bounded_0_1
else:
self._bounded_0_1 = lambda x: bounded_0_1
if check_params is None:
self._check_params = lambda x: True
elif callable(check_params):
self._check_params = check_params
else:
raise ValueError("Invalid 'check_params' argument.")
def is_bounded_0_1(self, params=None):
"""
Returns a boolean indicating if the function is bounded in the [0,1]
interval for a particular set of additional parameters.
Parameters
----------
params : list
The list of additional parameters. Default None.
Returns
-------
out : bool
True if the function is bounded in the [0,1] interval for
parameters 'params'. Otherwise False.
"""
return self._bounded_0_1(params)
def check_params(self, params=None):
"""
Returns a boolean indicating if the set of additional parameters is
valid.
Parameters
----------
params : list
The list of additional parameters. Default None.
Returns
-------
out : bool
True if 'params' is a valid set of additional parameters for the
function. Otherwise False.
"""
return self._check_params(params)
class _StringFuncParser(object):
"""
A class used to convert predefined strings into
_FuncInfo objects, or to directly obtain _FuncInfo
properties.
"""
_funcs = {}
_funcs['linear'] = _FuncInfo(lambda x: x,
lambda x: x,
True)
_funcs['quadratic'] = _FuncInfo(np.square,
np.sqrt,
True)
_funcs['cubic'] = _FuncInfo(lambda x: x**3,
lambda x: x**(1. / 3),
True)
_funcs['sqrt'] = _FuncInfo(np.sqrt,
np.square,
True)
_funcs['cbrt'] = _FuncInfo(lambda x: x**(1. / 3),
lambda x: x**3,
True)
_funcs['log10'] = _FuncInfo(np.log10,
lambda x: (10**(x)),
False)
_funcs['log'] = _FuncInfo(np.log,
np.exp,
False)
_funcs['log2'] = _FuncInfo(np.log2,
lambda x: (2**x),
False)
_funcs['x**{p}'] = _FuncInfo(lambda x, p: x**p[0],
lambda x, p: x**(1. / p[0]),
True)
_funcs['root{p}(x)'] = _FuncInfo(lambda x, p: x**(1. / p[0]),
lambda x, p: x**p,
True)
_funcs['log{p}(x)'] = _FuncInfo(lambda x, p: (np.log(x) /
np.log(p[0])),
lambda x, p: p[0]**(x),
False,
lambda p: p[0] > 0)
_funcs['log10(x+{p})'] = _FuncInfo(lambda x, p: np.log10(x + p[0]),
lambda x, p: 10**x - p[0],
lambda p: p[0] > 0)
_funcs['log(x+{p})'] = _FuncInfo(lambda x, p: np.log(x + p[0]),
lambda x, p: np.exp(x) - p[0],
lambda p: p[0] > 0)
_funcs['log{p}(x+{p})'] = _FuncInfo(lambda x, p: (np.log(x + p[1]) /
np.log(p[0])),
lambda x, p: p[0]**(x) - p[1],
lambda p: p[1] > 0,
lambda p: p[0] > 0)
def __init__(self, str_func):
"""
Parameters
----------
str_func : string
String to be parsed.
"""
if not isinstance(str_func, six.string_types):
raise ValueError("'%s' must be a string." % str_func)
self._str_func = six.text_type(str_func)
self._key, self._params = self._get_key_params()
self._func = self._parse_func()
def _parse_func(self):
"""
Parses the parameters to build a new _FuncInfo object,
replacing the relevant parameters if necessary in the lambda
functions.
"""
func = self._funcs[self._key]
if not self._params:
func = _FuncInfo(func.function, func.inverse,
func.is_bounded_0_1())
else:
m = func.function
function = (lambda x, m=m: m(x, self._params))
m = func.inverse
inverse = (lambda x, m=m: m(x, self._params))
is_bounded_0_1 = func.is_bounded_0_1(self._params)
func = _FuncInfo(function, inverse,
is_bounded_0_1)
return func
@property
def func_info(self):
"""
Returns the _FuncInfo object.
"""
return self._func
@property
def function(self):
"""
Returns the callable for the direct function.
"""
return self._func.function
@property
def inverse(self):
"""
Returns the callable for the inverse function.
"""
return self._func.inverse
@property
def is_bounded_0_1(self):
"""
Returns a boolean indicating if the function is bounded
in the [0, 1] interval.
"""
return self._func.is_bounded_0_1()
def _get_key_params(self):
str_func = self._str_func
# Checking if it comes with parameters
regex = r'\{(.*?)\}'
params = re.findall(regex, str_func)
for i, param in enumerate(params):
try:
params[i] = float(param)
except ValueError:
raise ValueError("Parameter %i is '%s', which is "
"not a number." %
(i, param))
str_func = re.sub(regex, '{p}', str_func)
try:
func = self._funcs[str_func]
except (ValueError, KeyError):
raise ValueError("'%s' is an invalid string. The only strings "
"recognized as functions are %s." %
(str_func, list(self._funcs)))
# Checking that the parameters are valid
if not func.check_params(params):
raise ValueError("%s are invalid values for the parameters "
"in %s." %
(params, str_func))
return str_func, params
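# --- Illustrative usage sketch (editorial addition, not part of the original
# module).  Plain strings and parametrized strings such as 'x**{2}' resolve
# to callables; `_demo_string_func_parser` is a hypothetical helper.
def _demo_string_func_parser():
    parser = _StringFuncParser('sqrt')
    assert parser.function(4.0) == 2.0
    assert parser.inverse(2.0) == 4.0
    parametrized = _StringFuncParser('x**{2}')
    assert parametrized.function(3.0) == 9.0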
|
jonyroda97/redbot-amigosprovaveis
|
lib/matplotlib/cbook/__init__.py
|
Python
|
gpl-3.0
| 84,575
|
[
"Gaussian"
] |
033ad94489de307c8d11937d0ab4a40f425f909fce63733b0def065c4c2d2f79
|
#!/usr/bin/env python2
#
# Copyright 2012 Unknown <diogo@arch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import json
import warnings
# Suppress import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import os
import sys
import time
import codecs
import subprocess
import shutil
import traceback
import argparse
from os.path import abspath, join, basename
try:
from process.base import print_col, GREEN, RED, YELLOW
from ortho import OrthomclToolbox as OT
import ortho.orthomclInstallSchema as install_sqlite
import ortho.orthomclPairs as make_pairs_sqlite
import ortho.orthomclDumpPairsFiles as dump_pairs_sqlite
import ortho.orthomclFilterFasta as FilterFasta
import ortho.orthomclBlastParser as BlastParser
import ortho.orthomclMclToGroups as MclGroups
from ortho.error_handling import *
from process.error_handling import KillByUser
from __init__ import __version__
except ImportError:
from trifusion.process.base import print_col, GREEN, RED, YELLOW
from trifusion.ortho import OrthomclToolbox as OT
import trifusion.ortho.orthomclInstallSchema as install_sqlite
import trifusion.ortho.orthomclPairs as make_pairs_sqlite
import trifusion.ortho.orthomclDumpPairsFiles as dump_pairs_sqlite
import trifusion.ortho.orthomclFilterFasta as FilterFasta
import trifusion.ortho.orthomclBlastParser as BlastParser
import trifusion.ortho.orthomclMclToGroups as MclGroups
from trifusion.ortho.error_handling import *
from trifusion.process.error_handling import KillByUser
from trifusion import __version__
def install_schema(db_dir):
"""
Install the schema for the sqlite database
:param db_dir: string, directory for the sqlite database
"""
print_col("Creating sqlite database", GREEN, 1)
install_sqlite.execute(db_dir)
def check_unique_field(proteome_file, verbose=False, nm=None):
"""
Checks the original proteome file for a field in the fasta header
that is unique to all sequences
"""
# Some files may have utf8 encoding problems so I used codecs here
file_handle = codecs.open(proteome_file, "r", "cp1252")
header_list = []
header = ""
for line in file_handle:
if nm:
if nm.stop:
raise KillByUser("")
if line.startswith(">"):
header = line[1:].strip()
# Store header in list format
header_list.append(header.split("|"))
# Get the size of the header fields
header_field_size = len(header.split("|"))
for i in range(header_field_size):
if nm:
if nm.stop:
raise KillByUser("")
temp_list = []
for header in header_list:
temp_list.append(header[i])
if len(temp_list) == len(set(temp_list)) and len(set(temp_list)) ==\
len(header_list):
# The orthoMCL program uses an index starting from 1, so the +1 is
# a necessary adjustment
if verbose:
print_col("\t Using unique header field {}".format(i), GREEN, 1)
return i
# Ideally, a unique field should be found before this code. If not, raise
# exception
raise NoUniqueField("The proteome file {} has no unique field".format(
os.path.basename(proteome_file)))
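# --- Illustrative usage sketch (editorial addition, not part of the original
# script).  Field 0 ('sp') and field 2 ('gene_a') repeat across headers, so
# field 1 is reported as the unique one; `_demo_check_unique_field` is a
# hypothetical helper.
def _demo_check_unique_field():
    import tempfile
    tmp = tempfile.NamedTemporaryFile("w", suffix=".fas", delete=False)
    tmp.write(">sp|P1|gene_a\nMKV\n>sp|P2|gene_a\nMLA\n")
    tmp.close()
    assert check_unique_field(tmp.name) == 1
    os.remove(tmp.name)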
def prep_fasta(proteome_file, code, unique_id, dest, verbose=False, nm=None):
if verbose:
print_col("\t Preparing file for USEARCH", GREEN, 1)
# Storing header list to check for duplicates
header_list = []
# Get json with header mappings, if exists
json_f = join(dest, "backstage_files", "header_mapping.json")
if os.path.exists(json_f):
with open(json_f) as fh:
header_mapping = json.load(fh)
else:
header_mapping = {}
# Will prevent writing
lock = True
# File handles
file_in = open(proteome_file)
pfile = basename(proteome_file.split(".")[0] + "_mod.fas")
file_out_path = join(dest, "backstage_files", pfile)
file_out = open(file_out_path, "w")
for line in file_in:
if nm:
if nm.stop:
raise KillByUser("")
if line.startswith(">"):
if line not in header_list:
fields = line.split("|")
unique_str = fields[unique_id].replace(" ", "_")
header_mapping["%s|%s" % (code, unique_str)] = line.strip()
header_list.append(line)
file_out.write(">%s|%s\n" % (code, unique_str))
lock = True
else:
lock = False
elif lock:
file_out.write(line)
# Close file handles:
file_in.close()
file_out.close()
with open(json_f, "w") as fh:
json.dump(header_mapping, fh)
def adjust_fasta(file_list, dest, nm=None):
print_col("Adjusting proteome files", GREEN, 1)
# Create compliant fasta directory
cf_dir = join(dest, "backstage_files", "compliantFasta")
if not os.path.exists(cf_dir):
os.makedirs(cf_dir)
else:
for f in os.listdir(cf_dir):
os.remove(join(cf_dir, f))
# Setup progress information
if nm:
if nm.stop:
KillByUser("")
# Get total number of files for total progress
nm.total = len(file_list)
nm.counter = 0
for proteome in file_list:
# Get code for proteome
code_name = proteome.split(os.path.sep)[-1].split(".")[0]
code_name = "_".join(code_name.split())
if nm:
if nm.stop:
raise KillByUser("")
nm.counter += 1
nm.msg = "Adjusting file {}".format(basename(proteome))
# Check the unique ID field
try:
unique_id = check_unique_field(proteome, True, nm)
except Exception as e:
print_col("The file {} could not be parsed".format(proteome),
YELLOW, 1)
#TODO: Log errors on file
continue
# Adjust fasta
# stg = prep_fasta(proteome, code_name, unique_id)
prep_fasta(proteome, code_name, unique_id, dest, nm)
proteome_file_name = proteome.split(os.path.sep)[-1].split(".")[0] + \
".fasta"
proteome_file_name = "_".join(proteome_file_name.split())
pfile = basename(proteome.split(".")[0] + "_mod.fas")
shutil.move(join(dest, "backstage_files", pfile),
join(cf_dir, proteome_file_name))
json_f = join(dest, "backstage_files", "header_mapping.json")
header_f = join(dest, "backstage_files", "header_mapping.csv")
if os.path.exists(json_f):
with open(json_f) as fh, open(header_f, "w") as ofh:
header_map = json.load(fh)
for k, v in header_map.items():
ofh.write("{}; {}\n".format(k, v))
def filter_fasta(min_len, max_stop, db, dest, nm=None):
print_col("Filtering proteome files", GREEN, 1)
cp_dir = join(dest, "backstage_files", "compliantFasta")
FilterFasta.orthomcl_filter_fasta(cp_dir, min_len, max_stop, db, dest, nm)
def allvsall_usearch(goodproteins, evalue, dest, cpus, usearch_outfile,
usearch_bin="usearch", nm=None):
print_col("Perfoming USEARCH All-vs-All (may take a while...)", GREEN, 1)
# FNULL = open(os.devnull, "w")
usearch_cmd = [usearch_bin,
"-ublast",
join(dest, "backstage_files", goodproteins),
"-db",
join(dest, "backstage_files", goodproteins),
"-blast6out",
join(dest, "backstage_files", usearch_outfile),
"-evalue", str(evalue),
"--maxaccepts",
"0",
"-threads",
str(cpus)]
if nm:
# The subprocess.Popen handler cannot be passed directly in Windows
# due to pickling issues. So I pass the pid of the process instead.
subp = subprocess.Popen(usearch_cmd)
nm.subp = subp.pid
subp.wait()
nm.subp = None
else:
_ = subprocess.Popen(usearch_cmd).wait()
def blast_parser(usearch_output, dest, db_dir, nm):
    print_col("Parsing BLAST output", GREEN, 1)
    BlastParser.orthomcl_blast_parser(
        join(dest, "backstage_files", usearch_output),
join(dest, "backstage_files", "compliantFasta"),
db_dir,
nm)
def pairs(db_dir, nm=None):
print_col("Finding pairs for orthoMCL", GREEN, 1)
make_pairs_sqlite.execute(db_dir, nm=nm)
def dump_pairs(db_dir, dest, nm=None):
print_col("Dump files from the database produced by the orthomclPairs "
"program", GREEN, 1)
dump_pairs_sqlite.execute(db_dir, dest, nm=nm)
def mcl(inflation_list, dest, mcl_file="mcl", nm=None):
print_col("Running mcl algorithm", GREEN, 1)
mcl_input = join(dest, "backstage_files", "mclInput")
mcl_output = join(dest, "backstage_files", "mclOutput_")
for val in inflation_list:
mcl_cmd = [mcl_file,
mcl_input,
"--abc",
"-I",
val,
"-o",
mcl_output + val.replace(".", "")]
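        # For the default inflation value "3" this runs, for example:
        #   mcl <dest>/backstage_files/mclInput --abc -I 3 \
        #       -o <dest>/backstage_files/mclOutput_3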
if nm:
# The subprocess.Popen handler cannot be passed directly in Windows
# due to pickling issues. So I pass the pid of the process instead.
subp = subprocess.Popen(mcl_cmd)
nm.subp = subp.pid
subp.wait()
nm.subp = None
else:
_ = subprocess.Popen(mcl_cmd).wait()
def mcl_groups(inflation_list, mcl_prefix, start_id, group_file, dest,
nm=None):
print_col("Dumping groups", GREEN, 1)
# Create a results directory
results_dir = join(dest, "Orthology_results")
if not os.path.exists(results_dir):
os.makedirs(results_dir)
mcl_output = join(dest, "backstage_files", "mclOutput_")
if nm:
if nm.stop:
raise KillByUser("")
nm.total = len(inflation_list)
nm.counter = 0
for val in inflation_list:
if nm:
if nm.stop:
raise KillByUser("")
nm.counter += 1
MclGroups.mcl_to_groups(
mcl_prefix,
start_id,
mcl_output + val.replace(".", ""),
os.path.join(results_dir, group_file + "_" + str(val) + ".txt"),
nm=nm)
def export_filtered_groups(inflation_list, group_prefix, gene_t, sp_t, sqldb,
db, tmp_dir, dest, nm=None):
print_col("Exporting filtered groups to protein sequence files", GREEN, 1)
stats_storage = {}
groups_obj = OT.MultiGroupsLight(tmp_dir)
if nm:
if nm.stop:
raise KillByUser("")
for val in inflation_list:
# Create a directory that will store the results for the current
# inflation value
inflation_dir = join(dest, "Orthology_results", "Inflation%s" % val)
if not os.path.exists(inflation_dir):
os.makedirs(inflation_dir)
group_file = join(dest, "Orthology_results",
group_prefix + "_%s.txt" % val)
# Create Group object
group_obj = OT.GroupLight(group_file, gene_t, sp_t)
# Add group to the MultiGroups object
groups_obj.add_group(group_obj)
# Export filtered groups and return stats to present in the app
stats = group_obj.basic_group_statistics()
# Retrieve fasta sequences from the filtered groups
group_obj.retrieve_sequences(sqldb, db,
dest=join(inflation_dir, "Orthologs"),
shared_namespace=nm)
# os.remove(sqldb)
stats_storage[val] = stats
return stats_storage, groups_obj
def check_bin_path(bin_path, program):
    prog = {"usearch": b"usearch",
            "mcl": b"mcl"}
try:
res, _ = subprocess.Popen([bin_path, "--version"],
stdout=subprocess.PIPE).communicate()
if not res.startswith(prog[program]):
print_col("The {} executable file could not be found".format(
program), RED, 1)
except OSError:
print_col("The {} executable file could not be found".format(
program), RED, 1)
def check_dirs(dir_path):
if not os.path.exists(dir_path):
print_col("The following path does not exist: {}".format(dir_path),
RED, 1)
def main():
    # Defining the argument parser inside main() allows this file to be
    # imported as a module without triggering argparse. The alternative of
    # guarding it with an "if __name__ == '__main__'" statement does not work
    # well with the entry_points parameter of setup.py, since entry points
    # call main() directly and never execute that guarded block.
parser = argparse.ArgumentParser(description="Command line interface for "
"TriFusion Orthology search module")
parser.add_argument("-in", dest="infile", type=str,
help="Provide the path "
"to the directory containing the proteome files")
# Execution modes
exec_modes = parser.add_argument_group("Execution modes")
exec_modes.add_argument("-n", action="store_const", const=True,
dest="normal",
help="Complete run of the pipeline")
exec_modes.add_argument("-a", action="store_const", const=True,
dest="adjust",
help="Only adjust proteome fasta files")
exec_modes.add_argument("-na", action="store_const", const=True,
dest="no_adjust",
help="Complete run of the pipeline without "
"adjusting fasta files")
# Input formatting
input_format = parser.add_argument_group("Input formatting")
    input_format.add_argument("-d", action="store_const", const=True,
                              dest="code", help="Do not convert input proteome"
                              " file names because the file names are already "
                              "in code (e.g. Homo_sapiens.fas -> HoSap.fas)")
input_format.add_argument("-sep", dest="separator", help="Specify the "
"separator in the input files (e.g. '_' is the"
" separator in 'Homo_sapiens.fas'). This "
"parameter is ignored if the '-d' option is set")
# Search options
search_opts = parser.add_argument_group("Ortholog search options")
search_opts.add_argument("--usearch", dest="usearch_bin",
default="usearch",
help="Provide the path to the USEARCH executable."
" If the executable is already in your "
"PATH environment variable, specify only"
" the name of the executable (default is "
"'%(default)s')")
search_opts.add_argument("--mcl", dest="mcl_bin", default="mcl",
help="Provide the path to the MCL executable."
" If the executable is already in your "
"PATH environment variable, specify only"
" the name of the executable (default is "
"'%(default)s')")
search_opts.add_argument("--min-length", dest="min_length", type=int,
default=10, help="Set minimum length allowed "
"for protein sequences (default is '%(default)s')")
search_opts.add_argument("--max-stop", dest="max_stop", type=int,
default=20, help="Set maximum percentage of "
"stop codons in protein sequences (default is "
"'%(default)s')")
search_opts.add_argument("--db", dest="database",
default="goodProteins", help="Name of search "
"database (default is '%(default)s')")
search_opts.add_argument("--search-out", dest="search_out",
default="AllVsAll.out", help="Name of the "
"search output file containing the All-vs-All "
"protein comparisons")
search_opts.add_argument("-evalue", dest="evalue", default=1E-5,
help="Set the e-value cut off for search "
"operation (default is '%(default)s')")
search_opts.add_argument("-inflation", dest="inflation", nargs="+",
default=["3"],
choices=[str(x) for x in xrange(1, 6)],
help="Set inflation values for ortholog group"
" clustering. Multiple values may be provided "
"but values are limited to the range [1, 5]")
# Output options
output_opts = parser.add_argument_group("Output options")
output_opts.add_argument("-o", dest="output_dir", default=os.getcwd(),
help="Output directory")
output_opts.add_argument("-prefix", dest="prefix", default="Ortholog",
help="Set the prefix name for each ortholog "
"cluster (default is '%(default)s')")
output_opts.add_argument("-id", dest="id_num", type=int, default=1,
help="Set the starting number for the ortholog "
"clusters (default is '%(default)s')")
output_opts.add_argument("--groups-file", dest="groups_file",
default="groups", help="Set the name of the "
"group files from the output of MCL (default is "
"'%(default)s')")
output_opts.add_argument("--min-species", dest="min_sp", default=1,
type=float, help="Set the minimum number of "
"species required for an ortholog cluster to be "
"converted into protein sequence. This option "
"will only affect the protein sequence files, "
"not the group file output.")
output_opts.add_argument("--max-gene-copy", dest="max_gn", default=100,
type=int, help="Set the maximum number of gene "
"copies from the same taxon for each ortholog "
"cluster. This option will only affect the "
"protein sequence files, not the group file "
"output.")
# Miscellaneous options
misc_options = parser.add_argument_group("Miscellaneous options")
misc_options.add_argument("-np", dest="cpus", default=1, help="Number of "
"CPUs to be used during search operation ("
"default is '%(default)s')")
misc_options.add_argument("-v", "--version", dest="version",
action="store_const", const=True,
help="Displays software version")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
arg = parser.parse_args()
if arg.version:
print(__version__)
sys.exit(1)
    # Create temp directory
tmp_dir = join(os.getcwd(), ".tmp")
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
print_col("Executing OrthoMCL pipeline at %s %s" % (
time.strftime("%d/%m/%Y"), time.strftime("%I:%M:%S")), GREEN, 1)
try:
start_time = time.time()
# Arguments
input_dir = os.path.abspath(arg.infile)
check_dirs(input_dir)
output_dir = os.path.abspath(arg.output_dir)
# name_separator = arg.separator
min_length = arg.min_length
max_percent_stop = arg.max_stop
usearch_bin = arg.usearch_bin
mcl_bin = arg.mcl_bin
database_name = join(os.getcwd(), output_dir, "backstage_files",
arg.database)
usearch_out_name = arg.search_out
evalue_cutoff = arg.evalue
cpus = arg.cpus
inflation = arg.inflation
prefix = arg.prefix
start_id = arg.id_num
groups_file = arg.groups_file
min_sp = arg.min_sp
max_gn = arg.max_gn
# Check USEARCH bin
check_bin_path(usearch_bin, "usearch")
# Check MCL bin
check_bin_path(mcl_bin, "mcl")
sql_path = join(tmp_dir, "sqldb.db")
# Get proteome files
if not os.path.exists(input_dir):
print_col("The input directory %s does not exist. Exiting." %
input_dir, RED, 1)
proteome_files = [abspath(join(input_dir, x)) for x in os.listdir(
input_dir)]
# Create and change working directory
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# os.chdir(output_dir)
# Create directory that will store intermediate files during orthology
# search
int_dir = join(output_dir, "backstage_files")
if not os.path.exists(int_dir):
os.makedirs(int_dir)
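        # Pipeline stages (normal mode): install the SQLite schema, adjust
        # proteome headers, filter sequences by length and stop-codon content,
        # run the USEARCH all-vs-all comparison, parse its BLAST-like output,
        # find and dump ortholog pairs, cluster them with MCL for each
        # inflation value, dump the groups and export the filtered groups as
        # protein sequence files.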
if arg.normal:
install_schema(tmp_dir)
adjust_fasta(proteome_files, output_dir)
filter_fasta(min_length, max_percent_stop, database_name,
output_dir)
allvsall_usearch(database_name, evalue_cutoff, output_dir, cpus,
usearch_out_name, usearch_bin=usearch_bin)
blast_parser(usearch_out_name, output_dir, tmp_dir, None)
pairs(tmp_dir)
dump_pairs(tmp_dir, output_dir)
mcl(inflation, output_dir, mcl_file=mcl_bin)
mcl_groups(inflation, prefix, start_id, groups_file, output_dir)
export_filtered_groups(inflation, groups_file, max_gn, min_sp,
sql_path, database_name, tmp_dir,
output_dir)
elif arg.adjust:
adjust_fasta(proteome_files, output_dir)
elif arg.no_adjust:
install_schema(tmp_dir)
filter_fasta(min_length, max_percent_stop, database_name,
output_dir)
allvsall_usearch(database_name, evalue_cutoff, output_dir, cpus,
usearch_out_name, usearch_bin=usearch_bin)
blast_parser(usearch_out_name, output_dir, tmp_dir, None)
pairs(tmp_dir)
dump_pairs(tmp_dir, output_dir)
mcl(inflation, output_dir, mcl_file=mcl_bin)
mcl_groups(inflation, prefix, start_id, groups_file, output_dir)
export_filtered_groups(inflation, groups_file, max_gn, min_sp,
sql_path, database_name, tmp_dir,
output_dir)
print_col("OrthoMCL pipeline execution successfully completed in %s "
"seconds" % (round(time.time() - start_time, 2)), GREEN, 1)
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
except Exception as e:
        print(e)
traceback.print_exc()
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
print_col("Program exited with errors!", RED, 1)
if __name__ == "__main__":
main()
__author__ = "Diogo N. Silva"
|
ODiogoSilva/TriFusion
|
trifusion/orthomcl_pipeline.py
|
Python
|
gpl-3.0
| 24,641
|
[
"BLAST"
] |
5333c7d8032d312cc3a66f53d479e9feb079fabb942bc67d5969833d93638a81
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Spin-free lambda equation of UHF-CCSD(T)
'''
import time
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import ccsd_lambda
from pyscf.cc import uccsd_lambda
def kernel(mycc, eris=None, t1=None, t2=None, l1=None, l2=None,
max_cycle=50, tol=1e-8, verbose=logger.INFO):
return ccsd_lambda.kernel(mycc, eris, t1, t2, l1, l2, max_cycle, tol,
verbose, make_intermediates, update_lambda)
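# Typical usage (a sketch, assuming `mycc` is a converged UCCSD object and
# that ccsd_lambda.kernel keeps its (converged, l1, l2) return signature):
#   conv, l1, l2 = kernel(mycc, eris=mycc.ao2mo(), t1=mycc.t1, t2=mycc.t2)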
def make_intermediates(mycc, t1, t2, eris):
from pyscf.cc import uccsd_t_slow
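    # p6 symmetrizes a rank-6 tensor over the 3! simultaneous permutations of
    # the occupied (i,j,k) and virtual (a,b,c) index pairs, while r6
    # antisymmetrizes (without normalization) over the three occupied indices;
    # together they build the permutation structure of the triples amplitudes
    # used in the (T) lambda intermediates below.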
def p6(t):
return (t + t.transpose(1,2,0,4,5,3) +
t.transpose(2,0,1,5,3,4) + t.transpose(0,2,1,3,5,4) +
t.transpose(2,1,0,5,4,3) + t.transpose(1,0,2,4,3,5))
def r6(w):
return (w + w.transpose(2,0,1,3,4,5) + w.transpose(1,2,0,3,4,5)
- w.transpose(2,1,0,3,4,5) - w.transpose(0,2,1,3,4,5)
- w.transpose(1,0,2,3,4,5))
imds = uccsd_lambda.make_intermediates(mycc, t1, t2, eris)
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb = t2ab.shape[:2]
nmoa = eris.focka.shape[0]
nmob = eris.fockb.shape[0]
mo_ea, mo_eb = eris.mo_energy
eia = mo_ea[:nocca,None] - mo_ea[nocca:]
eIA = mo_eb[:noccb,None] - mo_eb[noccb:]
fvo = eris.focka[nocca:,:nocca]
fVO = eris.fockb[noccb:,:noccb]
# aaa
d3 = lib.direct_sum('ia+jb+kc->ijkabc', eia, eia, eia)
w = numpy.einsum('ijae,kceb->ijkabc', t2aa, numpy.asarray(eris.get_ovvv()).conj())
w-= numpy.einsum('mkbc,iajm->ijkabc', t2aa, numpy.asarray(eris.ovoo.conj()))
v = numpy.einsum('jbkc,ia->ijkabc', numpy.asarray(eris.ovov).conj(), t1a)
v+= numpy.einsum('jkbc,ai->ijkabc', t2aa, fvo) * .5
rw = r6(p6(w)) / d3
imds.l1a_t = numpy.einsum('ijkabc,jbkc->ia', rw.conj(),
numpy.asarray(eris.ovov)) / eia * .25
wvd = r6(p6(w * 2 + v)) / d3
l2_t = numpy.einsum('ijkabc,kceb->ijae', wvd, numpy.asarray(eris.get_ovvv()).conj())
l2_t -= numpy.einsum('ijkabc,iajm->mkbc', wvd, numpy.asarray(eris.ovoo.conj()))
l2_t = l2_t + l2_t.transpose(1,0,3,2)
l2_t += numpy.einsum('ijkabc,ai->jkbc', rw, fvo)
imds.l2aa_t = l2_t.conj() / lib.direct_sum('ia+jb->ijab', eia, eia) * .5
# bbb
d3 = lib.direct_sum('ia+jb+kc->ijkabc', eIA, eIA, eIA)
w = numpy.einsum('ijae,kceb->ijkabc', t2bb, numpy.asarray(eris.get_OVVV()).conj())
w-= numpy.einsum('imab,kcjm->ijkabc', t2bb, numpy.asarray(eris.OVOO.conj()))
v = numpy.einsum('jbkc,ia->ijkabc', numpy.asarray(eris.OVOV).conj(), t1b)
v+= numpy.einsum('jkbc,ai->ijkabc', t2bb, fVO) * .5
rw = r6(p6(w)) / d3
imds.l1b_t = numpy.einsum('ijkabc,jbkc->ia', rw.conj(),
numpy.asarray(eris.OVOV)) / eIA * .25
wvd = r6(p6(w * 2 + v)) / d3
l2_t = numpy.einsum('ijkabc,kceb->ijae', wvd, numpy.asarray(eris.get_OVVV()).conj())
l2_t -= numpy.einsum('ijkabc,iajm->mkbc', wvd, numpy.asarray(eris.OVOO.conj()))
l2_t = l2_t + l2_t.transpose(1,0,3,2)
l2_t += numpy.einsum('ijkabc,ai->jkbc', rw, fVO)
imds.l2bb_t = l2_t.conj() / lib.direct_sum('ia+jb->ijab', eIA, eIA) * .5
# baa
def r4(w):
w = w - w.transpose(0,2,1,3,4,5)
w = w + w.transpose(0,2,1,3,5,4)
return w
d3 = lib.direct_sum('ia+jb+kc->ijkabc', eIA, eia, eia)
w = numpy.einsum('jIeA,kceb->IjkAbc', t2ab, numpy.asarray(eris.get_ovvv()).conj()) * 2
w += numpy.einsum('jIbE,kcEA->IjkAbc', t2ab, numpy.asarray(eris.get_ovVV()).conj()) * 2
w += numpy.einsum('jkbe,IAec->IjkAbc', t2aa, numpy.asarray(eris.get_OVvv()).conj())
w -= numpy.einsum('mIbA,kcjm->IjkAbc', t2ab, numpy.asarray(eris.ovoo).conj()) * 2
w -= numpy.einsum('jMbA,kcIM->IjkAbc', t2ab, numpy.asarray(eris.ovOO).conj()) * 2
w -= numpy.einsum('jmbc,IAkm->IjkAbc', t2aa, numpy.asarray(eris.OVoo).conj())
v = numpy.einsum('jbkc,IA->IjkAbc', numpy.asarray(eris.ovov).conj(), t1b)
v += numpy.einsum('kcIA,jb->IjkAbc', numpy.asarray(eris.ovOV).conj(), t1a)
v += numpy.einsum('kcIA,jb->IjkAbc', numpy.asarray(eris.ovOV).conj(), t1a)
v += numpy.einsum('jkbc,AI->IjkAbc', t2aa, fVO) * .5
v += numpy.einsum('kIcA,bj->IjkAbc', t2ab, fvo) * 2
rw = r4(w) / d3
imds.l1a_t += numpy.einsum('ijkabc,kcia->jb', rw.conj(),
numpy.asarray(eris.ovOV)) / eia * .5
imds.l1b_t += numpy.einsum('ijkabc,jbkc->ia', rw.conj(),
numpy.asarray(eris.ovov)) / eIA * .25
wvd = r4(w * 2 + v) / d3
l2_t = numpy.einsum('ijkabc,iaec->jkbe', wvd, numpy.asarray(eris.get_OVvv()).conj())
l2_t -= numpy.einsum('ijkabc,iakm->jmbc', wvd, numpy.asarray(eris.OVoo).conj())
l2_t = l2_t + l2_t.transpose(1,0,3,2)
l2_t += numpy.einsum('ijkabc,ai->jkbc', rw, fVO)
imds.l2aa_t += l2_t.conj() / lib.direct_sum('ia+jb->ijab', eia, eia) * .5
l2_t = numpy.einsum('ijkabc,kceb->jiea', wvd, numpy.asarray(eris.get_ovvv()).conj())
l2_t += numpy.einsum('ijkabc,kcea->jibe', wvd, numpy.asarray(eris.get_ovVV()).conj())
l2_t -= numpy.einsum('ijkabc,kcjm->miba', wvd, numpy.asarray(eris.ovoo).conj())
l2_t -= numpy.einsum('ijkabc,kcim->jmba', wvd, numpy.asarray(eris.ovOO).conj())
l2_t += numpy.einsum('ijkabc,bj->kica', rw, fvo)
imds.l2ab_t = l2_t.conj() / lib.direct_sum('ia+jb->ijab', eia, eIA) * .5
# bba
d3 = lib.direct_sum('ia+jb+kc->ijkabc', eia, eIA, eIA)
w = numpy.einsum('ijae,kceb->ijkabc', t2ab, numpy.asarray(eris.get_OVVV()).conj()) * 2
w += numpy.einsum('ijeb,kcea->ijkabc', t2ab, numpy.asarray(eris.get_OVvv()).conj()) * 2
w += numpy.einsum('jkbe,iaec->ijkabc', t2bb, numpy.asarray(eris.get_ovVV()).conj())
w -= numpy.einsum('imab,kcjm->ijkabc', t2ab, numpy.asarray(eris.OVOO).conj()) * 2
w -= numpy.einsum('mjab,kcim->ijkabc', t2ab, numpy.asarray(eris.OVoo).conj()) * 2
w -= numpy.einsum('jmbc,iakm->ijkabc', t2bb, numpy.asarray(eris.ovOO).conj())
v = numpy.einsum('jbkc,ia->ijkabc', numpy.asarray(eris.OVOV).conj(), t1a)
v += numpy.einsum('iakc,jb->ijkabc', numpy.asarray(eris.ovOV).conj(), t1b)
v += numpy.einsum('iakc,jb->ijkabc', numpy.asarray(eris.ovOV).conj(), t1b)
v += numpy.einsum('JKBC,ai->iJKaBC', t2bb, fvo) * .5
v += numpy.einsum('iKaC,BJ->iJKaBC', t2ab, fVO) * 2
rw = r4(w) / d3
imds.l1a_t += numpy.einsum('ijkabc,jbkc->ia', rw.conj(),
numpy.asarray(eris.OVOV)) / eia * .25
imds.l1b_t += numpy.einsum('ijkabc,iakc->jb', rw.conj(),
numpy.asarray(eris.ovOV)) / eIA * .5
wvd = r4(w * 2 + v) / d3
l2_t = numpy.einsum('ijkabc,iaec->jkbe', wvd, numpy.asarray(eris.get_ovVV()).conj())
l2_t -= numpy.einsum('ijkabc,iakm->jmbc', wvd, numpy.asarray(eris.ovOO).conj())
l2_t = l2_t + l2_t.transpose(1,0,3,2)
l2_t += numpy.einsum('ijkabc,ai->jkbc', rw, fvo)
imds.l2bb_t += l2_t.conj() / lib.direct_sum('ia+jb->ijab', eIA, eIA) * .5
l2_t = numpy.einsum('ijkabc,kceb->ijae', wvd, numpy.asarray(eris.get_OVVV()).conj())
l2_t += numpy.einsum('ijkabc,kcea->ijeb', wvd, numpy.asarray(eris.get_OVvv()).conj())
l2_t -= numpy.einsum('ijkabc,kcjm->imab', wvd, numpy.asarray(eris.OVOO).conj())
l2_t -= numpy.einsum('ijkabc,kcim->mjab', wvd, numpy.asarray(eris.OVoo).conj())
l2_t += numpy.einsum('ijkabc,bj->ikac', rw, fVO)
imds.l2ab_t += l2_t.conj() / lib.direct_sum('ia+jb->ijab', eia, eIA) * .5
return imds
def update_lambda(mycc, t1, t2, l1, l2, eris=None, imds=None):
if eris is None: eris = mycc.ao2mo()
if imds is None: imds = make_intermediates(mycc, t1, t2, eris)
l1, l2 = uccsd_lambda.update_lambda(mycc, t1, t2, l1, l2, eris, imds)
l1a, l1b = l1
l2aa, l2ab, l2bb = l2
l1a += imds.l1a_t
l1b += imds.l1b_t
l2aa += imds.l2aa_t
l2ab += imds.l2ab_t
l2bb += imds.l2bb_t
return (l1a, l1b), (l2aa, l2ab, l2bb)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf import cc
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.spin = 0
mol.build()
mf0 = mf = scf.RHF(mol).run(conv_tol=1)
mf = scf.addons.convert_to_uhf(mf)
mycc = cc.UCCSD(mf)
eris = mycc.ao2mo()
from pyscf.cc import ccsd_t_lambda_slow as ccsd_t_lambda
mycc0 = cc.CCSD(mf0)
eris0 = mycc0.ao2mo()
mycc0.kernel(eris=eris0)
t1 = mycc0.t1
t2 = mycc0.t2
imds = ccsd_t_lambda.make_intermediates(mycc0, t1, t2, eris0)
l1, l2 = ccsd_t_lambda.update_lambda(mycc0, t1, t2, t1, t2, eris0, imds)
l1ref, l2ref = ccsd_t_lambda.update_lambda(mycc0, t1, t2, l1, l2, eris0, imds)
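    # Promote the closed-shell amplitudes to unrestricted form so the UHF
    # lambda routine can be checked against the RHF reference below: the
    # same-spin blocks are the antisymmetrized opposite-spin amplitudes.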
t1 = (t1, t1)
t2aa = t2 - t2.transpose(1,0,2,3)
t2 = (t2aa, t2, t2aa)
l1 = (l1, l1)
l2aa = l2 - l2.transpose(1,0,2,3)
l2 = (l2aa, l2, l2aa)
imds = make_intermediates(mycc, t1, t2, eris)
l1, l2 = update_lambda(mycc, t1, t2, l1, l2, eris, imds)
print(abs(l2[1]-l2[1].transpose(1,0,2,3)-l2[0]).max())
print(abs(l2[1]-l2[1].transpose(0,1,3,2)-l2[2]).max())
print(abs(l1[0]-l1ref).max())
print(abs(l2[1]-l2ref).max())
|
gkc1000/pyscf
|
pyscf/cc/uccsd_t_lambda.py
|
Python
|
apache-2.0
| 9,887
|
[
"PySCF"
] |
683a72bd7eafaaa617439eb2eaaadb85162ed6f7eaa4fd92407f2c1187ddeaab
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Merz) of interaction energies for bimolecular complexes from protein-indinavir reaction site.
| Geometries from and original reference energies from Faver et al. JCTC 7 790 (2011).
| Revised reference interaction energies (HSGA) from Marshall et al. JCP 135 194102 (2011).
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'``
- **benchmark**
- ``'HSG0'`` Faver et al. JCTC 7 790 (2011).
- |dl| ``'HSGA'`` |dr| Marshall et al. JCP 135 194102 (2011).
- **subset**
- ``'small'``
- ``'large'``
"""
import qcdb
# <<< HSG Database Module >>>
dbse = 'HSG'
# <<< Database Members >>>
HRXN = range(1, 22)
HRXN_SM = [6, 15]
HRXN_LG = [14]
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supramolecular calculations
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
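# RXNM stores each reagent's stoichiometric weight in the interaction energy
# (dimer minus the two monomers). ACTV_CP lists the counterpoise-corrected
# reagents (monomers evaluated in the full dimer basis), ACTV the uncorrected
# monomers, and ACTV_SA just the dimer for non-supramolecular treatments.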
# <<< Reference Values >>>
BIND = {}
# Original publication
BIND_HSG0 = {}
BIND_HSG0['%s-%s' % (dbse, 1)] = -0.519
BIND_HSG0['%s-%s' % (dbse, 2)] = -2.181
BIND_HSG0['%s-%s' % (dbse, 3)] = -2.451
BIND_HSG0['%s-%s' % (dbse, 4)] = -16.445
BIND_HSG0['%s-%s' % (dbse, 5)] = -18.984
BIND_HSG0['%s-%s' % (dbse, 6)] = -6.009
BIND_HSG0['%s-%s' % (dbse, 7)] = -3.301
BIND_HSG0['%s-%s' % (dbse, 8)] = -0.554
BIND_HSG0['%s-%s' % (dbse, 9)] = -5.038
BIND_HSG0['%s-%s' % (dbse, 10)] = -7.532
BIND_HSG0['%s-%s' % (dbse, 11)] = -6.279
BIND_HSG0['%s-%s' % (dbse, 12)] = 0.305
BIND_HSG0['%s-%s' % (dbse, 13)] = -2.087
BIND_HSG0['%s-%s' % (dbse, 14)] = -1.376
BIND_HSG0['%s-%s' % (dbse, 15)] = -0.853
BIND_HSG0['%s-%s' % (dbse, 16)] = -1.097
BIND_HSG0['%s-%s' % (dbse, 17)] = -1.504
BIND_HSG0['%s-%s' % (dbse, 18)] = -0.473
BIND_HSG0['%s-%s' % (dbse, 19)] = -1.569
BIND_HSG0['%s-%s' % (dbse, 20)] = 0.391
BIND_HSG0['%s-%s' % (dbse, 21)] = -9.486
# Current revision
BIND_HSGA = {}
BIND_HSGA['%s-%s' % (dbse, 1)] = -0.518
BIND_HSGA['%s-%s' % (dbse, 2)] = -2.283
BIND_HSGA['%s-%s' % (dbse, 3)] = -2.478
BIND_HSGA['%s-%s' % (dbse, 4)] = -16.526
BIND_HSGA['%s-%s' % (dbse, 5)] = -19.076
BIND_HSGA['%s-%s' % (dbse, 6)] = -5.998
BIND_HSGA['%s-%s' % (dbse, 7)] = -3.308
BIND_HSGA['%s-%s' % (dbse, 8)] = -0.581
BIND_HSGA['%s-%s' % (dbse, 9)] = -5.066
BIND_HSGA['%s-%s' % (dbse, 10)] = -7.509
BIND_HSGA['%s-%s' % (dbse, 11)] = -6.274
BIND_HSGA['%s-%s' % (dbse, 12)] = 0.302
BIND_HSGA['%s-%s' % (dbse, 13)] = -2.103
BIND_HSGA['%s-%s' % (dbse, 14)] = -1.378
BIND_HSGA['%s-%s' % (dbse, 15)] = -0.856
BIND_HSGA['%s-%s' % (dbse, 16)] = -1.100
BIND_HSGA['%s-%s' % (dbse, 17)] = -1.534
BIND_HSGA['%s-%s' % (dbse, 18)] = -0.472
BIND_HSGA['%s-%s' % (dbse, 19)] = -1.598
BIND_HSGA['%s-%s' % (dbse, 20)] = 0.378
BIND_HSGA['%s-%s' % (dbse, 21)] = -9.538
# Set default
BIND = BIND_HSGA
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 1)] = 'ala29-big'
TAGL['%s-%s-dimer' % (dbse, 1)] = 'ala29-big'
TAGL['%s-%s-monoA-CP' % (dbse, 1)] = 'indinavir from ala29-big'
TAGL['%s-%s-monoB-CP' % (dbse, 1)] = 'alanine from ala29-big'
TAGL['%s-%s-monoA-unCP' % (dbse, 1)] = 'indinavir from ala29-big'
TAGL['%s-%s-monoB-unCP' % (dbse, 1)] = 'alanine from ala29-big'
TAGL['%s-%s' % (dbse, 2)] = 'ala128-small'
TAGL['%s-%s-dimer' % (dbse, 2)] = 'ala128-small'
TAGL['%s-%s-monoA-CP' % (dbse, 2)] = 'alanine from ala128-small'
TAGL['%s-%s-monoB-CP' % (dbse, 2)] = 'indinavir from ala128-small'
TAGL['%s-%s-monoA-unCP' % (dbse, 2)] = 'alanine from ala128-small'
TAGL['%s-%s-monoB-unCP' % (dbse, 2)] = 'indinavir from ala128-small'
TAGL['%s-%s' % (dbse, 3)] = 'arg8'
TAGL['%s-%s-dimer' % (dbse, 3)] = 'arg8'
TAGL['%s-%s-monoA-CP' % (dbse, 3)] = 'arginine from arg8'
TAGL['%s-%s-monoB-CP' % (dbse, 3)] = 'indinavir from arg8'
TAGL['%s-%s-monoA-unCP' % (dbse, 3)] = 'arginine from arg8'
TAGL['%s-%s-monoB-unCP' % (dbse, 3)] = 'indinavir from arg8'
TAGL['%s-%s' % (dbse, 4)] = 'ash26-asp125'
TAGL['%s-%s-dimer' % (dbse, 4)] = 'ash26-asp125'
TAGL['%s-%s-monoA-CP' % (dbse, 4)] = 'aspartic acid from ash26-asp125'
TAGL['%s-%s-monoB-CP' % (dbse, 4)] = 'indinavir from ash26-asp125'
TAGL['%s-%s-monoA-unCP' % (dbse, 4)] = 'aspartic acid from ash26-asp125'
TAGL['%s-%s-monoB-unCP' % (dbse, 4)] = 'indinavir from ash26-asp125'
TAGL['%s-%s' % (dbse, 5)] = 'asp129-big'
TAGL['%s-%s-dimer' % (dbse, 5)] = 'asp129-big'
TAGL['%s-%s-monoA-CP' % (dbse, 5)] = 'aspartic acid from asp129-big'
TAGL['%s-%s-monoB-CP' % (dbse, 5)] = 'indinavir from asp129-big'
TAGL['%s-%s-monoA-unCP' % (dbse, 5)] = 'aspartic acid from asp129-big'
TAGL['%s-%s-monoB-unCP' % (dbse, 5)] = 'indinavir from asp129-big'
TAGL['%s-%s' % (dbse, 6)] = 'asp130'
TAGL['%s-%s-dimer' % (dbse, 6)] = 'asp130'
TAGL['%s-%s-monoA-CP' % (dbse, 6)] = 'aspartic acid from asp130'
TAGL['%s-%s-monoB-CP' % (dbse, 6)] = 'indinavir from asp130'
TAGL['%s-%s-monoA-unCP' % (dbse, 6)] = 'aspartic acid from asp130'
TAGL['%s-%s-monoB-unCP' % (dbse, 6)] = 'indinavir from asp130'
TAGL['%s-%s' % (dbse, 7)] = 'gly28-big'
TAGL['%s-%s-dimer' % (dbse, 7)] = 'gly28-big'
TAGL['%s-%s-monoA-CP' % (dbse, 7)] = 'glycine from gly28-big'
TAGL['%s-%s-monoB-CP' % (dbse, 7)] = 'indinavir from gly28-big'
TAGL['%s-%s-monoA-unCP' % (dbse, 7)] = 'glycine from gly28-big'
TAGL['%s-%s-monoB-unCP' % (dbse, 7)] = 'indinavir from gly28-big'
TAGL['%s-%s' % (dbse, 8)] = 'gly50-ring-big'
TAGL['%s-%s-dimer' % (dbse, 8)] = 'gly50-ring-big'
TAGL['%s-%s-monoA-CP' % (dbse, 8)] = 'glycine from gly50-ring-big'
TAGL['%s-%s-monoB-CP' % (dbse, 8)] = 'indinavir from gly50-ring-big'
TAGL['%s-%s-monoA-unCP' % (dbse, 8)] = 'glycine from gly50-ring-big'
TAGL['%s-%s-monoB-unCP' % (dbse, 8)] = 'indinavir from gly50-ring-big'
TAGL['%s-%s' % (dbse, 9)] = 'gly50-v1'
TAGL['%s-%s-dimer' % (dbse, 9)] = 'gly50-v1'
TAGL['%s-%s-monoA-CP' % (dbse, 9)] = 'glycine from gly50-v1'
TAGL['%s-%s-monoB-CP' % (dbse, 9)] = 'indinavir from gly50-v1'
TAGL['%s-%s-monoA-unCP' % (dbse, 9)] = 'glycine from gly50-v1'
TAGL['%s-%s-monoB-unCP' % (dbse, 9)] = 'indinavir from gly50-v1'
TAGL['%s-%s' % (dbse, 10)] = 'gly127'
TAGL['%s-%s-dimer' % (dbse, 10)] = 'gly127'
TAGL['%s-%s-monoA-CP' % (dbse, 10)] = 'indinavir from gly127'
TAGL['%s-%s-monoB-CP' % (dbse, 10)] = 'glycine from gly127'
TAGL['%s-%s-monoA-unCP' % (dbse, 10)] = 'indinavir from gly127'
TAGL['%s-%s-monoB-unCP' % (dbse, 10)] = 'glycine from gly127'
TAGL['%s-%s' % (dbse, 11)] = 'gly148'
TAGL['%s-%s-dimer' % (dbse, 11)] = 'gly148'
TAGL['%s-%s-monoA-CP' % (dbse, 11)] = 'glycine from gly148'
TAGL['%s-%s-monoB-CP' % (dbse, 11)] = 'indinavir from gly148'
TAGL['%s-%s-monoA-unCP' % (dbse, 11)] = 'glycine from gly148'
TAGL['%s-%s-monoB-unCP' % (dbse, 11)] = 'indinavir from gly148'
TAGL['%s-%s' % (dbse, 12)] = 'ile48-big'
TAGL['%s-%s-dimer' % (dbse, 12)] = 'ile48-big'
TAGL['%s-%s-monoA-CP' % (dbse, 12)] = 'isoleucine from ile48-big'
TAGL['%s-%s-monoB-CP' % (dbse, 12)] = 'indinavir from ile48-big'
TAGL['%s-%s-monoA-unCP' % (dbse, 12)] = 'isoleucine from ile48-big'
TAGL['%s-%s-monoB-unCP' % (dbse, 12)] = 'indinavir from ile48-big'
TAGL['%s-%s' % (dbse, 13)] = 'ile147'
TAGL['%s-%s-dimer' % (dbse, 13)] = 'ile147'
TAGL['%s-%s-monoA-CP' % (dbse, 13)] = 'isoleucine from ile147'
TAGL['%s-%s-monoB-CP' % (dbse, 13)] = 'indinavir from ile147'
TAGL['%s-%s-monoA-unCP' % (dbse, 13)] = 'isoleucine from ile147'
TAGL['%s-%s-monoB-unCP' % (dbse, 13)] = 'indinavir from ile147'
TAGL['%s-%s' % (dbse, 14)] = 'ile150-big'
TAGL['%s-%s-dimer' % (dbse, 14)] = 'ile150-big'
TAGL['%s-%s-monoA-CP' % (dbse, 14)] = 'isoleucine from ile150-big'
TAGL['%s-%s-monoB-CP' % (dbse, 14)] = 'indinavir from ile150-big'
TAGL['%s-%s-monoA-unCP' % (dbse, 14)] = 'isoleucine from ile150-big'
TAGL['%s-%s-monoB-unCP' % (dbse, 14)] = 'indinavir from ile150-big'
TAGL['%s-%s' % (dbse, 15)] = 'ile184'
TAGL['%s-%s-dimer' % (dbse, 15)] = 'ile184'
TAGL['%s-%s-monoA-CP' % (dbse, 15)] = 'isoleucine from ile184'
TAGL['%s-%s-monoB-CP' % (dbse, 15)] = 'indinavir from ile184'
TAGL['%s-%s-monoA-unCP' % (dbse, 15)] = 'isoleucine from ile184'
TAGL['%s-%s-monoB-unCP' % (dbse, 15)] = 'indinavir from ile184'
TAGL['%s-%s' % (dbse, 16)] = 'leu23-big'
TAGL['%s-%s-dimer' % (dbse, 16)] = 'leu23-big'
TAGL['%s-%s-monoA-CP' % (dbse, 16)] = 'leucine from leu23-big'
TAGL['%s-%s-monoB-CP' % (dbse, 16)] = 'indinavir from leu23-big'
TAGL['%s-%s-monoA-unCP' % (dbse, 16)] = 'leucine from leu23-big'
TAGL['%s-%s-monoB-unCP' % (dbse, 16)] = 'indinavir from leu23-big'
TAGL['%s-%s' % (dbse, 17)] = 'pro181'
TAGL['%s-%s-dimer' % (dbse, 17)] = 'pro181'
TAGL['%s-%s-monoA-CP' % (dbse, 17)] = 'proline from pro181'
TAGL['%s-%s-monoB-CP' % (dbse, 17)] = 'indinavir from pro181'
TAGL['%s-%s-monoA-unCP' % (dbse, 17)] = 'proline from pro181'
TAGL['%s-%s-monoB-unCP' % (dbse, 17)] = 'indinavir from pro181'
TAGL['%s-%s' % (dbse, 18)] = 'val33-big'
TAGL['%s-%s-dimer' % (dbse, 18)] = 'val33-big'
TAGL['%s-%s-monoA-CP' % (dbse, 18)] = 'valine from val33-big'
TAGL['%s-%s-monoB-CP' % (dbse, 18)] = 'indinavir from val33-big'
TAGL['%s-%s-monoA-unCP' % (dbse, 18)] = 'valine from val33-big'
TAGL['%s-%s-monoB-unCP' % (dbse, 18)] = 'indinavir from val33-big'
TAGL['%s-%s' % (dbse, 19)] = 'val83'
TAGL['%s-%s-dimer' % (dbse, 19)] = 'val83'
TAGL['%s-%s-monoA-CP' % (dbse, 19)] = 'valine from val83'
TAGL['%s-%s-monoB-CP' % (dbse, 19)] = 'indinavir from val83'
TAGL['%s-%s-monoA-unCP' % (dbse, 19)] = 'valine from val83'
TAGL['%s-%s-monoB-unCP' % (dbse, 19)] = 'indinavir from val83'
TAGL['%s-%s' % (dbse, 20)] = 'val132'
TAGL['%s-%s-dimer' % (dbse, 20)] = 'val132'
TAGL['%s-%s-monoA-CP' % (dbse, 20)] = 'valine from val132'
TAGL['%s-%s-monoB-CP' % (dbse, 20)] = 'indinavir from val132'
TAGL['%s-%s-monoA-unCP' % (dbse, 20)] = 'valine from val132'
TAGL['%s-%s-monoB-unCP' % (dbse, 20)] = 'indinavir from val132'
TAGL['%s-%s' % (dbse, 21)] = 'wat200'
TAGL['%s-%s-dimer' % (dbse, 21)] = 'wat200'
TAGL['%s-%s-monoA-CP' % (dbse, 21)] = 'water from wat200'
TAGL['%s-%s-monoB-CP' % (dbse, 21)] = 'indinavir from wat200'
TAGL['%s-%s-monoA-unCP' % (dbse, 21)] = 'water from wat200'
TAGL['%s-%s-monoB-unCP' % (dbse, 21)] = 'indinavir from wat200'
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-dimer' % (dbse, '1')] = qcdb.Molecule("""
0 1
C 13.03200 29.07900 6.986000
H 12.30800 29.25100 7.790000
H 13.47200 28.08100 7.080000
H 13.82700 29.84100 7.035000
H 12.50772 29.16746 6.023030
--
0 1
C 10.60200 24.81800 6.466000
O 10.95600 23.84000 7.103000
N 10.17800 25.94300 7.070000
C 10.09100 26.25600 8.476000
C 9.372000 27.59000 8.640000
C 11.44600 26.35600 9.091000
C 9.333000 25.25000 9.282000
H 9.874000 26.68900 6.497000
H 9.908000 28.37100 8.093000
H 8.364000 27.46400 8.233000
H 9.317000 27.84600 9.706000
H 9.807000 24.28200 9.160000
H 9.371000 25.57400 10.32900
H 8.328000 25.26700 8.900000
H 11.28800 26.57600 10.14400
H 11.97000 27.14900 8.585000
H 11.93200 25.39300 8.957000
H 10.61998 24.85900 5.366911
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2')] = qcdb.Molecule("""
0 1
C 18.71400 22.19500 2.742000
H 18.37900 21.58700 3.577000
C 17.68800 22.11500 1.586000
H 17.61600 21.07600 1.227000
H 16.69600 22.44400 1.940000
H 18.00000 22.76400 0.747000
H 18.77673 23.23495 3.094948
H 19.70954 21.82087 2.461043
--
0 1
C 16.65000 19.51600 5.550000
C 18.05900 18.95300 5.945000
O 19.08300 19.90900 5.610000
C 18.24000 17.70600 5.049000
C 17.41000 17.97200 3.810000
C 17.48600 17.37200 2.527000
C 16.60600 17.86100 1.547000
C 15.70300 18.89200 1.854000
C 15.65400 19.47000 3.123000
C 16.51300 18.99700 4.107000
H 15.85600 19.11800 6.209000
H 18.17800 18.65700 6.992000
H 17.85200 16.83600 5.584000
H 19.29200 17.55300 4.850000
H 18.19800 16.55200 2.242000
H 16.61400 17.45600 0.523000
H 15.03200 19.27500 1.092000
H 14.96900 20.29200 3.327000
H 19.96600 19.54000 5.876000
H 16.54908 20.59917 5.712962
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3')] = qcdb.Molecule("""
1 1
C 23.73500 21.90400 8.645000
H 24.33900 21.64000 9.515000
H 23.04400 22.70000 8.945000
N 22.96900 20.71700 8.247000
H 22.85100 20.56200 7.249000
C 22.40300 19.85100 9.070000
N 22.40300 20.05000 10.36600
H 22.82700 20.88300 10.74700
H 21.93200 19.41900 10.98100
N 21.82000 18.77600 8.615000
H 21.76800 18.61500 7.604000
H 21.33700 18.14400 9.214000
H 24.38340 22.25566 7.828969
--
0 1
C 16.73700 21.75300 8.985000
C 18.06300 21.93100 8.570000
C 19.04400 21.01900 8.966000
C 18.68400 19.93600 9.775000
C 17.35900 19.76800 10.19300
C 16.38200 20.67900 9.796000
H 15.33000 20.56400 10.09500
H 17.07400 18.92500 10.82100
H 19.43700 19.21300 10.07200
H 20.08100 21.14800 8.627000
H 18.32800 22.76900 7.913000
H 15.93631 22.42849 8.649437
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4')] = qcdb.Molecule("""
-1 1
C 17.05600 28.65300 6.834000
H 17.72900 28.22900 7.569000
H 16.32100 29.27500 7.342000
C 16.35100 27.45400 6.256000
O 16.17800 26.43900 6.902000
O 15.98200 27.55700 4.965000
H 15.73800 26.67800 4.650000
C 16.27300 25.57900 0.088000
H 16.75700 24.66100 -0.278000
H 15.39700 25.75100 -0.577000
C 15.87600 25.38300 1.569000
O 16.42900 26.07300 2.466000
O 14.98200 24.56700 1.861000
H 17.61665 29.26091 6.108662
H 16.97158 26.42544 0.013713047
--
0 1
C 14.25800 24.02900 5.093000
O 15.51000 24.53800 4.641000
H 15.42000 24.70300 3.667000
H 14.02700 23.02800 4.754000
H 13.45976 24.69373 4.731124
H 14.36576 23.85731 6.174161
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5')] = qcdb.Molecule("""
-1 1
C 18.71400 22.19500 2.742000
H 18.37900 21.58700 3.577000
C 20.10300 21.67300 2.350000
O 20.78600 22.26500 1.513000
N 20.55800 20.55500 2.927000
H 20.07200 20.11800 3.686000
C 21.79700 19.92300 2.527000
H 22.55800 20.68900 2.504000
C 22.17900 18.80900 3.507000
H 21.42700 18.01600 3.405000
H 23.13400 18.39700 3.140000
C 22.26300 19.27700 4.986000
O 23.05600 20.18800 5.322000
O 21.52500 18.75500 5.855000
H 21.73715 19.49687 1.514662
H 18.77673 23.23495 3.094948
H 17.98479 22.13814 1.920400
--
0 1
C 18.05900 18.95300 5.945000
H 18.17800 18.65700 6.992000
O 19.08300 19.90900 5.610000
H 19.96600 19.54000 5.876000
H 17.07047 19.34799 5.667876
H 18.18777 18.06583 5.307547
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6')] = qcdb.Molecule("""
-1 1
C 20.08800 16.66100 -0.398000
H 19.04500 16.60200 -0.082000
H 20.18300 16.24400 -1.387000
C 20.90200 15.70700 0.505000
O 21.91000 15.13700 0.031000
O 20.37700 15.22700 1.542000
H 20.29988 17.73389 -0.5163442
--
0 1
C 17.41000 17.97200 3.810000
C 17.48600 17.37200 2.527000
C 16.60600 17.86100 1.547000
C 15.70300 18.89200 1.854000
C 15.65400 19.47000 3.123000
C 16.51300 18.99700 4.107000
H 18.19800 16.55200 2.242000
H 16.61400 17.45600 0.523000
H 15.03200 19.27500 1.092000
H 14.96900 20.29200 3.327000
H 16.61088 19.36781 5.137980
H 18.01270 17.77884 4.709692
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7')] = qcdb.Molecule("""
0 1
C 15.27800 30.38900 2.305000
O 14.37900 30.99200 1.712000
N 15.20400 29.08900 2.640000
H 15.90100 28.63400 3.249000
C 14.02400 28.31500 2.332000
H 13.65700 28.66700 1.375000
H 14.32500 27.27400 2.236000
C 12.93200 28.48000 3.398000
O 11.76000 28.20200 3.138000
N 13.27200 28.98600 4.593000
H 14.23600 29.23200 4.806000
H 12.53585 29.15009 5.393723
H 16.18758 30.94244 2.581356
--
0 1
C 14.25800 24.02900 5.093000
O 15.51000 24.53800 4.641000
C 13.12200 24.97500 4.578000
N 11.86200 24.24500 4.359000
C 11.78200 23.78100 2.948000
C 10.62700 24.87500 4.938000
H 15.42000 24.70300 3.667000
H 14.02700 23.02800 4.754000
H 12.93300 25.79400 5.257000
H 13.40100 25.40500 3.596000
H 10.49500 25.96800 4.694000
H 12.00900 24.60600 2.286000
H 12.53400 23.01100 2.841000
H 10.60902 24.83400 6.037089
H 9.792144 24.23767 4.611163
H 10.82891 23.34622 2.612456
H 14.36576 23.85731 6.174161
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8')] = qcdb.Molecule("""
0 1
C 4.290000 24.10300 10.08600
O 4.354000 23.13400 10.84900
N 4.507000 24.01000 8.777000
H 4.471000 24.81400 8.166000
C 4.858000 22.75500 8.154000
H 4.799000 21.96700 8.885000
H 4.136000 22.58200 7.364000
C 6.276000 22.76600 7.611000
O 6.647000 23.71300 6.921000
N 7.066000 21.74000 7.938000
H 6.754000 21.01200 8.573000
H 4.054781 25.09671 10.49492
H 8.028304 21.54906 7.440493
--
0 1
C 6.218000 24.82200 2.171000
C 6.715000 23.76400 2.930000
C 5.918000 23.22600 3.934000
C 4.663000 23.78000 4.166000
C 4.226000 24.84700 3.389000
N 4.990000 25.38300 2.391000
H 6.798000 25.19700 1.318000
H 3.211000 25.24700 3.575000
H 4.018000 23.38100 4.943000
H 6.274000 22.37100 4.504000
H 7.673767 23.27955 2.693205
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9')] = qcdb.Molecule("""
0 1
N 9.334000 19.62700 6.121000
H 10.08000 19.74000 6.781000
C 8.252000 20.39100 6.228000
O 7.244000 20.21700 5.545000
C 8.331000 21.48900 7.284000
H 8.671000 22.40300 6.814000
H 9.046000 21.19200 8.047000
N 7.066000 21.74000 7.938000
H 6.754000 21.01200 8.573000
O 6.647000 23.71300 6.921000
C 6.276000 22.76600 7.611000
H 9.495216 18.80266 5.410733
H 5.248769 22.75803 8.004361
--
0 1
C 10.62700 24.87500 4.938000
C 10.60200 24.81800 6.466000
O 10.95600 23.84000 7.103000
N 10.17800 25.94300 7.070000
C 10.09100 26.25600 8.476000
H 9.874000 26.68900 6.497000
H 10.49500 25.96800 4.694000
H 11.53119 24.41376 4.514093
H 9.792144 24.23767 4.611163
H 9.533425 25.51600 9.068883
H 9.572130 27.21869 8.594352
H 11.09040 26.32976 8.929603
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10')] = qcdb.Molecule("""
0 1
C 18.71400 22.19500 2.742000
H 18.37900 21.58700 3.577000
N 18.79700 23.57100 3.209000
H 18.86300 24.28300 2.488000
C 18.84100 23.91000 4.508000
O 18.83700 23.07300 5.410000
C 18.95700 25.38900 4.894000
H 18.06200 25.68500 5.433000
H 19.81900 25.52500 5.542000
H 19.09001 26.05938 4.032084
H 19.70954 21.82087 2.461043
H 17.98479 22.13814 1.920400
--
0 1
C 16.65000 19.51600 5.550000
H 15.85600 19.11800 6.209000
H 17.38100 21.50400 5.804000
N 16.51500 20.96500 5.768000
C 15.41800 21.48800 6.353000
O 14.37000 20.87200 6.467000
C 15.57100 22.84700 6.986000
H 16.50000 23.29500 6.652000
H 14.74390 23.51764 6.710063
H 15.61789 22.72907 8.078654
H 17.63853 19.12101 5.827124
H 16.55212 19.14519 4.519021
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11')] = qcdb.Molecule("""
0 1
C 14.75600 16.28200 8.071000
O 15.85100 16.84300 8.024000
N 13.59000 16.92100 8.167000
H 12.70700 16.41600 8.193000
C 13.49400 18.33500 8.443000
H 14.48600 18.75400 8.594000
H 13.03600 18.80900 7.582000
C 12.63300 18.57700 9.678000
O 12.60000 17.78900 10.62400
N 11.87400 19.66100 9.642000
H 11.86900 20.24000 8.807000
H 14.71385 15.18329 8.038301
H 11.09386 19.92029 10.37286
--
0 1
C 16.65000 19.51600 5.550000
N 16.51500 20.96500 5.768000
H 17.38100 21.50400 5.804000
H 15.85600 19.11800 6.209000
C 15.41800 21.48800 6.353000
O 14.37000 20.87200 6.467000
C 15.57100 22.84700 6.986000
H 16.50000 23.29500 6.652000
H 14.74390 23.51764 6.710063
H 15.61789 22.72907 8.078654
H 17.63853 19.12101 5.827124
H 16.55212 19.14519 4.519021
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12')] = qcdb.Molecule("""
0 1
C 5.250000 26.25800 10.94600
H 5.928000 26.22800 10.10000
C 5.965000 25.62100 12.14400
H 5.388000 25.81100 13.04900
H 6.001000 24.53800 11.98600
C 7.381000 26.17300 12.29600
H 7.851000 25.68400 13.15200
H 7.950000 25.96300 11.38000
H 7.328000 27.25600 12.46000
H 4.327890 25.69684 10.73431
H 4.991234 27.31620 11.09851
--
0 1
C 10.60200 24.81800 6.466000
O 10.95600 23.84000 7.103000
N 10.17800 25.94300 7.070000
C 10.09100 26.25600 8.476000
C 9.372000 27.59000 8.640000
C 11.44600 26.35600 9.091000
C 9.333000 25.25000 9.282000
H 9.874000 26.68900 6.497000
H 9.908000 28.37100 8.093000
H 8.364000 27.46400 8.233000
H 9.317000 27.84600 9.706000
H 9.807000 24.28200 9.160000
H 9.371000 25.57400 10.32900
H 8.328000 25.26700 8.900000
H 11.28800 26.57600 10.14400
H 11.97000 27.14900 8.585000
H 11.93200 25.39300 8.957000
H 10.61998 24.85900 5.366911
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13')] = qcdb.Molecule("""
0 1
C 16.05600 13.91300 3.701000
H 16.15900 14.95600 4.033000
C 17.23700 13.52900 2.786000
H 17.24200 14.19600 1.903000
H 18.17200 13.64500 3.338000
H 17.11900 12.49000 2.453000
C 14.73500 13.74900 2.932000
H 14.59700 12.70200 2.661000
H 13.89800 14.05900 3.553000
C 14.73600 14.57900 1.670000
H 13.78900 14.41800 1.168000
H 14.86200 15.62400 1.955000
H 15.57300 14.24500 1.050000
H 16.03595 13.25839 4.584791
--
0 1
C 16.51300 18.99700 4.107000
C 17.41000 17.97200 3.810000
C 17.48600 17.37200 2.527000
C 16.60600 17.86100 1.547000
C 15.70300 18.89200 1.854000
C 15.65400 19.47000 3.123000
H 14.96900 20.29200 3.327000
H 15.03200 19.27500 1.092000
H 16.61400 17.45600 0.523000
H 18.19800 16.55200 2.242000
H 18.01270 17.77884 4.709692
H 16.61088 19.36781 5.137980
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14')] = qcdb.Molecule("""
0 1
C 10.37400 21.44300 10.31100
H 9.934000 21.48700 9.305000
C 9.280000 21.86400 11.28300
H 8.982000 22.89200 11.04700
H 8.438000 21.18400 11.17100
H 9.688000 21.81300 12.29800
C 11.56200 22.42400 10.37400
H 12.42800 21.94400 9.922000
H 11.32000 23.31700 9.801000
C 11.94400 22.85500 11.78700
H 12.79300 23.53700 11.72100
H 11.08500 23.36200 12.24600
H 12.21000 21.96700 12.37000
H 10.70966 20.41678 10.52123
--
0 1
C 10.60200 24.81800 6.466000
O 10.95600 23.84000 7.103000
N 10.17800 25.94300 7.070000
C 10.09100 26.25600 8.476000
C 9.372000 27.59000 8.640000
C 11.44600 26.35600 9.091000
C 9.333000 25.25000 9.282000
H 9.874000 26.68900 6.497000
H 9.908000 28.37100 8.093000
H 8.364000 27.46400 8.233000
H 9.317000 27.84600 9.706000
H 9.807000 24.28200 9.160000
H 9.371000 25.57400 10.32900
H 8.328000 25.26700 8.900000
H 11.28800 26.57600 10.14400
H 11.97000 27.14900 8.585000
H 11.93200 25.39300 8.957000
H 10.61998 24.85900 5.366911
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15')] = qcdb.Molecule("""
0 1
C 12.74600 22.16800 -1.090000
H 12.81800 23.17800 -0.699000
H 11.91700 22.12800 -1.801000
C 12.43800 21.21300 0.067000
H 11.49600 21.52600 0.536000
H 12.33700 20.19000 -0.320000
H 13.25200 21.25600 0.799000
H 13.67406 21.92095 -1.626354
--
0 1
N 9.254000 23.85400 3.012000
C 11.78200 23.78100 2.948000
C 10.44700 23.17200 2.478000
N 11.86200 24.24500 4.359000
H 10.43300 22.11300 2.752000
H 10.40400 23.24700 1.396000
H 12.53400 23.01100 2.841000
H 12.00900 24.60600 2.286000
H 8.356916 23.29360 2.710019
H 9.400716 23.94588 4.098293
H 10.95781 24.70625 4.782907
H 12.80321 24.79031 4.522592
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16')] = qcdb.Molecule("""
0 1
C 20.37500 27.13100 10.18700
H 20.77800 26.39300 10.88300
H 19.30700 27.23700 10.38300
C 20.54000 26.57700 8.773000
H 20.37800 27.33600 8.009000
C 21.95300 26.03000 8.616000
H 22.05800 25.62600 7.609000
H 22.66100 26.85500 8.771000
H 22.12100 25.24800 9.363000
C 19.49000 25.48000 8.621000
H 19.56600 25.05200 7.621000
H 19.65600 24.70700 9.381000
H 18.48800 25.92200 8.763000
H 20.85217 28.10683 10.36039
--
0 1
C 16.73700 21.75300 8.985000
C 18.06300 21.93100 8.570000
C 19.04400 21.01900 8.966000
C 18.68400 19.93600 9.775000
C 17.35900 19.76800 10.19300
C 16.38200 20.67900 9.796000
H 15.33000 20.56400 10.09500
H 17.07400 18.92500 10.82100
H 19.43700 19.21300 10.07200
H 20.08100 21.14800 8.627000
H 18.32800 22.76900 7.913000
C 15.63700 22.68100 8.524000
H 15.79700 23.65100 8.994000
H 14.68200 22.28100 8.905000
H 15.59011 22.79893 7.431346
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17')] = qcdb.Molecule("""
0 1
C 5.679000 20.88000 0.749000
H 6.221000 21.83800 0.688000
H 4.631000 21.09600 0.905000
C 6.254000 19.97400 1.854000
H 6.621000 20.53900 2.695000
H 5.514000 19.25100 2.203000
H 7.080433 19.44257 1.359442
H 5.830258 20.31081 -0.1800558
--
0 1
C 6.715000 23.76400 2.930000
C 6.218000 24.82200 2.171000
N 4.990000 25.38300 2.391000
C 4.226000 24.84700 3.389000
C 4.663000 23.78000 4.166000
C 5.918000 23.22600 3.934000
C 8.039000 23.09500 2.603000
H 8.026000 22.11200 3.087000
H 8.096000 22.92700 1.519000
H 6.274000 22.37100 4.504000
H 4.018000 23.38100 4.943000
H 3.211000 25.24700 3.575000
H 6.798000 25.19700 1.318000
H 8.936083 23.65540 2.904981
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18')] = qcdb.Molecule("""
0 1
C 11.54100 27.68600 13.69600
H 12.45900 27.15000 13.44600
C 10.79000 27.96500 12.40600
H 10.55700 27.01400 11.92400
H 9.879000 28.51400 12.64300
H 11.44300 28.56800 11.76200
H 10.90337 27.06487 14.34224
H 11.78789 28.62476 14.21347
--
0 1
C 10.60200 24.81800 6.466000
O 10.95600 23.84000 7.103000
N 10.17800 25.94300 7.070000
C 10.09100 26.25600 8.476000
C 9.372000 27.59000 8.640000
C 11.44600 26.35600 9.091000
C 9.333000 25.25000 9.282000
H 9.874000 26.68900 6.497000
H 9.908000 28.37100 8.093000
H 8.364000 27.46400 8.233000
H 9.317000 27.84600 9.706000
H 9.807000 24.28200 9.160000
H 9.371000 25.57400 10.32900
H 8.328000 25.26700 8.900000
H 11.28800 26.57600 10.14400
H 11.97000 27.14900 8.585000
H 11.93200 25.39300 8.957000
H 10.61998 24.85900 5.366911
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19')] = qcdb.Molecule("""
0 1
C 18.95600 23.00600 13.42400
H 19.58200 22.11000 13.49500
C 17.99700 22.76900 12.25600
H 18.56900 22.61800 11.32900
H 17.41200 21.87100 12.48100
H 17.34100 23.63600 12.15300
C 19.86100 24.16500 13.07400
H 20.34500 23.96700 12.11700
H 19.24600 25.06600 13.01700
H 20.59800 24.25000 13.87300
H 18.44867 23.19359 14.38182
--
0 1
C 16.73700 21.75300 8.985000
C 18.06300 21.93100 8.570000
C 19.04400 21.01900 8.966000
C 18.68400 19.93600 9.775000
C 17.35900 19.76800 10.19300
C 16.38200 20.67900 9.796000
H 15.33000 20.56400 10.09500
H 17.07400 18.92500 10.82100
H 19.43700 19.21300 10.07200
H 20.08100 21.14800 8.627000
H 18.32800 22.76900 7.913000
H 15.93631 22.42849 8.649437
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20')] = qcdb.Molecule("""
0 1
C 13.79100 17.02500 -2.243000
H 12.90600 17.67000 -2.286000
C 13.29600 15.57900 -2.178000
H 12.68500 15.45200 -1.281000
H 12.69000 15.37300 -3.075000
H 14.15900 14.91000 -2.152000
C 14.52700 17.45100 -0.990000
H 13.87200 17.32300 -0.127000
H 15.43100 16.85400 -0.884000
H 14.78900 18.51600 -1.107000
H 14.41225 17.12759 -3.144954
--
0 1
C 16.51300 18.99700 4.107000
C 17.41000 17.97200 3.810000
C 17.48600 17.37200 2.527000
C 16.60600 17.86100 1.547000
C 15.70300 18.89200 1.854000
C 15.65400 19.47000 3.123000
H 14.96900 20.29200 3.327000
H 15.03200 19.27500 1.092000
H 16.61400 17.45600 0.523000
H 18.19800 16.55200 2.242000
H 18.01270 17.77884 4.709692
H 16.61088 19.36781 5.137980
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21')] = qcdb.Molecule("""
-1 1
O 8.976000 28.18400 5.336000
H 9.797000 28.02600 4.860000
H 8.600000 28.95300 4.860000
C 7.707000 31.02100 3.964000
O 8.101000 31.66300 2.963000
O 7.068000 29.95800 3.828000
C 8.014000 31.53300 5.387000
H 7.465000 32.46700 5.552000
H 7.660000 30.81200 6.145000
H 9.081843 31.72975 5.563073
--
0 1
C 10.62700 24.87500 4.938000
C 10.60200 24.81800 6.466000
O 10.95600 23.84000 7.103000
N 10.17800 25.94300 7.070000
C 10.09100 26.25600 8.476000
H 9.874000 26.68900 6.497000
H 10.49500 25.96800 4.694000
H 9.572130 27.21869 8.594352
H 9.533425 25.51600 9.068883
H 11.09040 26.32976 8.929603
H 11.53119 24.41376 4.514093
H 9.792144 24.23767 4.611163
units angstrom
""")
# <<< Derived Geometry Strings >>>
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
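# extract_fragments(1) / extract_fragments(2) above pull out the isolated
# monomers, while extract_fragments(1, 2) and extract_fragments(2, 1) keep one
# fragment real and treat the other as ghost atoms, giving the
# counterpoise-corrected monomer geometries.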
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
DATA['NUCLEAR REPULSION ENERGY']['HSG-1-dimer' ] = 409.61526850
DATA['NUCLEAR REPULSION ENERGY']['HSG-1-monoA-unCP' ] = 13.33595232
DATA['NUCLEAR REPULSION ENERGY']['HSG-1-monoB-unCP' ] = 332.12261009
DATA['NUCLEAR REPULSION ENERGY']['HSG-2-dimer' ] = 693.84322132
DATA['NUCLEAR REPULSION ENERGY']['HSG-2-monoA-unCP' ] = 41.89071165
DATA['NUCLEAR REPULSION ENERGY']['HSG-2-monoB-unCP' ] = 501.75349414
DATA['NUCLEAR REPULSION ENERGY']['HSG-3-dimer' ] = 578.08454963
DATA['NUCLEAR REPULSION ENERGY']['HSG-3-monoA-unCP' ] = 194.80446994
DATA['NUCLEAR REPULSION ENERGY']['HSG-3-monoB-unCP' ] = 202.93507303
DATA['NUCLEAR REPULSION ENERGY']['HSG-4-dimer' ] = 536.02111700
DATA['NUCLEAR REPULSION ENERGY']['HSG-4-monoA-unCP' ] = 336.06029689
DATA['NUCLEAR REPULSION ENERGY']['HSG-4-monoB-unCP' ] = 40.09418196
DATA['NUCLEAR REPULSION ENERGY']['HSG-5-dimer' ] = 641.07583890
DATA['NUCLEAR REPULSION ENERGY']['HSG-5-monoA-unCP' ] = 440.48402439
DATA['NUCLEAR REPULSION ENERGY']['HSG-5-monoB-unCP' ] = 39.79355972
DATA['NUCLEAR REPULSION ENERGY']['HSG-6-dimer' ] = 440.32913479
DATA['NUCLEAR REPULSION ENERGY']['HSG-6-monoA-unCP' ] = 112.25425669
DATA['NUCLEAR REPULSION ENERGY']['HSG-6-monoB-unCP' ] = 202.38032057
DATA['NUCLEAR REPULSION ENERGY']['HSG-7-dimer' ] = 825.37483209
DATA['NUCLEAR REPULSION ENERGY']['HSG-7-monoA-unCP' ] = 302.68630925
DATA['NUCLEAR REPULSION ENERGY']['HSG-7-monoB-unCP' ] = 256.12378323
DATA['NUCLEAR REPULSION ENERGY']['HSG-8-dimer' ] = 721.36437027
DATA['NUCLEAR REPULSION ENERGY']['HSG-8-monoA-unCP' ] = 298.54657988
DATA['NUCLEAR REPULSION ENERGY']['HSG-8-monoB-unCP' ] = 204.68604075
DATA['NUCLEAR REPULSION ENERGY']['HSG-9-dimer' ] = 699.77856295
DATA['NUCLEAR REPULSION ENERGY']['HSG-9-monoA-unCP' ] = 298.58992071
DATA['NUCLEAR REPULSION ENERGY']['HSG-9-monoB-unCP' ] = 179.49546339
DATA['NUCLEAR REPULSION ENERGY']['HSG-10-dimer' ] = 538.20524151
DATA['NUCLEAR REPULSION ENERGY']['HSG-10-monoA-unCP' ] = 179.66798724
DATA['NUCLEAR REPULSION ENERGY']['HSG-10-monoB-unCP' ] = 180.34079666
DATA['NUCLEAR REPULSION ENERGY']['HSG-11-dimer' ] = 697.51311416
DATA['NUCLEAR REPULSION ENERGY']['HSG-11-monoA-unCP' ] = 296.89990217
DATA['NUCLEAR REPULSION ENERGY']['HSG-11-monoB-unCP' ] = 180.34079666
DATA['NUCLEAR REPULSION ENERGY']['HSG-12-dimer' ] = 553.87245309
DATA['NUCLEAR REPULSION ENERGY']['HSG-12-monoA-unCP' ] = 82.71734142
DATA['NUCLEAR REPULSION ENERGY']['HSG-12-monoB-unCP' ] = 332.12261009
DATA['NUCLEAR REPULSION ENERGY']['HSG-13-dimer' ] = 492.23285254
DATA['NUCLEAR REPULSION ENERGY']['HSG-13-monoA-unCP' ] = 134.28280330
DATA['NUCLEAR REPULSION ENERGY']['HSG-13-monoB-unCP' ] = 202.38032057
DATA['NUCLEAR REPULSION ENERGY']['HSG-14-dimer' ] = 670.02074299
DATA['NUCLEAR REPULSION ENERGY']['HSG-14-monoA-unCP' ] = 134.10189365
DATA['NUCLEAR REPULSION ENERGY']['HSG-14-monoB-unCP' ] = 332.12261009
DATA['NUCLEAR REPULSION ENERGY']['HSG-15-dimer' ] = 242.88545739
DATA['NUCLEAR REPULSION ENERGY']['HSG-15-monoA-unCP' ] = 42.22202660
DATA['NUCLEAR REPULSION ENERGY']['HSG-15-monoB-unCP' ] = 131.69625678
DATA['NUCLEAR REPULSION ENERGY']['HSG-16-dimer' ] = 551.59382982
DATA['NUCLEAR REPULSION ENERGY']['HSG-16-monoA-unCP' ] = 135.70381177
DATA['NUCLEAR REPULSION ENERGY']['HSG-16-monoB-unCP' ] = 269.04078448
DATA['NUCLEAR REPULSION ENERGY']['HSG-17-dimer' ] = 421.73710621
DATA['NUCLEAR REPULSION ENERGY']['HSG-17-monoA-unCP' ] = 42.20972067
DATA['NUCLEAR REPULSION ENERGY']['HSG-17-monoB-unCP' ] = 270.70970086
DATA['NUCLEAR REPULSION ENERGY']['HSG-18-dimer' ] = 474.74808030
DATA['NUCLEAR REPULSION ENERGY']['HSG-18-monoA-unCP' ] = 42.43370398
DATA['NUCLEAR REPULSION ENERGY']['HSG-18-monoB-unCP' ] = 332.12261009
DATA['NUCLEAR REPULSION ENERGY']['HSG-19-dimer' ] = 410.08888873
DATA['NUCLEAR REPULSION ENERGY']['HSG-19-monoA-unCP' ] = 83.35857717
DATA['NUCLEAR REPULSION ENERGY']['HSG-19-monoB-unCP' ] = 202.93507303
DATA['NUCLEAR REPULSION ENERGY']['HSG-20-dimer' ] = 392.20505391
DATA['NUCLEAR REPULSION ENERGY']['HSG-20-monoA-unCP' ] = 82.90559609
DATA['NUCLEAR REPULSION ENERGY']['HSG-20-monoB-unCP' ] = 202.38032057
DATA['NUCLEAR REPULSION ENERGY']['HSG-21-dimer' ] = 495.71409832
DATA['NUCLEAR REPULSION ENERGY']['HSG-21-monoA-unCP' ] = 169.11593456
DATA['NUCLEAR REPULSION ENERGY']['HSG-21-monoB-unCP' ] = 179.49546339
DATA['NUCLEAR REPULSION ENERGY']['HSG-1-monoA-CP' ] = 13.33595232
DATA['NUCLEAR REPULSION ENERGY']['HSG-1-monoB-CP' ] = 332.12261009
DATA['NUCLEAR REPULSION ENERGY']['HSG-2-monoA-CP' ] = 41.89071165
DATA['NUCLEAR REPULSION ENERGY']['HSG-2-monoB-CP' ] = 501.75349414
DATA['NUCLEAR REPULSION ENERGY']['HSG-3-monoA-CP' ] = 194.80446994
DATA['NUCLEAR REPULSION ENERGY']['HSG-3-monoB-CP' ] = 202.93507303
DATA['NUCLEAR REPULSION ENERGY']['HSG-4-monoA-CP' ] = 336.06029689
DATA['NUCLEAR REPULSION ENERGY']['HSG-4-monoB-CP' ] = 40.09418196
DATA['NUCLEAR REPULSION ENERGY']['HSG-5-monoA-CP' ] = 440.48402439
DATA['NUCLEAR REPULSION ENERGY']['HSG-5-monoB-CP' ] = 39.79355972
DATA['NUCLEAR REPULSION ENERGY']['HSG-6-monoA-CP' ] = 112.25425669
DATA['NUCLEAR REPULSION ENERGY']['HSG-6-monoB-CP' ] = 202.38032057
DATA['NUCLEAR REPULSION ENERGY']['HSG-7-monoA-CP' ] = 302.68630925
DATA['NUCLEAR REPULSION ENERGY']['HSG-7-monoB-CP' ] = 256.12378323
DATA['NUCLEAR REPULSION ENERGY']['HSG-8-monoA-CP' ] = 298.54657988
DATA['NUCLEAR REPULSION ENERGY']['HSG-8-monoB-CP' ] = 204.68604075
DATA['NUCLEAR REPULSION ENERGY']['HSG-9-monoA-CP' ] = 298.58992071
DATA['NUCLEAR REPULSION ENERGY']['HSG-9-monoB-CP' ] = 179.49546339
DATA['NUCLEAR REPULSION ENERGY']['HSG-10-monoA-CP' ] = 179.66798724
DATA['NUCLEAR REPULSION ENERGY']['HSG-10-monoB-CP' ] = 180.34079666
DATA['NUCLEAR REPULSION ENERGY']['HSG-11-monoA-CP' ] = 296.89990217
DATA['NUCLEAR REPULSION ENERGY']['HSG-11-monoB-CP' ] = 180.34079666
DATA['NUCLEAR REPULSION ENERGY']['HSG-12-monoA-CP' ] = 82.71734142
DATA['NUCLEAR REPULSION ENERGY']['HSG-12-monoB-CP' ] = 332.12261009
DATA['NUCLEAR REPULSION ENERGY']['HSG-13-monoA-CP' ] = 134.28280330
DATA['NUCLEAR REPULSION ENERGY']['HSG-13-monoB-CP' ] = 202.38032057
DATA['NUCLEAR REPULSION ENERGY']['HSG-14-monoA-CP' ] = 134.10189365
DATA['NUCLEAR REPULSION ENERGY']['HSG-14-monoB-CP' ] = 332.12261009
DATA['NUCLEAR REPULSION ENERGY']['HSG-15-monoA-CP' ] = 42.22202660
DATA['NUCLEAR REPULSION ENERGY']['HSG-15-monoB-CP' ] = 131.69625678
DATA['NUCLEAR REPULSION ENERGY']['HSG-16-monoA-CP' ] = 135.70381177
DATA['NUCLEAR REPULSION ENERGY']['HSG-16-monoB-CP' ] = 269.04078448
DATA['NUCLEAR REPULSION ENERGY']['HSG-17-monoA-CP' ] = 42.20972067
DATA['NUCLEAR REPULSION ENERGY']['HSG-17-monoB-CP' ] = 270.70970086
DATA['NUCLEAR REPULSION ENERGY']['HSG-18-monoA-CP' ] = 42.43370398
DATA['NUCLEAR REPULSION ENERGY']['HSG-18-monoB-CP' ] = 332.12261009
DATA['NUCLEAR REPULSION ENERGY']['HSG-19-monoA-CP' ] = 83.35857717
DATA['NUCLEAR REPULSION ENERGY']['HSG-19-monoB-CP' ] = 202.93507303
DATA['NUCLEAR REPULSION ENERGY']['HSG-20-monoA-CP' ] = 82.90559609
DATA['NUCLEAR REPULSION ENERGY']['HSG-20-monoB-CP' ] = 202.38032057
DATA['NUCLEAR REPULSION ENERGY']['HSG-21-monoA-CP' ] = 169.11593456
DATA['NUCLEAR REPULSION ENERGY']['HSG-21-monoB-CP' ] = 179.49546339
|
kratman/psi4public
|
psi4/share/psi4/databases/HSG.py
|
Python
|
gpl-2.0
| 46,440
|
[
"Psi4"
] |
685d372a4461809fd8be0ace4fc81626cdcffab30a63ce6e7a463bc771472654
|
# -*- coding: utf-8 -*-
import logging
import os
import subprocess
from galaxy.datatypes.data import get_file_peek, Text
from galaxy.datatypes.metadata import MetadataElement
log = logging.getLogger(__name__)
def count_special_lines(word, filename, invert=False):
"""
    Search for special 'words' in a file using the grep tool.
    grep is used to speed up the searching and counting.
    The number of hits is returned.
"""
try:
cmd = ["grep", "-c"]
if invert:
cmd.append('-v')
cmd.extend([word, filename])
out = subprocess.Popen(cmd, stdout=subprocess.PIPE)
return int(out.communicate()[0].split()[0])
    except Exception:
pass
return 0
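# Illustrative usage (not part of the original module): count how many Stockholm
# alignment headers, i.e. lines like "# STOCKHOLM 1.0", a file contains.
# The file name below is a placeholder.
# n_alignments = count_special_lines('^#[[:space:]+]STOCKHOLM[[:space:]+]1.0', 'alignments.sto')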
class Stockholm_1_0(Text):
file_ext = "stockholm"
MetadataElement( name="number_of_alignments", default=0, desc="Number of multiple alignments", readonly=True, visible=True, optional=True, no_value=0 )
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
dataset.peek = get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
            if (dataset.metadata.number_of_alignments == 1):
                dataset.blurb = "1 alignment"
            else:
                dataset.blurb = "%s alignments" % dataset.metadata.number_of_alignments
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disc'
def sniff( self, filename ):
if count_special_lines('^#[[:space:]+]STOCKHOLM[[:space:]+]1.0', filename) > 0:
return True
else:
return False
def set_meta( self, dataset, **kwd ):
"""
        Set the number of alignments in the dataset.
        """
        dataset.metadata.number_of_alignments = count_special_lines('^#[[:space:]+]STOCKHOLM[[:space:]+]1.0', dataset.file_name)
def split( cls, input_datasets, subdir_generator_function, split_params):
"""
Split the input files by model records.
"""
if split_params is None:
return None
if len(input_datasets) > 1:
raise Exception("STOCKHOLM-file splitting does not support multiple files")
input_files = [ds.file_name for ds in input_datasets]
chunk_size = None
if split_params['split_mode'] == 'number_of_parts':
raise Exception('Split mode "%s" is currently not implemented for STOCKHOLM-files.' % split_params['split_mode'])
elif split_params['split_mode'] == 'to_size':
chunk_size = int(split_params['split_size'])
else:
raise Exception('Unsupported split mode %s' % split_params['split_mode'])
def _read_stockholm_records( filename ):
lines = []
with open(filename) as handle:
for line in handle:
lines.append( line )
if line.strip() == '//':
yield lines
lines = []
def _write_part_stockholm_file( accumulated_lines ):
part_dir = subdir_generator_function()
part_path = os.path.join( part_dir, os.path.basename( input_files[0] ) )
part_file = open( part_path, 'w' )
part_file.writelines( accumulated_lines )
part_file.close()
try:
stockholm_records = _read_stockholm_records( input_files[0] )
stockholm_lines_accumulated = []
for counter, stockholm_record in enumerate( stockholm_records, start=1):
stockholm_lines_accumulated.extend( stockholm_record )
if counter % chunk_size == 0:
_write_part_stockholm_file( stockholm_lines_accumulated )
stockholm_lines_accumulated = []
if stockholm_lines_accumulated:
_write_part_stockholm_file( stockholm_lines_accumulated )
except Exception as e:
log.error('Unable to split files: %s' % str(e))
raise
split = classmethod(split)
|
yhoogstrate/tools-iuc
|
datatypes/msa/stockholm_1_0/stockholm_1_0.py
|
Python
|
mit
| 4,042
|
[
"Galaxy"
] |
5a6235c2635e2dde8d3f31869fd45f28e6c220e677c02acf7ae7ce773375fb24
|
"""
Static comments plugin for Pelican.
"""
from setuptools import setup
setup(
name='pelican_comments',
version='0.0.1',
long_description=__doc__,
py_modules=['pelican_comments'],
author='Brian St. Pierre',
author_email='brian@bstpierre.org',
classifiers=[
'License :: OSI Approved :: GNU Affero General Public License v3',
'Environment :: Plugins',
'Topic :: Internet :: WWW/HTTP',
],
)
|
bstpierre/pelican-comments
|
setup.py
|
Python
|
agpl-3.0
| 452
|
[
"Brian"
] |
5bbe5a2d83fa57c9b5159b400a8097312fe80d83d4e017b8bbf86262e218d4f5
|
# Version: 0.19
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/python-versioneer/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3
* [![Latest Version][pypi-image]][pypi-url]
* [![Build Status][travis-image]][travis-url]
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md))
* run `versioneer install` in your source tree, commit the results
* Verify version information with `python setup.py version`
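For illustration, a minimal `[versioneer]` section in `setup.cfg` might look like
the following (the paths and prefix are placeholders for your own project):
    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/myproject/_version.py
    versionfile_build = myproject/_version.py
    tag_prefix =
    parentdir_prefix = myproject-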
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
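As an illustration, the Git installer records an `export-subst` attribute for the
version file in `.gitattributes`, along the lines of (the path is a placeholder):
    src/myproject/_version.py export-subst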
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
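Similarly, `setup.py` typically wires in the computed version and the custom
commands by importing the top-level module, for example:
    import versioneer
    setup(version=versioneer.get_version(),
          cmdclass=versioneer.get_cmdclass(), ...)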
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
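As a small, illustrative sketch (the values below are invented), the internal
rendering helpers turn a parsed `git describe` result into a pep440 string
roughly like this:
    pieces = {"closest-tag": "0.11", "distance": 2, "short": "1076c97",
              "dirty": True, "long": "<full sha1>", "error": None, "date": None}
    render(pieces, "pep440")["version"]  # -> "0.11+2.g1076c97.dirty"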
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/python-versioneer/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to be easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## Similar projects
* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time
dependency
* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of
versioneer
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg
[pypi-url]: https://pypi.python.org/pypi/versioneer/
[travis-image]:
https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg
[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
"""
import configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.ConfigParser()
with open(setup_cfg, "r") as f:
parser.read_file(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY['git'] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty=",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as f:
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty=",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
with open(".gitattributes", "r") as f:
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
except EnvironmentError:
pass
if not present:
with open(".gitattributes", "a+") as f:
f.write("%s export-subst\n" % versionfile_source)
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.19) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass(cmdclass=None):
"""Get the custom setuptools/distutils subclasses used by Versioneer.
If the package uses a different cmdclass (e.g. one from numpy), it
    should be provided as an argument.
"""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/python-versioneer/python-versioneer/issues/52
cmds = {} if cmdclass is None else cmdclass.copy()
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if 'build_py' in cmds:
_build_py = cmds['build_py']
elif "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "setuptools" in sys.modules:
from setuptools.command.build_ext import build_ext as _build_ext
else:
from distutils.command.build_ext import build_ext as _build_ext
class cmd_build_ext(_build_ext):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_ext.run(self)
if self.inplace:
# build_ext --inplace will only build extensions in
# build/lib<..> dir with no _version.py to write to.
# As in place builds will already have a _version.py
# in the module dir, we do not need to write one.
return
# now locate _version.py in the new build/ directory and replace
# it with an updated value
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_ext"] = cmd_build_ext
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
from py2exe.distutils_buildexe import py2exe as _py2exe
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if 'sdist' in cmds:
_sdist = cmds['sdist']
elif "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
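    # For reference (this note is not part of the upstream file): the
    # .gitattributes entry that do_vcs_install creates/updates looks like
    #
    #     src/myproject/_version.py export-subst
    #
    # i.e. the versionfile_source path from setup.cfg followed by the
    # 'export-subst' attribute, so 'git archive' expands the version keywords.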
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
simongibbons/numpy
|
versioneer.py
|
Python
|
bsd-3-clause
| 70,185
|
[
"Brian"
] |
3a0cf64223b6dd955068c4fd2e7a0009e9235c4ba13f3c559d7957ac6a41eea9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2020 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Helpers for reading netcdf-based files."""
import logging
import dask.array as da
import netCDF4
import numpy as np
import xarray as xr
from satpy import CHUNK_SIZE
from satpy.readers.file_handlers import BaseFileHandler
from satpy.readers.utils import np2str
LOG = logging.getLogger(__name__)
class NetCDF4FileHandler(BaseFileHandler):
"""Small class for inspecting a NetCDF4 file and retrieving its metadata/header data.
File information can be accessed using bracket notation. Variables are
accessed by using:
wrapper["var_name"]
Or:
wrapper["group/subgroup/var_name"]
Attributes can be accessed by appending "/attr/attr_name" to the
item string:
wrapper["group/subgroup/var_name/attr/units"]
Or for global attributes:
wrapper["/attr/platform_short_name"]
    Or for all global attributes:
wrapper["/attrs"]
Note that loading datasets requires reopening the original file
(unless those datasets are cached, see below), but to get just the
shape of the dataset append "/shape" to the item string:
wrapper["group/subgroup/var_name/shape"]
If your file has many small data variables that are frequently accessed,
    you may choose to cache some of them. You can do this by passing a number;
any variable smaller than this number in bytes will be read into RAM.
Warning, this part of the API is provisional and subject to change.
You may get an additional speedup by passing ``cache_handle=True``. This
will keep the netCDF4 dataset handles open throughout the lifetime of the
object, and instead of using `xarray.open_dataset` to open every data
variable, a dask array will be created "manually". This may be useful if
you have a dataset distributed over many files, such as for FCI. Note
that the coordinates will be missing in this case. If you use this option,
``xarray_kwargs`` will have no effect.
Args:
filename (str): File to read
filename_info (dict): Dictionary with filename information
filetype_info (dict): Dictionary with filetype information
auto_maskandscale (bool): Apply mask and scale factors
        xarray_kwargs (dict): Additional arguments to `xarray.open_dataset`
cache_var_size (int): Cache variables smaller than this size.
cache_handle (bool): Keep files open for lifetime of filehandler.
"""
file_handle = None
def __init__(self, filename, filename_info, filetype_info,
auto_maskandscale=False, xarray_kwargs=None,
cache_var_size=0, cache_handle=False):
"""Initialize object."""
super(NetCDF4FileHandler, self).__init__(
filename, filename_info, filetype_info)
self.file_content = {}
self.cached_file_content = {}
try:
file_handle = netCDF4.Dataset(self.filename, 'r')
except IOError:
LOG.exception(
'Failed reading file %s. Possibly corrupted file', self.filename)
raise
self.auto_maskandscale = auto_maskandscale
if hasattr(file_handle, "set_auto_maskandscale"):
file_handle.set_auto_maskandscale(auto_maskandscale)
self.collect_metadata("", file_handle)
self.collect_dimensions("", file_handle)
if cache_var_size > 0:
self.collect_cache_vars(
[varname for (varname, var)
in self.file_content.items()
if isinstance(var, netCDF4.Variable)
and isinstance(var.dtype, np.dtype) # vlen may be str
and var.size * var.dtype.itemsize < cache_var_size],
file_handle)
if cache_handle:
self.file_handle = file_handle
else:
file_handle.close()
self._xarray_kwargs = xarray_kwargs or {}
self._xarray_kwargs.setdefault('chunks', CHUNK_SIZE)
self._xarray_kwargs.setdefault('mask_and_scale', self.auto_maskandscale)
def __del__(self):
"""Delete the file handler."""
if self.file_handle is not None:
try:
self.file_handle.close()
except RuntimeError: # presumably closed already
pass
def _collect_global_attrs(self, obj):
"""Collect all the global attributes for the provided file object."""
global_attrs = {}
for key in obj.ncattrs():
fc_key = f"/attr/{key}"
value = self._get_attr_value(obj, key)
self.file_content[fc_key] = global_attrs[key] = value
self.file_content["/attrs"] = global_attrs
def _collect_attrs(self, name, obj):
"""Collect all the attributes for the provided file object."""
for key in obj.ncattrs():
fc_key = f"{name}/attr/{key}"
value = self._get_attr_value(obj, key)
self.file_content[fc_key] = value
def _get_attr_value(self, obj, key):
value = getattr(obj, key)
try:
value = np2str(value)
except ValueError:
pass
return value
def collect_metadata(self, name, obj):
"""Collect all file variables and attributes for the provided file object.
This method also iterates through subgroups of the provided object.
"""
# Look through each subgroup
base_name = name + "/" if name else ""
self._collect_groups_info(base_name, obj)
self._collect_variables_info(base_name, obj)
if not name:
self._collect_global_attrs(obj)
else:
self._collect_attrs(name, obj)
def _collect_groups_info(self, base_name, obj):
for group_name, group_obj in obj.groups.items():
full_group_name = base_name + group_name
self.file_content[full_group_name] = group_obj
self._collect_attrs(full_group_name, group_obj)
self.collect_metadata(full_group_name, group_obj)
def _collect_variables_info(self, base_name, obj):
for var_name, var_obj in obj.variables.items():
var_name = base_name + var_name
self.file_content[var_name] = var_obj
self.file_content[var_name + "/dtype"] = var_obj.dtype
self.file_content[var_name + "/shape"] = var_obj.shape
self.file_content[var_name + "/dimensions"] = var_obj.dimensions
self._collect_attrs(var_name, var_obj)
def collect_dimensions(self, name, obj):
"""Collect dimensions."""
for dim_name, dim_obj in obj.dimensions.items():
dim_name = "{}/dimension/{}".format(name, dim_name)
self.file_content[dim_name] = len(dim_obj)
def collect_cache_vars(self, cache_vars, obj):
"""Collect data variables for caching.
This method will collect some data variables and store them in RAM.
This may be useful if some small variables are frequently accessed,
        to avoid needlessly opening and closing the file over and over,
        which in the case of xarray carries some overhead.
        Should be called after `collect_metadata`.
Args:
cache_vars (List[str]): Names of data variables to be cached.
obj (netCDF4.Dataset): Dataset object from which to read them.
"""
for var_name in cache_vars:
v = self.file_content[var_name]
self.cached_file_content[var_name] = xr.DataArray(
v[:], dims=v.dimensions, attrs=v.__dict__, name=v.name)
def __getitem__(self, key):
"""Get item for given key."""
val = self.file_content[key]
if isinstance(val, netCDF4.Variable):
return self._get_variable(key, val)
if isinstance(val, netCDF4.Group):
return self._get_group(key, val)
return val
def _get_variable(self, key, val):
"""Get a variable from the netcdf file."""
if key in self.cached_file_content:
return self.cached_file_content[key]
# these datasets are closed and inaccessible when the file is
# closed, need to reopen
# TODO: Handle HDF4 versus NetCDF3 versus NetCDF4
parts = key.rsplit('/', 1)
if len(parts) == 2:
group, key = parts
else:
group = None
if self.file_handle is not None:
val = self._get_var_from_filehandle(group, key)
else:
val = self._get_var_from_xr(group, key)
return val
def _get_group(self, key, val):
"""Get a group from the netcdf file."""
# Full groups are conveniently read with xr even if file_handle is available
with xr.open_dataset(self.filename, group=key,
**self._xarray_kwargs) as nc:
val = nc
return val
def _get_var_from_xr(self, group, key):
with xr.open_dataset(self.filename, group=group,
**self._xarray_kwargs) as nc:
val = nc[key]
# Even though `chunks` is specified in the kwargs, xarray
# uses dask.arrays only for data variables that have at least
            # one dimension; for zero-dimensional data variables (scalars),
            # it uses its own lazy loading. When those are
            # accessed after file closure, xarray reopens the file without
            # closing it again. This can leave many open file
            # objects, which may in turn trigger a segmentation fault (see
            # https://github.com/pydata/xarray/issues/2954#issuecomment-491221266).
if not val.chunks:
val.load()
return val
def _get_var_from_filehandle(self, group, key):
# Not getting coordinates as this is more work, therefore more
# overhead, and those are not used downstream.
if group is None:
g = self.file_handle
else:
g = self.file_handle[group]
v = g[key]
x = xr.DataArray(
da.from_array(v), dims=v.dimensions, attrs=v.__dict__,
name=v.name)
return x
def __contains__(self, item):
"""Get item from file content."""
return item in self.file_content
def get(self, item, default=None):
"""Get item."""
if item in self:
return self[item]
else:
return default
|
pytroll/satpy
|
satpy/readers/netcdf_utils.py
|
Python
|
gpl-3.0
| 11,229
|
[
"NetCDF"
] |
461a561463f8be3154d4e86b09923ef2eaa2f0650929234fda2c88007c67b8a9
|
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SearchIO parser for BLAST+ tab output format, with or without comments."""
import re
from Bio._py3k import _as_bytes, _bytes_to_string
from Bio._py3k import basestring
from Bio.SearchIO._index import SearchIndexer
from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment
__all__ = ['BlastTabIndexer', 'BlastTabParser', 'BlastTabWriter']
__docformat__ = "restructuredtext en"
# longname-shortname map
# maps the column names shown in commented output to their short names
# (the ones used on the command line)
_LONG_SHORT_MAP = {
'query id': 'qseqid',
'query acc.': 'qacc',
'query acc.ver': 'qaccver',
'query length': 'qlen',
'subject id': 'sseqid',
'subject acc.': 'sacc',
'subject acc.ver': 'saccver',
'subject length': 'slen',
'alignment length': 'length',
'bit score': 'bitscore',
'score': 'score',
'evalue': 'evalue',
'identical': 'nident',
'% identity': 'pident',
'positives': 'positive',
'% positives': 'ppos',
'mismatches': 'mismatch',
'gaps': 'gaps',
'q. start': 'qstart',
'q. end': 'qend',
's. start': 'sstart',
's. end': 'send',
'query frame': 'qframe',
'sbjct frame': 'sframe',
'query/sbjct frames': 'frames',
'query seq': 'qseq',
'subject seq': 'sseq',
'gap opens': 'gapopen',
'query gi': 'qgi',
'subject ids': 'sallseqid',
'subject gi': 'sgi',
'subject gis': 'sallgi',
'BTOP': 'btop',
'subject accs.': 'sallacc',
'subject tax ids': 'staxids',
'subject sci names': 'sscinames',
'subject com names': 'scomnames',
'subject blast names': 'sblastnames',
'subject super kingdoms': 'sskingdoms',
'subject title': 'stitle',
'subject titles': 'salltitles',
'subject strand': 'sstrand',
'% subject coverage': 'qcovs',
'% hsp coverage': 'qcovhsp',
}
# function to create a list from semicolon-delimited string
# used in BlastTabParser._parse_result_row
_list_semicol = lambda x: x.split(';')
_list_diamond = lambda x: x.split('<>')
# column to class attribute map
_COLUMN_QRESULT = {
'qseqid': ('id', str),
'qacc': ('accession', str),
'qaccver': ('accession_version', str),
'qlen': ('seq_len', int),
'qgi': ('gi', str),
}
_COLUMN_HIT = {
'sseqid': ('id', str),
'sallseqid': ('id_all', _list_semicol),
'sacc': ('accession', str),
'saccver': ('accession_version', str),
'sallacc': ('accession_all', _list_semicol),
'sgi': ('gi', str),
'sallgi': ('gi_all', str),
'slen': ('seq_len', int),
'staxids': ('tax_ids', _list_semicol),
'sscinames': ('sci_names', _list_semicol),
'scomnames': ('com_names', _list_semicol),
'sblastnames': ('blast_names', _list_semicol),
'sskingdoms': ('super_kingdoms', _list_semicol),
'stitle': ('title', str),
'salltitles': ('title_all', _list_diamond),
# set strand as HSP property?
'sstrand': ('strand', str),
'qcovs': ('query_coverage', float),
}
_COLUMN_HSP = {
'bitscore': ('bitscore', float),
'score': ('bitscore_raw', int),
'evalue': ('evalue', float),
'nident': ('ident_num', int),
'pident': ('ident_pct', float),
'positive': ('pos_num', int),
'ppos': ('pos_pct', float),
'mismatch': ('mismatch_num', int),
'gaps': ('gap_num', int),
'gapopen': ('gapopen_num', int),
'btop': ('btop', str),
'qcovhsp': ('query_coverage', float),
}
_COLUMN_FRAG = {
'length': ('aln_span', int),
'qstart': ('query_start', int),
'qend': ('query_end', int),
'sstart': ('hit_start', int),
'send': ('hit_end', int),
'qframe': ('query_frame', int),
'sframe': ('hit_frame', int),
'frames': ('frames', str),
'qseq': ('query', str),
'sseq': ('hit', str),
}
_SUPPORTED_FIELDS = set(list(_COLUMN_QRESULT) + list(_COLUMN_HIT) +
list(_COLUMN_HSP) + list(_COLUMN_FRAG))
# column order in the non-commented tabular output variant
# values must be keys inside the column-attribute maps above
_DEFAULT_FIELDS = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch',
'gapopen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
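# Illustrative default-format row (made up, not taken from any real search):
# a line like
#     'query1\tsubj1\t97.50\t40\t1\t0\t1\t40\t100\t139\t1e-18\t80.1'
# is split on tabs and paired positionally with _DEFAULT_FIELDS, so 'pident'
# receives 97.50, 'length' receives 40, 'evalue' receives 1e-18, and so on.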
# one field from each of the following sets must exist in order for the
# parser to work
_MIN_QUERY_FIELDS = set(['qseqid', 'qacc', 'qaccver'])
_MIN_HIT_FIELDS = set(['sseqid', 'sacc', 'saccver', 'sallseqid'])
# simple function to create BLAST HSP attributes that may be computed if
# certain other attributes are present
# This was previously implemented in the HSP objects in the old model
_RE_GAPOPEN = re.compile(r'\w-')
def _compute_gapopen_num(hsp):
"""Returns the number of gap openings in the given HSP."""
gapopen = 0
for seq_type in ('query', 'hit'):
seq = str(getattr(hsp, seq_type).seq)
gapopen += len(re.findall(_RE_GAPOPEN, seq))
return gapopen
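# Small sanity check of the gap-opening count (not part of the original
# source): _RE_GAPOPEN matches a residue immediately followed by a gap
# character, i.e. the start of each gap run.
#
#     len(re.findall(_RE_GAPOPEN, 'ACG--TA-C'))  # -> 2 (one double gap, one single gap)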
def _augment_blast_hsp(hsp, attr):
"""Calculates the given HSP attribute, for writing."""
if attr == 'aln_span':
# aln_span is number of identical matches + mismatches + gaps
func = lambda hsp: hsp.ident_num + hsp.mismatch_num + hsp.gap_num
# ident and gap will require the num values to be computed first
elif attr.startswith('ident'):
func = lambda hsp: hsp.aln_span - hsp.mismatch_num - hsp.gap_num
elif attr.startswith('gap'):
func = lambda hsp: hsp.aln_span - hsp.ident_num - hsp.mismatch_num
elif attr == 'mismatch_num':
func = lambda hsp: hsp.aln_span - hsp.ident_num - hsp.gap_num
elif attr == 'gapopen_num':
if not hasattr(hsp, 'query') or not hasattr(hsp, 'hit'):
# mock function so that the except clause below is triggered
# as both the query and hit are required to compute gapopen
def mock(hsp):
raise AttributeError
func = mock
else:
func = _compute_gapopen_num
# set the num values
# requires the endswith check, since we only want to set 'num' or 'span'
# attributes here
if not hasattr(hsp, attr) and not attr.endswith('_pct'):
value = func(hsp)
setattr(hsp, attr, value)
# if the attr is a percent value, calculate it
if attr == 'ident_pct':
func2 = lambda hsp: hsp.ident_num / float(hsp.aln_span) * 100
elif attr == 'pos_pct':
        func2 = lambda hsp: hsp.pos_num / float(hsp.aln_span) * 100
elif attr == 'gap_pct':
func2 = lambda hsp: hsp.gap_num / float(hsp.aln_span) * 100
else:
func2 = None
# set the pct values
if func2 is not None:
value = func2(hsp)
setattr(hsp, attr, value)
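# Hedged worked example of the derivations above (not in the original file),
# using a stand-in object instead of a real HSP:
#
#     from types import SimpleNamespace
#     hsp = SimpleNamespace(ident_num=90, mismatch_num=8, gap_num=2)
#     _augment_blast_hsp(hsp, 'aln_span')   # sets hsp.aln_span = 100
#     _augment_blast_hsp(hsp, 'ident_pct')  # sets hsp.ident_pct = 90.0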
class BlastTabParser(object):
"""Parser for the BLAST tabular format."""
def __init__(self, handle, comments=False, fields=_DEFAULT_FIELDS):
self.handle = handle
self.has_comments = comments
self.fields = self._prep_fields(fields)
self.line = self.handle.readline().strip()
def __iter__(self):
# stop iteration if file has no lines
if not self.line:
raise StopIteration
# determine which iterator to use
elif self.has_comments:
iterfunc = self._parse_commented_qresult
else:
iterfunc = self._parse_qresult
for qresult in iterfunc():
yield qresult
def _prep_fields(self, fields):
"""Validates and formats the given fields for use by the parser."""
# cast into list if fields is a space-separated string
if isinstance(fields, basestring):
fields = fields.strip().split(' ')
# blast allows 'std' as a proxy for the standard default lists
# we want to transform 'std' to its proper column names
if 'std' in fields:
idx = fields.index('std')
fields = fields[:idx] + _DEFAULT_FIELDS + fields[idx + 1:]
# if set(fields) has a null intersection with minimum required
# fields for hit and query, raise an exception
if not set(fields).intersection(_MIN_QUERY_FIELDS) or \
not set(fields).intersection(_MIN_HIT_FIELDS):
raise ValueError("Required query and/or hit ID field not found.")
return fields
def _parse_commented_qresult(self):
"""Iterator returning `QueryResult` objects from a commented file."""
while True:
comments = self._parse_comments()
if comments:
try:
self.fields = comments['fields']
# iterator for the query results
qres_iter = self._parse_qresult()
except KeyError:
# no fields means the query has no results
assert 'fields' not in comments
# create an iterator returning one empty qresult
# if the query has no results
qres_iter = iter([QueryResult()])
for qresult in qres_iter:
for key, value in comments.items():
setattr(qresult, key, value)
yield qresult
else:
break
def _parse_comments(self):
"""Returns a dictionary containing tab file comments."""
comments = {}
while True:
# parse program and version
# example: # BLASTX 2.2.26+
if 'BLAST' in self.line and 'processed' not in self.line:
program_line = self.line[len(' #'):].split(' ')
comments['program'] = program_line[0].lower()
comments['version'] = program_line[1]
# parse query id and description (if available)
# example: # Query: gi|356995852 Mus musculus POU domain
elif 'Query' in self.line:
query_line = self.line[len('# Query: '):].split(' ', 1)
comments['id'] = query_line[0]
if len(query_line) == 2:
comments['description'] = query_line[1]
# parse target database
# example: # Database: db/minirefseq_protein
elif 'Database' in self.line:
comments['target'] = self.line[len('# Database: '):]
# parse RID (from remote searches)
elif 'RID' in self.line:
comments['rid'] = self.line[len('# RID: '):]
# parse column order, required for parsing the result lines
# example: # Fields: query id, query gi, query acc., query length
elif 'Fields' in self.line:
comments['fields'] = self._parse_fields_line()
# if the line has these strings, it's either the end of a comment
# or the end of a file, so we return all the comments we've parsed
elif ' hits found' in self.line or 'processed' in self.line:
self.line = self.handle.readline().strip()
return comments
self.line = self.handle.readline()
if not self.line:
return comments
else:
self.line = self.line.strip()
def _parse_fields_line(self):
"""Returns a list of column short names from the 'Fields'
comment line."""
raw_field_str = self.line[len('# Fields: '):]
long_fields = raw_field_str.split(', ')
fields = [_LONG_SHORT_MAP[long_name] for long_name in long_fields]
return self._prep_fields(fields)
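    # Illustrative example (not in the original source) of how a commented
    # "Fields" line maps to short column names via _LONG_SHORT_MAP:
    #
    #     '# Fields: query id, subject id, evalue, bit score'
    #     -> ['qseqid', 'sseqid', 'evalue', 'bitscore']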
def _parse_result_row(self):
"""Returns a dictionary of parsed row values."""
fields = self.fields
columns = self.line.strip().split('\t')
assert len(fields) == len(columns), "Expected %i columns, found: " \
"%i" % (len(fields), len(columns))
qresult, hit, hsp, frag = {}, {}, {}, {}
for idx, value in enumerate(columns):
sname = fields[idx]
# flag to check if any of the _COLUMNs contain sname
in_mapping = False
# iterate over each dict, mapping pair to determine
# attribute name and value of each column
for parsed_dict, mapping in (
(qresult, _COLUMN_QRESULT),
(hit, _COLUMN_HIT),
(hsp, _COLUMN_HSP),
(frag, _COLUMN_FRAG)):
# process parsed value according to mapping
if sname in mapping:
attr_name, caster = mapping[sname]
if caster is not str:
value = caster(value)
parsed_dict[attr_name] = value
in_mapping = True
# make sure that any unhandled field is not supported
if not in_mapping:
assert sname not in _SUPPORTED_FIELDS
return {'qresult': qresult, 'hit': hit, 'hsp': hsp, 'frag': frag}
def _get_id(self, parsed):
"""Returns the value used for a QueryResult or Hit ID from a parsed row."""
        # use 'id', with 'id_all', 'accession' and 'accession_version' as
        # fallbacks; at least one of these must have a value, since we
        # checked for their presence when validating the fields
id_cache = parsed.get('id')
if id_cache is None and 'id_all' in parsed:
id_cache = parsed.get('id_all')[0]
if id_cache is None:
id_cache = parsed.get('accession')
if id_cache is None:
id_cache = parsed.get('accession_version')
return id_cache
def _parse_qresult(self):
"""Generator function that returns QueryResult objects."""
# state values, used to determine what to do with each line
state_EOF = 0
state_QRES_NEW = 1
state_QRES_SAME = 3
state_HIT_NEW = 2
state_HIT_SAME = 4
# dummies for initial states
qres_state = None
hit_state = None
file_state = None
cur_qid = None
cur_hid = None
# dummies for initial id caches
prev_qid = None
prev_hid = None
# dummies for initial parsed value containers
cur, prev = None, None
hit_list, hsp_list = [], []
while True:
            # store previous line's parsed values if we've passed the first line
if cur is not None:
prev = cur
prev_qid = cur_qid
prev_hid = cur_hid
# only parse the line if it's not EOF or not a comment line
if self.line and not self.line.startswith('#'):
cur = self._parse_result_row()
cur_qid = self._get_id(cur['qresult'])
cur_hid = self._get_id(cur['hit'])
else:
file_state = state_EOF
# mock values for cur_qid and cur_hid since the line is empty
cur_qid, cur_hid = None, None
# get the state of hit and qresult
if prev_qid != cur_qid:
qres_state = state_QRES_NEW
else:
qres_state = state_QRES_SAME
# new hits are hits with different id or hits in a new qresult
if prev_hid != cur_hid or qres_state == state_QRES_NEW:
hit_state = state_HIT_NEW
else:
hit_state = state_HIT_SAME
# we're creating objects for the previously parsed line(s),
# so nothing is done in the first parsed line (prev == None)
if prev is not None:
# every line is essentially an HSP with one fragment, so we
# create both of these for every line
frag = HSPFragment(prev_hid, prev_qid)
for attr, value in prev['frag'].items():
# adjust coordinates to Python range
# NOTE: this requires both start and end coords to be
# present, otherwise a KeyError will be raised.
# Without this limitation, we might misleadingly set the
# start / end coords
for seq_type in ('query', 'hit'):
if attr == seq_type + '_start':
value = min(value,
prev['frag'][seq_type + '_end']) - 1
elif attr == seq_type + '_end':
value = max(value,
prev['frag'][seq_type + '_start'])
setattr(frag, attr, value)
# strand and frame setattr require the full parsed values
# to be set first
for seq_type in ('hit', 'query'):
# try to set hit and query frame
frame = self._get_frag_frame(frag, seq_type,
prev['frag'])
setattr(frag, '%s_frame' % seq_type, frame)
# try to set hit and query strand
strand = self._get_frag_strand(frag, seq_type,
prev['frag'])
setattr(frag, '%s_strand' % seq_type, strand)
hsp = HSP([frag])
for attr, value in prev['hsp'].items():
setattr(hsp, attr, value)
hsp_list.append(hsp)
# create hit and append to temp hit container if hit_state
# says we're not at the same hit or at a new query
if hit_state == state_HIT_NEW:
hit = Hit(hsp_list)
for attr, value in prev['hit'].items():
if attr != 'id_all':
setattr(hit, attr, value)
else:
# not setting hit ID since it's already set from the
# prev_hid above
setattr(hit, '_id_alt', value[1:])
hit_list.append(hit)
hsp_list = []
# create qresult and yield if we're at a new qresult or EOF
if qres_state == state_QRES_NEW or file_state == state_EOF:
qresult = QueryResult(hit_list, prev_qid)
for attr, value in prev['qresult'].items():
setattr(qresult, attr, value)
yield qresult
# if current line is EOF, break
if file_state == state_EOF:
break
hit_list = []
self.line = self.handle.readline().strip()
def _get_frag_frame(self, frag, seq_type, parsedict):
"""Returns `HSPFragment` frame given the object, its sequence type,
and its parsed dictionary values."""
assert seq_type in ('query', 'hit')
frame = getattr(frag, '%s_frame' % seq_type, None)
if frame is not None:
return frame
else:
if 'frames' in parsedict:
# frames is 'x1/x2' string, x1 is query frame, x2 is hit frame
idx = 0 if seq_type == 'query' else 1
return int(parsedict['frames'].split('/')[idx])
# else implicit None return
def _get_frag_strand(self, frag, seq_type, parsedict):
"""Returns `HSPFragment` strand given the object, its sequence type,
and its parsed dictionary values."""
# NOTE: this will never set the strands as 0 for protein
# queries / hits, since we can't detect the blast flavors
# from the columns alone.
assert seq_type in ('query', 'hit')
strand = getattr(frag, '%s_strand' % seq_type, None)
if strand is not None:
return strand
else:
# using parsedict instead of the fragment object since
            # we need the unadjusted coordinate values
start = parsedict.get('%s_start' % seq_type)
end = parsedict.get('%s_end' % seq_type)
if start is not None and end is not None:
return 1 if start <= end else -1
# else implicit None return
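# Worked illustration of the strand inference above (not part of the original
# source): with unadjusted one-based coordinates qstart=5, qend=105 the query
# strand is reported as 1, while sstart=200, send=100 yields a hit strand of -1.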
class BlastTabIndexer(SearchIndexer):
"""Indexer class for BLAST+ tab output."""
_parser = BlastTabParser
def __init__(self, filename, comments=False, fields=_DEFAULT_FIELDS):
SearchIndexer.__init__(self, filename, comments=comments, fields=fields)
# if the file doesn't have comments,
# get index of column used as the key (qseqid / qacc / qaccver)
if not self._kwargs['comments']:
if 'qseqid' in fields:
self._key_idx = fields.index('qseqid')
elif 'qacc' in fields:
self._key_idx = fields.index('qacc')
elif 'qaccver' in fields:
self._key_idx = fields.index('qaccver')
else:
raise ValueError("Custom fields is missing an ID column. "
"One of these must be present: 'qseqid', 'qacc', or 'qaccver'.")
def __iter__(self):
"""Iterates over the file handle; yields key, start offset, and length."""
handle = self._handle
handle.seek(0)
if not self._kwargs['comments']:
iterfunc = self._qresult_index
else:
iterfunc = self._qresult_index_commented
for key, offset, length in iterfunc():
yield _bytes_to_string(key), offset, length
def _qresult_index_commented(self):
"""Indexer for commented BLAST tabular files."""
handle = self._handle
handle.seek(0)
start_offset = 0
# mark of a new query
query_mark = None
# mark of the query's ID
qid_mark = _as_bytes('# Query: ')
# mark of the last line
end_mark = _as_bytes('# BLAST processed')
while True:
end_offset = handle.tell()
line = handle.readline()
if query_mark is None:
query_mark = line
start_offset = end_offset
elif line.startswith(qid_mark):
qresult_key = line[len(qid_mark):].split()[0]
elif line == query_mark or line.startswith(end_mark):
yield qresult_key, start_offset, end_offset - start_offset
start_offset = end_offset
elif not line:
break
def _qresult_index(self):
"""Indexer for noncommented BLAST tabular files."""
handle = self._handle
handle.seek(0)
start_offset = 0
qresult_key = None
key_idx = self._key_idx
tab_char = _as_bytes('\t')
while True:
# get end offset here since we only know a qresult ends after
# encountering the next one
end_offset = handle.tell()
line = handle.readline()
if qresult_key is None:
qresult_key = line.split(tab_char)[key_idx]
else:
try:
curr_key = line.split(tab_char)[key_idx]
except IndexError:
curr_key = _as_bytes('')
if curr_key != qresult_key:
yield qresult_key, start_offset, end_offset - start_offset
qresult_key = curr_key
start_offset = end_offset
# break if we've reached EOF
if not line:
break
def get_raw(self, offset):
"""Returns the raw string of a QueryResult object from the given offset."""
if self._kwargs['comments']:
getfunc = self._get_raw_qresult_commented
else:
getfunc = self._get_raw_qresult
return getfunc(offset)
def _get_raw_qresult(self, offset):
"""Returns the raw string of a single QueryResult from a noncommented file."""
handle = self._handle
handle.seek(offset)
qresult_raw = _as_bytes('')
tab_char = _as_bytes('\t')
key_idx = self._key_idx
qresult_key = None
while True:
line = handle.readline()
            # get the key from the first line (qresult key)
if qresult_key is None:
qresult_key = line.split(tab_char)[key_idx]
else:
try:
curr_key = line.split(tab_char)[key_idx]
except IndexError:
curr_key = _as_bytes('')
# only break when qresult is finished (key is different)
if curr_key != qresult_key:
break
# append to the raw string as long as qresult is the same
qresult_raw += line
return qresult_raw
def _get_raw_qresult_commented(self, offset):
"""Returns the raw string of a single QueryResult from a commented file."""
handle = self._handle
handle.seek(offset)
qresult_raw = _as_bytes('')
end_mark = _as_bytes('# BLAST processed')
# query mark is the line marking a new query
# something like '# TBLASTN 2.2.25+'
query_mark = None
line = handle.readline()
while line:
# since query_mark depends on the BLAST search, we need to obtain it
# first
if query_mark is None:
query_mark = line
# break when we've reached the next qresult or the search ends
elif line == query_mark or line.startswith(end_mark):
break
qresult_raw += line
line = handle.readline()
return qresult_raw
class BlastTabWriter(object):
"""Writer for blast-tab output format."""
def __init__(self, handle, comments=False, fields=_DEFAULT_FIELDS):
self.handle = handle
self.has_comments = comments
self.fields = fields
def write_file(self, qresults):
"""Writes to the handle, returns how many QueryResult objects are written."""
handle = self.handle
qresult_counter, hit_counter, hsp_counter, frag_counter = 0, 0, 0, 0
for qresult in qresults:
if self.has_comments:
handle.write(self._build_comments(qresult))
if qresult:
handle.write(self._build_rows(qresult))
if not self.has_comments:
qresult_counter += 1
hit_counter += len(qresult)
hsp_counter += sum(len(hit) for hit in qresult)
frag_counter += sum(len(hit.fragments) for hit in qresult)
# if it's commented and there are no hits in the qresult, we still
# increment the counter
if self.has_comments:
qresult_counter += 1
# commented files have a line saying how many queries were processed
if self.has_comments:
handle.write('# BLAST processed %i queries' % qresult_counter)
return qresult_counter, hit_counter, hsp_counter, frag_counter
def _build_rows(self, qresult):
"""Returns a string containing tabular rows of the QueryResult object."""
coordinates = set(['qstart', 'qend', 'sstart', 'send'])
qresult_lines = ''
for hit in qresult:
for hsp in hit:
line = []
for field in self.fields:
# get the column value ~ could either be an attribute
# of qresult, hit, or hsp
if field in _COLUMN_QRESULT:
value = getattr(qresult, _COLUMN_QRESULT[field][0])
elif field in _COLUMN_HIT:
if field == 'sallseqid':
value = getattr(hit, 'id_all')
else:
value = getattr(hit, _COLUMN_HIT[field][0])
# special case, since 'frames' can be determined from
# query frame and hit frame
elif field == 'frames':
value = '%i/%i' % (hsp.query_frame, hsp.hit_frame)
elif field in _COLUMN_HSP:
try:
value = getattr(hsp, _COLUMN_HSP[field][0])
except AttributeError:
attr = _COLUMN_HSP[field][0]
_augment_blast_hsp(hsp, attr)
value = getattr(hsp, attr)
elif field in _COLUMN_FRAG:
value = getattr(hsp, _COLUMN_FRAG[field][0])
else:
assert field not in _SUPPORTED_FIELDS
continue
# adjust from and to according to strand, if from and to
# is included in the output field
if field in coordinates:
value = self._adjust_coords(field, value, hsp)
# adjust output formatting
value = self._adjust_output(field, value)
line.append(value)
hsp_line = '\t'.join(line)
qresult_lines += hsp_line + '\n'
return qresult_lines
def _adjust_coords(self, field, value, hsp):
"""Adjusts start and end coordinates according to strand."""
assert field in ('qstart', 'qend', 'sstart', 'send')
# determine sequence type to operate on based on field's first letter
seq_type = 'query' if field.startswith('q') else 'hit'
strand = getattr(hsp, '%s_strand' % seq_type, None)
if strand is None:
raise ValueError("Required attribute %r not found." %
('%s_strand' % (seq_type)))
# switch start <--> end coordinates if strand is -1
if strand < 0:
if field.endswith('start'):
value = getattr(hsp, '%s_end' % seq_type)
elif field.endswith('end'):
value = getattr(hsp, '%s_start' % seq_type) + 1
elif field.endswith('start'):
# adjust start coordinate for positive strand
value += 1
return value
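    # Hedged example of the adjustment above (made-up coordinates): for a hit
    # fragment stored with the 0-based, half-open range hit_start=9,
    # hit_end=19,
    #     plus strand  -> sstart = 9 + 1 = 10, send = 19
    #     minus strand -> sstart = 19,         send = 9 + 1 = 10
    # which restores BLAST's 1-based, strand-ordered output coordinates.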
def _adjust_output(self, field, value):
"""Adjusts formatting of the given field and value to mimic native tab output."""
# qseq and sseq are stored as SeqRecord, but here we only need the str
if field in ('qseq', 'sseq'):
value = str(value.seq)
# evalue formatting, adapted from BLAST+ source:
# src/objtools/align_format/align_format_util.cpp#L668
elif field == 'evalue':
if value < 1.0e-180:
value = '0.0'
elif value < 1.0e-99:
value = '%2.0e' % value
elif value < 0.0009:
value = '%3.0e' % value
elif value < 0.1:
value = '%4.3f' % value
elif value < 1.0:
value = '%3.2f' % value
elif value < 10.0:
value = '%2.1f' % value
else:
value = '%5.0f' % value
# pident and ppos formatting
elif field in ('pident', 'ppos'):
value = '%.2f' % value
# evalue formatting, adapted from BLAST+ source:
# src/objtools/align_format/align_format_util.cpp#L723
elif field == 'bitscore':
if value > 9999:
value = '%4.3e' % value
elif value > 99.9:
value = '%4.0d' % value
else:
value = '%4.1f' % value
        # coverages are written without decimals (although keeping them as
        # floats would arguably be a more faithful representation)
elif field in ('qcovhsp', 'qcovs'):
value = '%.0f' % value
# list into '<>'-delimited string
elif field == 'salltitles':
value = '<>'.join(value)
# list into ';'-delimited string
elif field in ('sallseqid', 'sallacc', 'staxids', 'sscinames',
'scomnames', 'sblastnames', 'sskingdoms'):
value = ';'.join(value)
# everything else
else:
value = str(value)
return value
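    # Small illustration of the formatting rules above (values are made up):
    # an evalue of 2e-200 is written as '0.0', an evalue of 0.05 as '0.050',
    # and a pident of 97.5 as '97.50'.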
def _build_comments(self, qres):
"""Returns a string of a QueryResult tabular comment."""
comments = []
# inverse mapping of the long-short name map, required
# for writing comments
inv_field_map = dict((v, k) for k, v in _LONG_SHORT_MAP.items())
        # try to anticipate qresults without a version attribute
if not hasattr(qres, 'version'):
program_line = '# %s' % qres.program.upper()
else:
program_line = '# %s %s' % (qres.program.upper(), qres.version)
comments.append(program_line)
# description may or may not be None
if qres.description is None:
comments.append('# Query: %s' % qres.id)
else:
comments.append('# Query: %s %s' % (qres.id, qres.description))
# try appending RID line, if present
try:
comments.append('# RID: %s' % qres.rid)
except AttributeError:
pass
comments.append('# Database: %s' % qres.target)
# qresults without hits don't show the Fields comment
if qres:
comments.append('# Fields: %s' %
', '.join(inv_field_map[field] for field in self.fields))
comments.append('# %i hits found' % len(qres))
return '\n'.join(comments) + '\n'
# if not used as a module, run the doctest
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/SearchIO/BlastIO/blast_tab.py
|
Python
|
apache-2.0
| 33,732
|
[
"BLAST",
"Biopython"
] |
60498557660afe3f7649463063dcda9bf6a17ef60e1dc949dcf99230d974b746
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.